From cce93208f383969d718c92c526c5e834cd3a2733 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Fri, 18 Oct 2019 22:43:09 +0200
Subject: commit first draft of barycenter.py

---
 src/python/gudhi/barycenter.py | 187 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 187 insertions(+)
 create mode 100644 src/python/gudhi/barycenter.py

diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
new file mode 100644
index 00000000..c46f6926
--- /dev/null
+++ b/src/python/gudhi/barycenter.py
@@ -0,0 +1,187 @@
+import ot
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.patches import Polygon
+
+def _proj_on_diag(x):
+    return np.array([(x[0] + x[1]) / 2, (x[0] + x[1]) / 2])
+
+
+def _norm2(x, y):
+    return (y[0] - x[0])**2 + (y[1] - x[1])**2
+
+
+def _norm_inf(x, y):
+    return max(np.abs(y[0] - x[0]), np.abs(y[1] - x[1]))
+
+
+def _cost_matrix(X, Y):
+    """
+    :param X: (n x 2) numpy.array encoding the first diagram
+    :param Y: (m x 2) numpy.array encoding the second diagram
+    :return: The cost matrix with size (k x k) where k = |d_1| + |d_2| in order to encode matching to diagonal
+    """
+    n, m = len(X), len(Y)
+    k = n + m
+    M = np.zeros((k, k))
+    for i in range(n):  # go through X points
+        x_i = X[i]
+        p_x_i = _proj_on_diag(x_i)  # proj of x_i on the diagonal
+        dist_x_delta = _norm2(x_i, p_x_i)  # distance to the diagonal regarding the ground norm
+        for j in range(m):  # go through Y points
+            y_j = Y[j]
+            p_y_j = _proj_on_diag(y_j)
+            M[i, j] = _norm2(x_i, y_j)
+            dist_y_delta = _norm2(y_j, p_y_j)
+            for it in range(m):
+                M[n + it, j] = dist_y_delta
+        for it in range(n):
+            M[i, m + it] = dist_x_delta
+
+    return M
+
+
+def _optimal_matching(M):
+    n = len(M)
+    # if input weights are empty lists, pot treat the uniform assignment problem and returns a bistochastic matrix (up to *n).
+    P = ot.emd(a=[], b=[], M=M) * n
+    # return the list of indices j such that L[i] = j iff P[i,j] = 1
+    return np.nonzero(P)[1]
+
+
+def _mean(x, m):
+    """
+    :param x: a list of 2D-points, of diagonal, x_0... x_{k-1}
+    :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal
+    :returns: the weighted mean of x with (m-k) copies of Delta taken into account (defined by mukherjee etc.)
+    """
+    k = len(x)
+    if k > 0:
+        w = np.mean(x, axis=0)
+        w_delta = _proj_on_diag(w)
+        return (k * w + (m-k) * w_delta) / m
+    else:
+        return np.array([0, 0])
+
+
+def lagrangian_barycenter(pdiagset, init=None, verbose=False):
+    """
+    Compute the estimated barycenter computed with the Hungarian algorithm provided by Mukherjee et al
+    It is a local minimum of the corresponding Frechet function.
+    It exactly belongs to the persistence diagram space (because all computations are made on it).
+    :param pdiagset: a list of size N containing numpy.array of shape (n x
+    2) (n can vary), encoding a set of persistence diagrams with only finite
+    coordinates.
+    :param init: The initial value for barycenter estimate. If None, init is made on a random diagram from the dataset. Otherwise, it must be a (n x 2) numpy.array encoding a persistence diagram with n points.
+    :returns: If not verbose (default), the barycenter estimate (local minimum of the energy function). If verbose, returns a triplet (Y, a, e) where Y is the barycenter estimate, a is the assignments between the points of Y and those of the diagrams, and e is the energy value reached by the estimate.
+    """
+    m = len(pdiagset)  # number of diagrams we are averaging
+    X = pdiagset  # to shorten notations
+    nb_off_diag = np.array([len(X_i) for X_i in X])  # store the number of off-diagonal points for each of the X_i
+
+    # Initialisation of barycenter
+    if init is None:
+        i0 = np.random.randint(m)  # Index of first state for the barycenter
+        Y = X[i0].copy()
+    else:
+        Y = init.copy()
+
+    not_converged = True  # stopping criterion
+    while not_converged:
+        K = len(Y)  # current nb of points in Y (some might be on diagonal)
+        G = np.zeros((K, m))  # will store for each j, the (index) point matched in each other diagram (might be the diagonal).
+        updated_points = np.zeros((K, 2))  # will store the new positions of the points of Y
+        new_created_points = []  # will store eventual new points.
+
+        # Step 1 : compute optimal matching (Y, X_i) for each X_i
+        for i in range(m):
+            M = _cost_matrix(Y, X[i])
+            indices = _optimal_matching(M)
+            for y_j, x_i_j in enumerate(indices):
+                if y_j < K:  # we matched an off-diagonal point to x_i_j...
+                    if x_i_j < nb_off_diag[i]:  # ...which is also an off-diagonal point
+                        G[y_j, i] = x_i_j
+                    else:  # ...which is a diagonal point
+                        G[y_j, i] = -1  # -1 stands for the diagonal (mask)
+                else:  # We matched a diagonal point to x_i_j...
+                    if x_i_j < nb_off_diag[i]:  # which is an off-diag point! so we need to create a new point in Y
+                        new_y = _mean(np.array([X[i][x_i_j]]), m)  # Average this point with (m-1) copies of Delta
+                        new_created_points.append(new_y)
+
+        # Step 2 : Compute new points (mean)
+        for j in range(K):
+            matched_points = [X[i][int(G[j, i])] for i in range(m) if G[j, i] > -1]
+            updated_points[j] = _mean(matched_points, m)
+
+        if new_created_points:
+            Y = np.concatenate((updated_points, new_created_points))
+        else:
+            Y = updated_points
+
+        # Step 3 : we update our estimation of the barycenter
+        if len(new_created_points) == 0 and np.array_equal(updated_points, Y):
+            not_converged = False
+
+    if verbose:
+        matchings = []
+        energy = 0
+        n_y = len(Y)
+        for i in range(m):
+            M = _cost_matrix(Y, X[i])
+            edges = _optimal_matching(M)
+            matchings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y])
+            #energy += total_cost
+
+        #energy /= m
+        _plot_barycenter(X, Y, matchings)
+        plt.show()
+        return Y, matchings, energy
+    else:
+        return Y
+
+def _plot_barycenter(X, Y, matchings):
+    fig = plt.figure()
+    ax = fig.add_subplot(111)
+
+    # n_y = len(Y.points)
+    for i in range(len(X)):
+        indices = matchings[i]
+        n_i = len(X[i])
+
+        for (y_j, x_i_j) in enumerate(indices):
+            y = Y[y_j]
+            if y[0] != y[1]:
+                if x_i_j < n_i:  # not mapped with the diag
+                    x = X[i][x_i_j]
+                else:  # y_j is matched to the diagonal
+                    x = _proj_on_diag(y)
+                ax.plot([y[0], x[0]], [y[1], x[1]], c='black',
+                        linestyle="dashed")
+
+    ax.scatter(Y[:,0], Y[:,1], color='purple', marker='d')
+
+    for dgm in X:
+        ax.scatter(dgm[:,0], dgm[:,1], marker ='o')
+
+    shift = 0.1  # for improved rendering
+    xmin = min([np.min(x[:,0]) for x in X]) - shift
+    xmax = max([np.max(x[:,0]) for x in X]) + shift
+    ymin = min([np.min(x[:,1]) for x in X]) - shift
+    ymax = max([np.max(x[:,1]) for x in X]) + shift
+    themin = min(xmin, ymin)
+    themax = max(xmax, ymax)
+    ax.set_xlim(themin, themax)
+    ax.set_ylim(themin, themax)
+    ax.add_patch(Polygon([[themin,themin], [themax,themin], [themax,themax]], fill=True, color='lightgrey'))
+    ax.set_xticks([])
+    ax.set_yticks([])
+    ax.set_aspect('equal', adjustable='box')
+    ax.set_title("example of (estimated) barycenter")
+
+
+if __name__=="__main__":
+    dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]])
+    dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]])
+    dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+    X = [dg1, dg2, dg3]
+    Y, a, e = lagrangian_barycenter(X, verbose=True)
--
cgit v1.2.3
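Editorial aside: the draft above leans on a POT behaviour that is easy to miss when reading _optimal_matching. Passing empty weight lists to ot.emd makes it solve the uniform assignment problem, and multiplying the returned doubly stochastic plan by n recovers a permutation matrix. A minimal standalone sketch of that behaviour (not part of the patch; the 3x3 cost matrix is made up for illustration):

    import numpy as np
    import ot

    # With a=[] and b=[], ot.emd assumes uniform weights 1/n on both sides,
    # so the optimal plan is a doubly stochastic matrix scaled by 1/n.
    M = np.array([[0., 1., 2.],
                  [1., 0., 1.],
                  [2., 1., 0.]])
    P = ot.emd(a=[], b=[], M=M) * len(M)  # scale back to a permutation matrix
    assignment = np.nonzero(P)[1]         # assignment[i] = j iff P[i, j] = 1
    print(assignment)                     # here: [0 1 2]

This is exactly the trick the function's comment describes; the later commits replace it with explicit weight vectors once the diagonal is given its own mass.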
From 48f7e17c5e9d4f6936bfdf6384015fe833e30c74 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Fri, 18 Oct 2019 23:18:53 +0200
Subject: updated documentation in barycenter.py

---
 src/python/gudhi/barycenter.py | 78 ++++++++++++++++++++++++++++++------------
 1 file changed, 57 insertions(+), 21 deletions(-)

diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
index c46f6926..85666631 100644
--- a/src/python/gudhi/barycenter.py
+++ b/src/python/gudhi/barycenter.py
@@ -4,22 +4,30 @@ import matplotlib.pyplot as plt
 from matplotlib.patches import Polygon
 
 def _proj_on_diag(x):
+    """
+    :param x: numpy.array of length 2, encoding a point on the upper half plane.
+    :returns: numpy.array of length 2, orthogonal projection of the point onto
+    the diagonal.
+    """
     return np.array([(x[0] + x[1]) / 2, (x[0] + x[1]) / 2])
 
 
 def _norm2(x, y):
+    """
+    :param x: numpy.array of length 2, encoding a point on the upper half plane.
+    :param y: numpy.array of length 2, encoding a point on the upper half plane.
+    :returns: distance between the two points for the euclidean norm.
+    """
     return (y[0] - x[0])**2 + (y[1] - x[1])**2
 
 
-def _norm_inf(x, y):
-    return max(np.abs(y[0] - x[0]), np.abs(y[1] - x[1]))
-
-
 def _cost_matrix(X, Y):
     """
     :param X: (n x 2) numpy.array encoding the first diagram
     :param Y: (m x 2) numpy.array encoding the second diagram
-    :return: The cost matrix with size (k x k) where k = |d_1| + |d_2| in order to encode matching to diagonal
+    :return: numpy.array with size (k x k) where k = |X| + |Y|, encoding the
+    cost matrix between points (including the diagonal, with repetition to
+    ensure one-to-one matchings).
     """
     n, m = len(X), len(Y)
     k = n + m
     M = np.zeros((k, k))
@@ -42,8 +50,15 @@ def _cost_matrix(X, Y):
 
 
 def _optimal_matching(M):
+    """
+    :param M: numpy.array of size (k x k), encoding the cost matrix between the
+    points of two diagrams.
+    :returns: list of length (k) such that L[i] = j if and only if P[i,j]=1
+    where P is a bi-stochastic matrix that minimizes <M, P>.
+    """
     n = len(M)
-    # if input weights are empty lists, pot treat the uniform assignment problem and returns a bistochastic matrix (up to *n).
+    # if input weights are empty lists, pot treats the uniform assignment
+    # problem and returns a bistochastic matrix (up to *n).
     P = ot.emd(a=[], b=[], M=M) * n
     # return the list of indices j such that L[i] = j iff P[i,j] = 1
     return np.nonzero(P)[1]
@@ -53,7 +68,8 @@ def _mean(x, m):
     """
     :param x: a list of 2D-points, of diagonal, x_0... x_{k-1}
     :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal
-    :returns: the weighted mean of x with (m-k) copies of Delta taken into account (defined by mukherjee etc.)
+    :returns: the weighted mean of x with (m-k) copies of Delta taken into
+    account.
     """
     k = len(x)
     if k > 0:
@@ -66,14 +82,23 @@ def _mean(x, m):
 
 def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     """
-    Compute the estimated barycenter computed with the Hungarian algorithm provided by Mukherjee et al
-    It is a local minimum of the corresponding Frechet function.
-    It exactly belongs to the persistence diagram space (because all computations are made on it).
-    :param pdiagset: a list of size N containing numpy.array of shape (n x
-    2) (n can vary), encoding a set of persistence diagrams with only finite
-    coordinates.
-    :param init: The initial value for barycenter estimate. If None, init is made on a random diagram from the dataset. Otherwise, it must be a (n x 2) numpy.array encoding a persistence diagram with n points.
-    :returns: If not verbose (default), the barycenter estimate (local minimum of the energy function). If verbose, returns a triplet (Y, a, e) where Y is the barycenter estimate, a is the assignments between the points of Y and those of the diagrams, and e is the energy value reached by the estimate.
+    Compute the estimated barycenter computed with the algorithm provided
+    by Turner et al (2014).
+    It is a local minimum of the corresponding Frechet function.
+    :param pdiagset: a list of size N containing numpy.array of shape (n x 2)
+    (n can vary), encoding a set of
+    persistence diagrams with only finite coordinates.
+    :param init: The initial value for barycenter estimate.
+    If None, init is made on a random diagram from the dataset.
+    Otherwise, it must be a (n x 2) numpy.array encoding a persistence diagram with n points.
+    :param verbose: if True, returns additional information about the
+    barycenters (assignment and energy).
+    :returns: If not verbose (default), a numpy.array encoding
+    the barycenter estimate (local minimum of the energy function).
+    If verbose, returns a triplet (Y, a, e)
+    where Y is the barycenter estimate, a is the assignments between the
+    points of Y and those of the diagrams,
+    and e is the energy value reached by the estimate.
     """
     m = len(pdiagset)  # number of diagrams we are averaging
     X = pdiagset  # to shorten notations
@@ -90,7 +115,10 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     while not_converged:
         K = len(Y)  # current nb of points in Y (some might be on diagonal)
         G = np.zeros((K, m))  # will store for each j, the (index) point matched in each other diagram (might be the diagonal).
-        updated_points = np.zeros((K, 2))  # will store the new positions of the points of Y
+        updated_points = np.zeros((K, 2))  # will store the new positions of
+                                           # the points of Y.
+                                           # If points disappear, they are thrown
+                                           # on [0,0] by default.
         new_created_points = []  # will store eventual new points.
 
         # Step 1 : compute optimal matching (Y, X_i) for each X_i
@@ -130,16 +158,22 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
             M = _cost_matrix(Y, X[i])
             edges = _optimal_matching(M)
             matchings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y])
-            #energy += total_cost
+            energy += sum([M[i,j] for i,j in enumerate(edges)])
 
-        #energy /= m
-        _plot_barycenter(X, Y, matchings)
-        plt.show()
+        energy = energy/m
         return Y, matchings, energy
     else:
         return Y
 
 def _plot_barycenter(X, Y, matchings):
+    """
+    :param X: list of persistence diagrams.
+    :param Y: numpy.array of (n x 2). Aims to be an estimate of the barycenter
+    returned by lagrangian_barycenter(X, verbose=True).
+    :param matchings: list of lists, such that L[k][i] = j if and only if
+    the i-th point of the barycenter is grouped with the j-th point of the k-th
+    diagram.
+    """
     fig = plt.figure()
     ax = fig.add_subplot(111)
 
@@ -176,7 +210,7 @@ def _plot_barycenter(X, Y, matchings):
     ax.set_xticks([])
     ax.set_yticks([])
     ax.set_aspect('equal', adjustable='box')
-    ax.set_title("example of (estimated) barycenter")
+    ax.set_title("Estimated barycenter")
 
 
 if __name__=="__main__":
@@ -185,3 +219,5 @@ if __name__=="__main__":
     dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
     X = [dg1, dg2, dg3]
     Y, a, e = lagrangian_barycenter(X, verbose=True)
+    _plot_barycenter(X, Y, a)
+    plt.show()
--
cgit v1.2.3
From e56c6dbeb1b4a0139e3d329e4d29a71c65f28ba9 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Wed, 4 Dec 2019 09:35:51 +0100
Subject: Delaunay triangulation for alpha complex in dD

---
 src/Alpha_complex/doc/Intro_alpha_complex.h     |  13 +--
 src/Alpha_complex/include/gudhi/Alpha_complex.h | 117 +++++++++++++-----------
 src/python/include/Alpha_complex_interface.h    |   2 +-
 3 files changed, 70 insertions(+), 62 deletions(-)

diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index 3c32a1e6..6931420a 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -47,15 +47,16 @@ namespace alpha_complex {
  *
  * \remark
  * - When the simplicial complex is constructed with an infinite value of alpha, the complex is a Delaunay
- * complex.
+ * complex with filtration values. The Delaunay complex without filtration values is also available by passing
+ * `default_filtration_value=true` to `Alpha_complex::create_complex`.
  * - For people only interested in the topology of the \ref alpha_complex (for instance persistence),
  * \ref alpha_complex is equivalent to the \ref cech_complex and much smaller if you do not bound the radii.
  * \ref cech_complex can still make sense in higher dimension precisely because you can bound the radii.
- * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass exact=true to create_complex, the
- * filtration values are the exact ones converted to the filtration value type of the simplicial complex. This can be
- * very slow. If you pass exact=false (the default), the filtration values are only guaranteed to have a small
- * multiplicative error compared to the exact value, see
+ * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass `exact=true` to
+ * `Alpha_complex::create_complex`, the filtration values are the exact ones converted to the filtration value type of
+ * the simplicial complex. This can be very slow. If you pass `exact=false` (the default), the filtration values are
+ * only guaranteed to have a small multiplicative error compared to the exact value, see
+ *
  * CGAL::Lazy_exact_nt::set_relative_precision_of_to_double for details. A drawback, when computing
 * persistence, is that an empty exact interval [10^12,10^12] may become a non-empty approximate interval
 * [10^12,10^12+10^6]. Using `CGAL::Epick_d` makes the computations slightly faster, and the combinatorics are still
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index 6b4d8463..13fcae99 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -254,16 +254,20 @@ class Alpha_complex {
  public:
   /** \brief Inserts all Delaunay triangulation into the simplicial complex.
-   * It also computes the filtration values according to the \ref createcomplexalgorithm
+   * It also computes the filtration values according to the \ref createcomplexalgorithm if default_filtration_value
+   * is not set.
    *
    * \tparam SimplicialComplexForAlpha must meet `SimplicialComplexForAlpha` concept.
    *
   * @param[in] complex SimplicialComplexForAlpha to be created.
   * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$, and there is very
-  * little point using anything else since it does not save time.
+  * little point using anything else since it does not save time. Useless if `default_filtration_value` is set to
+  * `true`.
   * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not CGAL::Epeck_d.
-  *
+  * @param[in] default_filtration_value Set this value to `true` if the filtration values do not need to be computed.
+  * Default value is `false` (which means the filtration values are computed).
+  *
   * @return true if creation succeeds, false otherwise.
   *
   * @pre Delaunay triangulation must be already constructed with dimension strictly greater than 0.
@@ -275,7 +279,8 @@ class Alpha_complex {
            typename Filtration_value = typename SimplicialComplexForAlpha::Filtration_value>
   bool create_complex(SimplicialComplexForAlpha& complex,
                       Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity(),
-                      bool exact = false) {
+                      bool exact = false,
+                      bool default_filtration_value = false) {
     // From SimplicialComplexForAlpha type required to insert into a simplicial complex (with or without subfaces).
     typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
     typedef typename SimplicialComplexForAlpha::Simplex_handle Simplex_handle;
@@ -322,62 +327,64 @@ class Alpha_complex {
     }
     // --------------------------------------------------------------------------------------------
 
-    // --------------------------------------------------------------------------------------------
-    // Will be re-used many times
-    Vector_of_CGAL_points pointVector;
-    // ### For i : d -> 0
-    for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
-      // ### Foreach Sigma of dim i
-      for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
-        int f_simplex_dim = complex.dimension(f_simplex);
-        if (decr_dim == f_simplex_dim) {
-          pointVector.clear();
-#ifdef DEBUG_TRACES
-          std::cout << "Sigma of dim " << decr_dim << " is";
-#endif  // DEBUG_TRACES
-          for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
-            pointVector.push_back(get_point(vertex));
-#ifdef DEBUG_TRACES
-            std::cout << " " << vertex;
-#endif  // DEBUG_TRACES
-          }
-#ifdef DEBUG_TRACES
-          std::cout << std::endl;
-#endif  // DEBUG_TRACES
-          // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
-          if (std::isnan(complex.filtration(f_simplex))) {
-            Filtration_value alpha_complex_filtration = 0.0;
-            // No need to compute squared_radius on a single point - alpha is 0.0
-            if (f_simplex_dim > 0) {
-              // squared_radius function initialization
-              Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
-
-              CGAL::NT_converter cv;
-              auto sqrad = squared_radius(pointVector.begin(), pointVector.end());
-#if CGAL_VERSION_NR >= 1050000000
-              if(exact) CGAL::exact(sqrad);
-#endif
-              alpha_complex_filtration = cv(sqrad);
+    if (!default_filtration_value) {
+      // --------------------------------------------------------------------------------------------
+      // Will be re-used many times
+      Vector_of_CGAL_points pointVector;
+      // ### For i : d -> 0
+      for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
+        // ### Foreach Sigma of dim i
+        for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
+          int f_simplex_dim = complex.dimension(f_simplex);
+          if (decr_dim == f_simplex_dim) {
+            pointVector.clear();
+            #ifdef DEBUG_TRACES
+            std::cout << "Sigma of dim " << decr_dim << " is";
+            #endif  // DEBUG_TRACES
+            for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
+              pointVector.push_back(get_point(vertex));
+              #ifdef DEBUG_TRACES
+              std::cout << " " << vertex;
+              #endif  // DEBUG_TRACES
             }
+            #ifdef DEBUG_TRACES
+            std::cout << std::endl;
+            #endif  // DEBUG_TRACES
+            // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
+            if (std::isnan(complex.filtration(f_simplex))) {
+              Filtration_value alpha_complex_filtration = 0.0;
+              // No need to compute squared_radius on a single point - alpha is 0.0
+              if (f_simplex_dim > 0) {
+                // squared_radius function initialization
+                Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
+
+                CGAL::NT_converter cv;
+                auto sqrad = squared_radius(pointVector.begin(), pointVector.end());
+                #if CGAL_VERSION_NR >= 1050000000
+                if(exact) CGAL::exact(sqrad);
+                #endif
+                alpha_complex_filtration = cv(sqrad);
+              }
+              complex.assign_filtration(f_simplex, alpha_complex_filtration);
+              #ifdef DEBUG_TRACES
+              std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
+              #endif  // DEBUG_TRACES
+            }
+            // No need to propagate further, unweighted points all have value 0
+            if (decr_dim > 1)
+              propagate_alpha_filtration(complex, f_simplex);
           }
-          complex.assign_filtration(f_simplex, alpha_complex_filtration);
-#ifdef DEBUG_TRACES
-          std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
-#endif  // DEBUG_TRACES
         }
-        // No need to propagate further, unweighted points all have value 0
-        if (decr_dim > 1)
-          propagate_alpha_filtration(complex, f_simplex);
       }
+      // --------------------------------------------------------------------------------------------
+
+      // --------------------------------------------------------------------------------------------
+      // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
+      complex.make_filtration_non_decreasing();
+      // Remove all simplices that have a filtration value greater than max_alpha_square
+      complex.prune_above_filtration(max_alpha_square);
+      // --------------------------------------------------------------------------------------------
     }
-    // --------------------------------------------------------------------------------------------
-
-    // --------------------------------------------------------------------------------------------
-    // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
-    complex.make_filtration_non_decreasing();
-    // Remove all simplices that have a filtration value greater than max_alpha_square
-    complex.prune_above_filtration(max_alpha_square);
-    // --------------------------------------------------------------------------------------------
     return true;
   }
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 96353cc4..3e878502 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -60,7 +60,7 @@ class Alpha_complex_interface {
   }
 
   void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) {
-    alpha_complex_->create_complex(*simplex_tree, max_alpha_square);
+    alpha_complex_->create_complex(*simplex_tree, max_alpha_square, false, false);
     simplex_tree->initialize_filtration();
  }
--
cgit v1.2.3

From 7cd38c50d3843fd222d4210507bad948afe63912 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Wed, 4 Dec 2019 10:02:22 +0100
Subject: Code review : let default values play their role

---
 src/python/include/Alpha_complex_interface.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 3e878502..96353cc4 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -60,7 +60,7 @@ class Alpha_complex_interface {
   }
 
   void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) {
-    alpha_complex_->create_complex(*simplex_tree, max_alpha_square, false, false);
+    alpha_complex_->create_complex(*simplex_tree, max_alpha_square);
    simplex_tree->initialize_filtration();
  }
--
cgit v1.2.3
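Editorial aside: for readers following the Python side of these two alpha-complex commits, here is a hedged usage sketch of the public interface that the patched Alpha_complex_interface ultimately serves. It assumes a GUDHI Python build from around this point in the history; note that the new C++ default_filtration_value option is not exposed to Python by these commits, so create_simplex_tree still computes filtration values:

    import gudhi

    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    alpha = gudhi.AlphaComplex(points=points)
    # max_alpha_square bounds the squared alpha value of kept simplices
    simplex_tree = alpha.create_simplex_tree(max_alpha_square=2.0)
    print(simplex_tree.num_simplices())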
From 80aa14d1b92d1a61366d798b07073289d4db4fda Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Thu, 5 Dec 2019 18:42:48 +0100
Subject: first version of barycenter for persistence diagrams

---
 src/python/doc/barycenter_sum.inc  |  22 +++
 src/python/doc/barycenter_user.rst |  51 ++++++
 src/python/gudhi/barycenter.py     | 322 +++++++++++++++++++++++++------------
 3 files changed, 292 insertions(+), 103 deletions(-)
 create mode 100644 src/python/doc/barycenter_sum.inc
 create mode 100644 src/python/doc/barycenter_user.rst

diff --git a/src/python/doc/barycenter_sum.inc b/src/python/doc/barycenter_sum.inc
new file mode 100644
index 00000000..7801a845
--- /dev/null
+++ b/src/python/doc/barycenter_sum.inc
@@ -0,0 +1,22 @@
+.. table::
+   :widths: 30 50 20
+
+   +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
+   | .. figure::                                                     | A Frechet mean (or barycenter) is a generalization of the arithmetic  | :Author: Theo Lacombe                                            |
+   |      ../../doc/Barycenter/barycenter.png                        | mean in a non linear space such as the one of persistence diagrams.   |                                                                  |
+   |      :figclass: align-center                                    | Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is  | :Introduced in: GUDHI 3.1.0                                      |
+   |                                                                 | defined as a minimizer of the variance functional, that is of         |                                                                  |
+   |      Illustration of Frechet mean between persistence           | :math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`,                    | :Copyright: MIT                                                  |
+   |      diagrams.                                                  | where :math:`d_2` denotes the Wasserstein-2 distance between persis-  |                                                                  |
+   |                                                                 | tence diagrams.                                                        |                                                                  |
+   |                                                                 | It is known to exist and is generically unique. However, an exact     |                                                                  |
+   |                                                                 | computation is in general intractable. Current implementation avail-  | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1     |
+   |                                                                 | able is based on [Turner et al, 2014], and uses an EM-scheme to       |                                                                  |
+   |                                                                 | provide a local minimum of the variance functional (somewhat similar  |                                                                  |
+   |                                                                 | to the Lloyd algorithm to estimate a solution to the k-means          |                                                                  |
+   |                                                                 | problem). The combinatorial structure of the algorithm limits its     |                                                                  |
+   |                                                                 | scaling on large scale problems (thousands of diagrams and of points  |                                                                  |
+   |                                                                 | per diagram).                                                          |                                                                  |
+   +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
+   | * :doc:`barycenter_user`                                                                                                                                                                                    |
+   +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst
new file mode 100644
index 00000000..fae2854a
--- /dev/null
+++ b/src/python/doc/barycenter_user.rst
@@ -0,0 +1,51 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Wasserstein distance user manual
+================================
+Definition
+----------
+
+.. include:: wasserstein_distance_sum.inc
+
+This implementation is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport".
+
+Function
+--------
+.. autofunction:: gudhi.barycenter.lagrangian_barycenter
+
+
+Basic example
+-------------
+
+This example computes the Frechet mean (aka Wasserstein barycenter) between four persistence diagrams.
+It is initialized on the 4th diagram, which is the empty diagram. It is encoded by np.array([]).
+Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values.
+
+.. testcode::
+
+    import gudhi.barycenter
+    import numpy as np
+
+    dg1 = np.array([[0.2, 0.5]])
+    dg2 = np.array([[0.2, 0.7]])
+    dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+    dg4 = np.array([])
+
+    bary = gudhi.barycenter.lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3))
+
+    message = "Wasserstein barycenter estimated:"
+    print(message)
+    print(bary)
+
+The output is:
+
+.. testoutput::
+
+    Wasserstein barycenter estimated:
+    [[0.27916667 0.55416667]
+     [0.7375     0.7625    ]
+     [0.2375     0.2625    ]]
+
diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
index 85666631..3cd214a7 100644
--- a/src/python/gudhi/barycenter.py
+++ b/src/python/gudhi/barycenter.py
@@ -1,75 +1,105 @@
 import ot
 import numpy as np
-import matplotlib.pyplot as plt
-from matplotlib.patches import Polygon
+import scipy.spatial.distance as sc
 
-def _proj_on_diag(x):
-    """
-    :param x: numpy.array of length 2, encoding a point on the upper half plane.
-    :returns: numpy.array of length 2, orthogonal projection of the point onto
-    the diagonal.
-    """
-    return np.array([(x[0] + x[1]) / 2, (x[0] + x[1]) / 2])
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+#   - YYYY/MM Author: Description of the modification
 
-def _norm2(x, y):
-    """
-    :param x: numpy.array of length 2, encoding a point on the upper half plane.
-    :param y: numpy.array of length 2, encoding a point on the upper half plane.
-    :returns: distance between the two points for the euclidean norm.
-    """
-    return (y[0] - x[0])**2 + (y[1] - x[1])**2
+def _proj_on_diag(w):
+    '''
+    Util function to project a point on the diag.
+    '''
+    return np.array([(w[0] + w[1])/2 , (w[0] + w[1])/2])
+
+
+def _proj_on_diag_array(X):
+    '''
+    :param X: (n x 2) array encoding the points of a persistent diagram.
+    :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
+    '''
+    Z = (X[:,0] + X[:,1]) / 2.
+    return np.array([Z , Z]).T
+
+
+def _build_dist_matrix(X, Y, p=2., q=2.):
+    '''
+    :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
+    :param Y: (m x 2) numpy.array encoding the second diagram.
+    :param q: Ground metric (i.e. norm l_q).
+    :param p: exponent for the Wasserstein metric.
+    :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
+    For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the power p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal.
+    Note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal).
+    Note that for lagrangian_barycenter, one must use p=q=2.
+    '''
+    Xdiag = _proj_on_diag_array(X)
+    Ydiag = _proj_on_diag_array(Y)
+    if np.isinf(q):
+        C = sc.cdist(X, Y, metric='chebyshev')**p
+        Cxd = np.linalg.norm(X - Xdiag, ord=q, axis=1)**p
+        Cdy = np.linalg.norm(Y - Ydiag, ord=q, axis=1)**p
+    else:
+        C = sc.cdist(X,Y, metric='minkowski', p=q)**p
+        Cxd = np.linalg.norm(X - Xdiag, ord=q, axis=1)**p
+        Cdy = np.linalg.norm(Y - Ydiag, ord=q, axis=1)**p
+    Cf = np.hstack((C, Cxd[:,None]))
+    Cdy = np.append(Cdy, 0)
+
+    Cf = np.vstack((Cf, Cdy[None,:]))
+
+    return Cf
+
+
+def _optimal_matching(X, Y):
     """
-    :param M: numpy.array of size (k x k), encoding the cost matrix between the
-    points of two diagrams.
-    :returns: list of length (k) such that L[i] = j if and only if P[i,j]=1
-    where P is a bi-stochastic matrix that minimizes <M, P>.
+    :param X: numpy.array of size (n x 2)
+    :param Y: numpy.array of size (m x 2)
+    :returns: numpy.array of shape (k x 2) encoding the list of edges in the optimal matching.
+    That is, [(i, j), ...], where (i, j) indicates that X[i] is matched to Y[j];
+    if i >= len(X) or j >= len(Y), it means they represent the diagonal.
+
     """
+
+    n = len(X)
+    m = len(Y)
+    if X.size == 0:  # X is empty
+        if Y.size == 0:  # Y is empty
+            return np.array([[0,0]])  # the diagonal is matched to the diagonal and that's it...
+        else:
+            return np.column_stack([np.zeros(m+1, dtype=int), np.arange(m+1, dtype=int)])  # TO BE CORRECTED
+    elif Y.size == 0:  # X is not empty but Y is empty
+        return np.column_stack([np.zeros(n+1, dtype=int), np.arange(n+1, dtype=int)])  # TO BE CORRECTED
+
+    # we know X, Y are not empty diags now
+    M = _build_dist_matrix(X, Y)
+
+    a = np.full(n+1, 1. / (n + m) )  # weight vector of the input diagram. Uniform here.
+    a[-1] = a[-1] * m                # normalized so that we have a probability measure, required by POT
+    b = np.full(m+1, 1. / (n + m) )  # weight vector of the input diagram. Uniform here.
+    b[-1] = b[-1] * n                # so that we have a probability measure, required by POT
+    P = ot.emd(a=a, b=b, M=M)*(n+m)
+    # Note : it seems POT returns a permutation matrix in this situation, ...guarantee...?
+    # It should be enough to check that the algorithm only iterates on vertices of the transportation polytope.
+    P[P < 0.5] = 0  # dirty trick to avoid some numerical issues... to be improved.
+    # return the list of (i,j) such that P[i,j] > 0, i.e. x_i is matched to y_j (possibly the diagonal).
+    res = np.nonzero(P)
+    return np.column_stack(res)
 
 
 def _mean(x, m):
     """
-    :param x: a list of 2D-points, of diagonal, x_0... x_{k-1}
+    :param x: a list of 2D-points, off diagonal, x_0... x_{k-1}
     :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal
-    :returns: the weighted mean of x with (m-k) copies of Delta taken into
-    account.
+    :returns: the weighted mean of x with (m-k) copies of the diagonal
     """
     k = len(x)
     if k > 0:
@@ -88,44 +118,54 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     :param pdiagset: a list of size N containing numpy.array of shape (n x 2)
     (n can vary), encoding a set of
     persistence diagrams with only finite coordinates.
-    :param init: The initial value for barycenter estimate.
-    If None, init is made on a random diagram from the dataset.
-    Otherwise, it must be a (n x 2) numpy.array encoding a persistence diagram with n points.
-    :param verbose: if True, returns additional information about the
-    barycenters (assignment and energy).
-    :returns: If not verbose (default), a numpy.array encoding
+    :param init: The initial value for barycenter estimate.
+    If None, init is made on a random diagram from the dataset.
+    Otherwise, it must be an int (then we init with pdiagset[init])
+    or a (n x 2) numpy.array encoding a persistence diagram with n points.
+    :param verbose: if True, returns additional information about the
+    barycenters (assignment and energy).
+    :returns: If not verbose (default), a numpy.array encoding
     the barycenter estimate (local minimum of the energy function).
     If verbose, returns a triplet (Y, a, e)
     where Y is the barycenter estimate, a is the assignments between the
     points of Y and those of the diagrams,
     and e is the energy value reached by the estimate.
     """
-    m = len(pdiagset)  # number of diagrams we are averaging
-    X = pdiagset  # to shorten notations
+    X = pdiagset  # to shorten notations, not a copy
+    m = len(X)  # number of diagrams we are averaging
+    if m == 0:
+        print("Warning: computing barycenter of empty diag set. Returns None")
+        return None
+
     nb_off_diag = np.array([len(X_i) for X_i in X])  # store the number of off-diagonal points for each of the X_i
 
     # Initialisation of barycenter
     if init is None:
         i0 = np.random.randint(m)  # Index of first state for the barycenter
-        Y = X[i0].copy()
+        Y = X[i0].copy()  # copy() ensures that we do not modify X[i0]
     else:
-        Y = init.copy()
+        if type(init)==int:
+            Y = X[init].copy()
+        else:
+            Y = init.copy()
 
-    not_converged = True  # stopping criterion
-    while not_converged:
+    converged = False  # stopping criterion
+    while not converged:
         K = len(Y)  # current nb of points in Y (some might be on diagonal)
-        G = np.zeros((K, m))  # will store for each j, the (index) point matched in each other diagram (might be the diagonal).
+        G = np.zeros((K, m), dtype=int)-1  # will store for each j, the (index) point matched in each other diagram (might be the diagonal).
+                                           # that is G[j, i] = k <=> y_j is matched to
+                                           # x_k in the i-th diagram X[i]
         updated_points = np.zeros((K, 2))  # will store the new positions of
                                            # the points of Y.
                                            # If points disappear, they are thrown
                                            # on [0,0] by default.
-        new_created_points = []  # will store eventual new points.
+        new_created_points = []  # will store potential new points.
 
         # Step 1 : compute optimal matching (Y, X_i) for each X_i
+        # and create new points in Y if needed
         for i in range(m):
-            M = _cost_matrix(Y, X[i])
-            indices = _optimal_matching(M)
-            for y_j, x_i_j in enumerate(indices):
+            indices = _optimal_matching(Y, X[i])
+            for y_j, x_i_j in indices:
                 if y_j < K:  # we matched an off-diagonal point to x_i_j...
                     if x_i_j < nb_off_diag[i]:  # ...which is also an off-diagonal point
                         G[y_j, i] = x_i_j
                     else:  # ...which is a diagonal point
                         G[y_j, i] = -1  # -1 stands for the diagonal (mask)
                 else:  # We matched a diagonal point to x_i_j...
                     if x_i_j < nb_off_diag[i]:  # which is an off-diag point! so we need to create a new point in Y
                         new_y = _mean(np.array([X[i][x_i_j]]), m)  # Average this point with (m-1) copies of Delta
                         new_created_points.append(new_y)
 
-        # Step 2 : Compute new points (mean)
+        # Step 2 : Update current point positions thanks to the groupings computed
+
+        to_delete = []
         for j in range(K):
-            matched_points = [X[i][int(G[j, i])] for i in range(m) if G[j, i] > -1]
-            updated_points[j] = _mean(matched_points, m)
+            matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1]
+            new_y_j = _mean(matched_points, m)
+            if not np.array_equal(new_y_j, np.array([0,0])):
+                updated_points[j] = new_y_j
+            else:  # this point is no longer of any use.
+                to_delete.append(j)
+        # we remove the points to be deleted now.
+        updated_points = np.delete(updated_points, to_delete, axis=0)  # cannot be done in-place.
 
-        if new_created_points:
+        if new_created_points:  # we cannot converge if there have been newly created points.
             Y = np.concatenate((updated_points, new_created_points))
         else:
+            # Step 3 : we check convergence
+            if np.array_equal(updated_points, Y):
+                converged = True
             Y = updated_points
 
-        # Step 3 : we update our estimation of the barycenter
-        if len(new_created_points) == 0 and np.array_equal(updated_points, Y):
-            not_converged = False
 
     if verbose:
         matchings = []
-        energy = 0
+        #energy = 0
         n_y = len(Y)
         for i in range(m):
-            M = _cost_matrix(Y, X[i])
-            edges = _optimal_matching(M)
+            edges = _optimal_matching(Y, X[i])
             matchings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y])
-            energy += sum([M[i,j] for i,j in enumerate(edges)])
+            # energy += sum([M[i,j] for i,j in enumerate(edges)])
 
-        energy = energy/m
-        return Y, matchings, energy
+        # energy = energy/m
+        return Y, matchings  #, energy
     else:
         return Y
 
@@ -174,6 +222,11 @@ def _plot_barycenter(X, Y, matchings):
     the i-th point of the barycenter is grouped with the j-th point of the k-th
     diagram.
     """
+    # import matplotlib now to avoid useless dependencies
+
+    import matplotlib.pyplot as plt
+    from matplotlib.patches import Polygon
+
     fig = plt.figure()
     ax = fig.add_subplot(111)
 
@@ -182,7 +235,7 @@ def _plot_barycenter(X, Y, matchings):
         indices = matchings[i]
         n_i = len(X[i])
 
-        for (y_j, x_i_j) in enumerate(indices):
+        for (y_j, x_i_j) in indices:
             y = Y[y_j]
             if y[0] != y[1]:
                 if x_i_j < n_i:  # not mapped with the diag
@@ -192,16 +245,20 @@ def _plot_barycenter(X, Y, matchings):
                 ax.plot([y[0], x[0]], [y[1], x[1]], c='black',
                         linestyle="dashed")
 
-    ax.scatter(Y[:,0], Y[:,1], color='purple', marker='d')
+    ax.scatter(Y[:,0], Y[:,1], color='purple', marker='d', zorder=2)
 
-    for dgm in X:
-        ax.scatter(dgm[:,0], dgm[:,1], marker ='o')
+    for X_i in X:
+        if X_i.size > 0:
+            ax.scatter(X_i[:,0], X_i[:,1], marker ='o', zorder=2)
 
     shift = 0.1  # for improved rendering
-    xmin = min([np.min(x[:,0]) for x in X]) - shift
-    xmax = max([np.max(x[:,0]) for x in X]) + shift
-    ymin = min([np.min(x[:,1]) for x in X]) - shift
-    ymax = max([np.max(x[:,1]) for x in X]) + shift
+    try:
+        xmin = np.min(np.array([np.min(x[:,0]) for x in X if len(x) > 0]) - shift)
+        xmax = np.max(np.array([np.max(x[:,0]) for x in X if len(x) > 0]) + shift)
+        ymin = np.min(np.array([np.min(x[:,1]) for x in X if len(x) > 0]) - shift)
+        ymax = np.max(np.array([np.max(x[:,1]) for x in X if len(x) > 0]) + shift)
+    except ValueError:  # to handle the peculiar case where we only average empty diagrams.
+        xmin, xmax, ymin, ymax = 0, 1, 0, 1
     themin = min(xmin, ymin)
     themax = max(xmax, ymax)
     ax.set_xlim(themin, themax)
@@ -212,12 +269,71 @@ def _plot_barycenter(X, Y, matchings):
     ax.set_aspect('equal', adjustable='box')
     ax.set_title("Estimated barycenter")
 
+    plt.show()
+
+
+def _test_perf():
+    nb_repeat = 10
+    nb_points_in_dgm = [5, 10, 20, 50, 100]
+    nb_dmg = [3, 5, 10, 20]
+
+    from time import time
+    for m in nb_dmg:
+        for n in nb_points_in_dgm:
+            tstart = time()
+            for _ in range(nb_repeat):
+                X = [np.random.rand(n, 2) for _ in range(m)]
+                for diag in X:
+                    # enforce birth <= death, so that these are valid diagrams
+                    diag[:,1] = diag[:,1] + diag[:,0]
+                _ = lagrangian_barycenter(X)
+            tend = time()
+            print("Computation of barycenter in %s sec, with k = %s diags and n = %s points per diag."%(np.round((tend - tstart)/nb_repeat, 2), m, n))
+        print("********************")
+
+
+def _sanity_check(verbose):
+    #dg1 = np.array([[0.2, 0.5]])
+    #dg2 = np.array([[0.2, 0.7]])
+    #dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+    #dg4 = np.array([[0.72, 0.82]])
+    #X = [dg1, dg2, dg3, dg4]
+    #Y, a = lagrangian_barycenter(X, verbose=verbose)
+    #_plot_barycenter(X, Y, a)
+
+    #dg1 = np.array([[0.2, 0.5]])
+    #dg2 = np.array([])  # The empty diagram
+    #dg3 = np.array([[0.4, 0.8]])
+    #X = [dg1, dg2, dg3]
+    #Y, a = lagrangian_barycenter(X, verbose=verbose)
+    #_plot_barycenter(X, Y, a)
+
+    #dg1 = np.array([])
+    #dg2 = np.array([])  # The empty diagram
+    #dg3 = np.array([])
+    #X = [dg1, dg2, dg3]
+    #Y, a = lagrangian_barycenter(X, verbose=verbose)
+    #_plot_barycenter(X, Y, a)
+
+    #dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]])
+    #dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]])
+    #dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+    #X = [dg1, dg2, dg3]
+    #Y, a = lagrangian_barycenter(X, init=1, verbose=verbose)
+    #_plot_barycenter(X, Y, a)
+
+
+    dg1 = np.array([[0.2, 0.5]])
+    dg2 = np.array([[0.2, 0.7]])
+    dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+    dg4 = np.array([])
+
+    bary = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3)
+
+    message = "Wasserstein barycenter estimated:"
+    print(message)
+    print(bary)
 
 if __name__=="__main__":
-    dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]])
-    dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]])
-    dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
-    X = [dg1, dg2, dg3]
-    Y, a, e = lagrangian_barycenter(X, verbose=True)
-    _plot_barycenter(X, Y, a)
-    plt.show()
+    _sanity_check(verbose = True)
+    #_test_perf()
--
cgit v1.2.3
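Editorial aside: the key change in this commit is how the diagonal enters the transport problem. Each diagram gets uniform mass 1/(n+m) per point, and its virtual diagonal point absorbs the mass of the other diagram's points, so both marginals sum to 1 as ot.emd requires. A minimal standalone sketch of that weight construction (not part of the patch; it simply reproduces the vectors built in the new _optimal_matching, with made-up sizes n=3, m=2):

    import numpy as np

    n, m = 3, 2
    a = np.full(n + 1, 1. / (n + m))
    a[-1] *= m   # diagonal entry carries mass m/(n+m)
    b = np.full(m + 1, 1. / (n + m))
    b[-1] *= n   # diagonal entry carries mass n/(n+m)
    print(a, a.sum())  # [0.2 0.2 0.2 0.4] 1.0
    print(b, b.sum())  # [0.2 0.2 0.6] 1.0

Scaling the resulting plan by (n+m), as the code does, turns these uniform masses back into unit assignments between points.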
From 56a9294ede73d0660ba724b4f448c02dcd5e3dcc Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Thu, 5 Dec 2019 18:52:16 +0100
Subject: added image for barycenter in the /img repository

---
 src/python/doc/barycenter_sum.inc |   6 ++++--
 src/python/doc/img/barycenter.png | Bin 0 -> 12433 bytes
 src/python/gudhi/barycenter.py    |  33 ++++++++++++++++-----------------
 3 files changed, 20 insertions(+), 19 deletions(-)
 create mode 100644 src/python/doc/img/barycenter.png

diff --git a/src/python/doc/barycenter_sum.inc b/src/python/doc/barycenter_sum.inc
index 7801a845..afac07d7 100644
--- a/src/python/doc/barycenter_sum.inc
+++ b/src/python/doc/barycenter_sum.inc
@@ -3,7 +3,7 @@
    +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
    | .. figure::                                                     | A Frechet mean (or barycenter) is a generalization of the arithmetic  | :Author: Theo Lacombe                                            |
-   |      ../../doc/Barycenter/barycenter.png                        | mean in a non linear space such as the one of persistence diagrams.   |                                                                  |
+   |      ./img/barycenter.png                                       | mean in a non linear space such as the one of persistence diagrams.   |                                                                  |
    |      :figclass: align-center                                    | Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is  | :Introduced in: GUDHI 3.1.0                                      |
    |                                                                 | defined as a minimizer of the variance functional, that is of         |                                                                  |
    |      Illustration of Frechet mean between persistence           | :math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`,                    | :Copyright: MIT                                                  |
@@ -14,7 +14,9 @@
    |                                                                 | able is based on [Turner et al, 2014], and uses an EM-scheme to       |                                                                  |
    |                                                                 | provide a local minimum of the variance functional (somewhat similar  |                                                                  |
    |                                                                 | to the Lloyd algorithm to estimate a solution to the k-means          |                                                                  |
-   |                                                                 | problem). The combinatorial structure of the algorithm limits its     |                                                                  |
+   |                                                                 | problem). The local minimum returned depends on the initialization of |                                                                  |
+   |                                                                 | the barycenter.                                                        |                                                                  |
+   |                                                                 | The combinatorial structure of the algorithm limits its               |                                                                  |
    |                                                                 | scaling on large scale problems (thousands of diagrams and of points  |                                                                  |
    |                                                                 | per diagram).                                                          |                                                                  |
    +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
diff --git a/src/python/doc/img/barycenter.png b/src/python/doc/img/barycenter.png
new file mode 100644
index 00000000..cad6af70
Binary files /dev/null and b/src/python/doc/img/barycenter.png differ
diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
index 3cd214a7..b4afdb6a 100644
--- a/src/python/gudhi/barycenter.py
+++ b/src/python/gudhi/barycenter.py
@@ -293,13 +293,12 @@ def _sanity_check(verbose):
-    #dg1 = np.array([[0.2, 0.5]])
-    #dg2 = np.array([[0.2, 0.7]])
-    #dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
-    #dg4 = np.array([[0.72, 0.82]])
-    #X = [dg1, dg2, dg3, dg4]
-    #Y, a = lagrangian_barycenter(X, verbose=verbose)
-    #_plot_barycenter(X, Y, a)
+    dg1 = np.array([[0.2, 0.5]])
+    dg2 = np.array([[0.2, 0.7], [0.73, 0.88]])
+    dg3 = np.array([[0.3, 0.6], [0.7, 0.85], [0.2, 0.3]])
+    X = [dg1, dg2, dg3]
+    Y, a = lagrangian_barycenter(X, verbose=verbose)
+    _plot_barycenter(X, Y, a)
 
     #dg1 = np.array([[0.2, 0.5]])
     #dg2 = np.array([])  # The empty diagram
     #dg3 = np.array([[0.4, 0.8]])
     #X = [dg1, dg2, dg3]
     #Y, a = lagrangian_barycenter(X, verbose=verbose)
     #_plot_barycenter(X, Y, a)
@@ -323,16 +322,16 @@ def _sanity_check(verbose):
     #X = [dg1, dg2, dg3]
     #Y, a = lagrangian_barycenter(X, init=1, verbose=verbose)
     #_plot_barycenter(X, Y, a)
 
-    dg1 = np.array([[0.2, 0.5]])
-    dg2 = np.array([[0.2, 0.7]])
-    dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
-    dg4 = np.array([])
-
-    bary = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3)
-
-    message = "Wasserstein barycenter estimated:"
-    print(message)
-    print(bary)
+    #dg1 = np.array([[0.2, 0.5]])
+    #dg2 = np.array([[0.2, 0.7]])
+    #dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+    #dg4 = np.array([])
+    #
+    #bary, a = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=True)
+    #_plot_barycenter([dg1, dg2, dg3, dg4], bary, a)
+    #message = "Wasserstein barycenter estimated:"
+    #print(message)
+    #print(bary)
--
cgit v1.2.3

From aba9ad68394b0c5aae22c450cac7162733132002 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Thu, 5 Dec 2019 18:55:46 +0100
Subject: correction of bibliography

---
 src/python/doc/barycenter_user.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst
index fae2854a..1c4cb812 100644
--- a/src/python/doc/barycenter_user.rst
+++ b/src/python/doc/barycenter_user.rst
@@ -9,7 +9,7 @@ Definition
 
 .. include:: wasserstein_distance_sum.inc
 
-This implementation is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport".
+This implementation is based on ideas from "Frechet means for distribution of persistence diagrams", Turner et al. 2014.
 
 Function
 --------
--
cgit v1.2.3

From 5877b4d3b7aca645ba906dfe0be598b1881d8798 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Mon, 16 Dec 2019 17:53:59 +0100
Subject: update CMakeLists and create test_wasserstein_bary

---
 src/python/CMakeLists.txt                      |  3 +++
 src/python/gudhi/barycenter.py                 | 26 ++++++++++----------
 src/python/test/test_wasserstein_barycenter.py | 33 ++++++++++++++++++++++++++
 3 files changed, 50 insertions(+), 12 deletions(-)
 create mode 100755 src/python/test/test_wasserstein_barycenter.py

diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 9af85eac..7f9ff38f 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -52,6 +52,7 @@ if(PYTHONINTERP_FOUND)
     # Modules that should not be auto-imported in __init__.py
     set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
     set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
+    set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'barycenter', ")
 
     add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
     add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -210,6 +211,7 @@ if(PYTHONINTERP_FOUND)
     file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
     file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
     file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+    file(COPY "gudhi/barycenter.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
 
     add_custom_command(
         OUTPUT gudhi.so
@@ -385,6 +387,7 @@ if(PYTHONINTERP_FOUND)
     # Wasserstein
     if(OT_FOUND)
       add_gudhi_py_test(test_wasserstein_distance)
+      add_gudhi_py_test(test_wasserstein_barycenter)
     endif(OT_FOUND)
 
     # Representations
diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
index b4afdb6a..41418454 100644
--- a/src/python/gudhi/barycenter.py
+++ b/src/python/gudhi/barycenter.py
@@ -293,12 +293,12 @@ def _sanity_check(verbose):
-    dg1 = np.array([[0.2, 0.5]])
-    dg2 = np.array([[0.2, 0.7], [0.73, 0.88]])
-    dg3 = np.array([[0.3, 0.6], [0.7, 0.85], [0.2, 0.3]])
-    X = [dg1, dg2, dg3]
-    Y, a = lagrangian_barycenter(X, verbose=verbose)
-    _plot_barycenter(X, Y, a)
+    #dg1 = np.array([[0.2, 0.5]])
+    #dg2 = np.array([[0.2, 0.7], [0.73, 0.88]])
+    #dg3 = np.array([[0.3, 0.6], [0.7, 0.85], [0.2, 0.3]])
+    #X = [dg1, dg2, dg3]
+    #Y, a = lagrangian_barycenter(X, verbose=verbose)
+    #_plot_barycenter(X, Y, a)
 
     #dg1 = np.array([[0.2, 0.5]])
     #dg2 = np.array([])  # The empty diagram
     #dg3 = np.array([[0.4, 0.8]])
     #X = [dg1, dg2, dg3]
     #Y, a = lagrangian_barycenter(X, verbose=verbose)
     #_plot_barycenter(X, Y, a)
@@ -313,13 +313,15 @@ def _sanity_check(verbose):
     #X = [dg1, dg2, dg3]
     #Y, a = lagrangian_barycenter(X, verbose=verbose)
     #_plot_barycenter(X, Y, a)
+    #print(Y)
 
-    #dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]])
-    #dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]])
-    #dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
-    #X = [dg1, dg2, dg3]
-    #Y, a = lagrangian_barycenter(X, init=1, verbose=verbose)
-    #_plot_barycenter(X, Y, a)
+    dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]])
+    dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]])
+    dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+    X = [dg3]
+    Y, a = lagrangian_barycenter(X, verbose=verbose)
+    _plot_barycenter(X, Y, a)
+    print(Y)
 
diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py
new file mode 100755
index 00000000..6074f250
--- /dev/null
+++ b/src/python/test/test_wasserstein_barycenter.py
@@ -0,0 +1,33 @@
+from gudhi.barycenter import lagrangian_barycenter
+import numpy as np
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+    See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+    Author(s): Theo Lacombe
+
+    Copyright (C) 2019 Inria
+
+    Modification(s):
+      - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Theo Lacombe"
+__copyright__ = "Copyright (C) 2019 Inria"
+__license__ = "MIT"
+
+
+def test_lagrangian_barycenter():
+
+    dg1 = np.array([[0.2, 0.5]])
+    dg2 = np.array([[0.2, 0.7]])
+    dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+    dg4 = np.array([])
+    dg5 = np.array([])
+    dg6 = np.array([])
+    res = np.array([[0.27916667, 0.55416667], [0.7375, 0.7625], [0.2375, 0.2625]])
+
+    dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+
+    assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < 0.001
+    assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.array([]))
+    assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < 0.001
--
cgit v1.2.3
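Editorial aside: the next three small commits wrestle with one NumPy detail, so here is a short illustration (not part of the patches) of the underlying issue. np.array([]) has shape (0,), while the barycenter of empty diagrams should compare equal to an empty (0, 2) array; np.empty(shape=(0, 2)) provides that, whereas np.array([], shape=(0, 2)) is not valid NumPy and raises a TypeError:

    import numpy as np

    print(np.array([]).shape)             # (0,)
    print(np.empty(shape=(0, 2)).shape)   # (0, 2)
    # np.array([], shape=(0, 2))          # TypeError: unexpected keyword 'shape'

This explains the sequence of fixes below: first the assertion's arguments are misplaced, then the shape keyword is moved onto np.array (still invalid), and finally np.empty is adopted.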
dg4],init=3) message = "Wasserstein barycenter estimated:" print(message) diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index 6074f250..ae3f6579 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -29,5 +29,5 @@ def test_lagrangian_barycenter(): dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < 0.001 - assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.array([])) + assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), shape=(0,2), np.array([])) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < 0.001 -- cgit v1.2.3 From 0c2fdc65cc1ea676fa8d11c24bba0d34eb5b7a3c Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Dec 2019 18:34:24 +0100 Subject: Correction of typo in barycenter_user --- src/python/doc/barycenter_user.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst index 5344583f..714d807e 100644 --- a/src/python/doc/barycenter_user.rst +++ b/src/python/doc/barycenter_user.rst @@ -2,12 +2,12 @@ .. To get rid of WARNING: document isn't included in any toctree -Wasserstein distance user manual +Barycenter user manual ================================ Definition ---------- -.. include:: wasserstein_distance_sum.inc +.. include:: barycenter_sum.inc This implementation is based on ideas from "Frechet means for distribution of persistence diagrams", Turner et al. 2014. 
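For orientation, here is a minimal self-contained sketch of the usage documented above, a sketch under the assumption that GUDHI is built with the barycenter module from these commits and that POT is installed (the diagrams and the init value are the ones used in the doc and test commits):

    import numpy as np
    from gudhi.barycenter import lagrangian_barycenter

    # Persistence diagrams are (n x 2) numpy arrays of finite (birth, death) pairs.
    dg1 = np.array([[0.2, 0.5]])
    dg2 = np.array([[0.2, 0.7]])
    dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
    dg4 = np.array([])  # the empty diagram is accepted

    # init=3 starts the iteration from dg4; by default a random input diagram is used.
    bary = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4], init=3)
    print(bary)  # a (k x 2) estimate; averaging only empty diagrams gives np.empty((0, 2))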
-- cgit v1.2.3 From 20047b94e693f31fd88ca142ba7256767ac753eb Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Dec 2019 18:34:55 +0100 Subject: correction of typo in test_wasserstein_barycenter --- src/python/test/test_wasserstein_barycenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index ae3f6579..dc82a57c 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -29,5 +29,5 @@ def test_lagrangian_barycenter(): dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < 0.001 - assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), shape=(0,2), np.array([])) + assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.array([], shape=(0,2))) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < 0.001 -- cgit v1.2.3 From b23813b90aaf1b0ce2b21bdfb33d2a6ea5bfe4cc Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Dec 2019 19:32:26 +0100 Subject: correction test --- src/python/gudhi/barycenter.py | 6 ++++-- src/python/test/test_wasserstein_barycenter.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 41418454..b76166c0 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -318,10 +318,12 @@ def _sanity_check(verbose): dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]]) dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]]) dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) - X = [dg3] + dg4 = np.array([]) + X = [dg4] Y, a = lagrangian_barycenter(X, verbose=verbose) - _plot_barycenter(X, Y, a) + #_plot_barycenter(X, Y, a) print(Y) + print(np.array_equal(Y, np.empty(shape=(0,2) ))) #dg1 = np.array([[0.2, 0.5]]) diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index dc82a57c..910d23ff 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -29,5 +29,5 @@ def test_lagrangian_barycenter(): dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < 0.001 - assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.array([], shape=(0,2))) + assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2))) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < 0.001 -- cgit v1.2.3 From d91585af64805a11a4d446d9e3f6467f3394d0c6 Mon Sep 17 00:00:00 2001 From: Théo Lacombe Date: Tue, 17 Dec 2019 18:58:48 +0100 Subject: Update src/python/gudhi/barycenter.py correction of typo Co-Authored-By: Marc Glisse --- src/python/gudhi/barycenter.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index b76166c0..43602a6e 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -114,7 +114,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): """ Compute the estimated barycenter computed with the algorithm provided by Turner et al (2014). - It is a local minima of the corresponding Frechet function. + It is a local minimum of the corresponding Frechet function. :param pdiagset: a list of size N containing numpy.array of shape (n x 2) (n can variate), encoding a set of persistence diagrams with only finite coordinates. -- cgit v1.2.3 From 180add9067bc9bd0609362717972eeeb8d2f6713 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Thu, 19 Dec 2019 17:25:01 +0100 Subject: clean code and doc --- src/python/gudhi/barycenter.py | 129 ++++++++++++----------------------------- 1 file changed, 36 insertions(+), 93 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 43602a6e..c2173dba 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -58,12 +58,13 @@ def _build_dist_matrix(X, Y, p=2., q=2.): return Cf -def _optimal_matching(X, Y): +def _optimal_matching(X, Y, withcost=False): """ :param X: numpy.array of size (n x 2) :param Y: numpy.array of size (m x 2) + :param withcost: returns also the cost corresponding to this optimal matching :returns: numpy.array of shape (k x 2) encoding the list of edges in the optimal matching. - That is, [[(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] + That is, [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] if i > len(X) or j > len(Y), it means they represent the diagonal. """ @@ -74,10 +75,10 @@ def _optimal_matching(X, Y): if Y.size == 0: # Y is empty return np.array([[0,0]]) # the diagonal is matched to the diagonal and that's it... else: - return np.column_stack([np.zeros(m+1, dtype=int), np.arange(m+1, dtype=int)]) # TO BE CORRECTED + return np.column_stack([np.zeros(m+1, dtype=int), np.arange(m+1, dtype=int)]) elif Y.size == 0: # X is not empty but Y is empty - return np.column_stack([np.zeros(n+1, dtype=int), np.arange(n+1, dtype=int)]) # TO BE CORRECTED - + return np.column_stack([np.zeros(n+1, dtype=int), np.arange(n+1, dtype=int)]) + # we know X, Y are not empty diags now M = _build_dist_matrix(X, Y) @@ -86,12 +87,16 @@ def _optimal_matching(X, Y): b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. b[-1] = b[-1] * n # so that we have a probability measure, required by POT P = ot.emd(a=a, b=b, M=M)*(n+m) - # Note : it seems POT return a permutation matrix in this situation, - # ...guarantee...? - # It should be enough to check that the algorithm only iterates on vertices of the transportation polytope. + # Note : it seems POT return a permutation matrix in this situation, ie a vertex of the constraint set (generically true). + if withcost: + cost = np.sqrt(np.sum(np.multiply(P, M))) P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to be improved. # return the list of (i,j) such that P[i,j] > 0, i.e. x_i is matched to y_j (should it be the diag). 
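    # Sketch of why the threshold above is harmless, under the generic
    # assumption stated in the note: a and b scale to integral marginals, so
    # the ot.emd solution, once multiplied by (n+m), sits at a vertex of the
    # transportation polytope and is (generically) a 0/1 permutation matrix;
    # zeroing entries below 0.5 then only clears floating-point noise and
    # leaves the matching read off just below unchanged.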
res = np.nonzero(P) + + if withcost: + return np.column_stack(res), cost + return np.column_stack(res) @@ -123,13 +128,16 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): Otherwise, it must be an int (then we init with diagset[init]) or a (n x 2) numpy.array enconding a persistence diagram with n points. :param verbose: if True, returns additional information about the - barycenters (assignment and energy). + barycenter. :returns: If not verbose (default), a numpy.array encoding the barycenter estimate (local minima of the energy function). - If verbose, returns a triplet (Y, a, e) - where Y is the barycenter estimate, a is the assignments between the - points of Y and thoses of the diagrams, - and e is the energy value reached by the estimate. + If verbose, returns a couple (Y, log) + where Y is the barycenter estimate, + and log is a dict that contains additional informations: + - assigments, a list of list of pairs (i,j), + That is, a[k] = [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] + if i > len(X) or j > len(Y), it means they represent the diagonal. + - energy, a float representing the Frechet mean value obtained. """ X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging @@ -200,25 +208,29 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): if verbose: - matchings = [] - #energy = 0 + groupings = [] + energy = 0 + log = {} n_y = len(Y) for i in range(m): - edges = _optimal_matching(Y, X[i]) - matchings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y]) - # energy += sum([M[i,j] for i,j in enumerate(edges)]) - - # energy = energy/m - return Y, matchings #, energy + edges, cost = _optimal_matching(Y, X[i], withcost=True) + print(edges) + groupings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y]) + energy += cost + log["groupings"] = groupings + energy = energy/m + log["energy"] = energy + + return Y, log else: return Y -def _plot_barycenter(X, Y, matchings): +def _plot_barycenter(X, Y, groupings): """ :param X: list of persistence diagrams. :param Y: numpy.array of (n x 2). Aims to be an estimate of the barycenter returned by lagrangian_barycenter(X, verbose=True). - :param matchings: list of lists, such that L[k][i] = j if and only if + :param groupings: list of lists, such that L[k][i] = j if and only if the i-th point of the barycenter is grouped with the j-th point of the k-th diagram. 
""" @@ -232,7 +244,7 @@ def _plot_barycenter(X, Y, matchings): # n_y = len(Y.points) for i in range(len(X)): - indices = matchings[i] + indices = groupings[i] n_i = len(X[i]) for (y_j, x_i_j) in indices: @@ -271,72 +283,3 @@ def _plot_barycenter(X, Y, matchings): plt.show() - -def _test_perf(): - nb_repeat = 10 - nb_points_in_dgm = [5, 10, 20, 50, 100] - nb_dmg = [3, 5, 10, 20] - - from time import time - for m in nb_dmg: - for n in nb_points_in_dgm: - tstart = time() - for _ in range(nb_repeat): - X = [np.random.rand(n, 2) for _ in range(m)] - for diag in X: - #enforce having diagrams - diag[:,1] = diag[:,1] + diag[:,0] - _ = lagrangian_barycenter(X) - tend = time() - print("Computation of barycenter in %s sec, with k = %s diags and n = %s points per diag."%(np.round((tend - tstart)/nb_repeat, 2), m, n)) - print("********************") - - -def _sanity_check(verbose): - #dg1 = np.array([[0.2, 0.5]]) - #dg2 = np.array([[0.2, 0.7], [0.73, 0.88]]) - #dg3 = np.array([[0.3, 0.6], [0.7, 0.85], [0.2, 0.3]]) - #X = [dg1, dg2, dg3] - #Y, a = lagrangian_barycenter(X, verbose=verbose) - #_plot_barycenter(X, Y, a) - - #dg1 = np.array([[0.2, 0.5]]) - #dg2 = np.array([]) # The empty diagram - #dg3 = np.array([[0.4, 0.8]]) - #X = [dg1, dg2, dg3] - #Y, a = lagrangian_barycenter(X, verbose=verbose) - #_plot_barycenter(X, Y, a) - - #dg1 = np.array([]) - #dg2 = np.array([]) # The empty diagram - #dg3 = np.array([]) - #X = [dg1, dg2, dg3] - #Y, a = lagrangian_barycenter(X, verbose=verbose) - #_plot_barycenter(X, Y, a) - #print(Y) - - dg1 = np.array([[0.1, 0.12], [0.21, 0.7], [0.4, 0.5], [0.3, 0.4], [0.35, 0.7], [0.5, 0.55], [0.32, 0.42], [0.1, 0.4], [0.2, 0.4]]) - dg2 = np.array([[0.09, 0.11], [0.3, 0.43], [0.5, 0.61], [0.3, 0.7], [0.42, 0.5], [0.35, 0.41], [0.74, 0.9], [0.5, 0.95], [0.35, 0.45], [0.13, 0.48], [0.32, 0.45]]) - dg3 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) - dg4 = np.array([]) - X = [dg4] - Y, a = lagrangian_barycenter(X, verbose=verbose) - #_plot_barycenter(X, Y, a) - print(Y) - print(np.array_equal(Y, np.empty(shape=(0,2) ))) - - - #dg1 = np.array([[0.2, 0.5]]) - #dg2 = np.array([[0.2, 0.7]]) - #dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]]) - #dg4 = np.array([]) - # - #bary, a = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=True) - #_plot_barycenter([dg1, dg2, dg3, dg4], bary, a) - #message = "Wasserstein barycenter estimated:" - #print(message) - #print(bary) - -if __name__=="__main__": - _sanity_check(verbose = True) - #_test_perf() -- cgit v1.2.3 From b7138871d42197c94c58b9938279455b75723606 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Thu, 19 Dec 2019 17:28:06 +0100 Subject: removed plot barycenter. Will be integrated in a tutorial --- src/python/gudhi/barycenter.py | 58 ------------------------------------------ 1 file changed, 58 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index c2173dba..11098afe 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -225,61 +225,3 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): else: return Y -def _plot_barycenter(X, Y, groupings): - """ - :param X: list of persistence diagrams. - :param Y: numpy.array of (n x 2). Aims to be an estimate of the barycenter - returned by lagrangian_barycenter(X, verbose=True). 
- :param groupings: list of lists, such that L[k][i] = j if and only if - the i-th point of the barycenter is grouped with the j-th point of the k-th - diagram. - """ - # import matplotlib now to avoid useless dependancies - - import matplotlib.pyplot as plt - from matplotlib.patches import Polygon - - fig = plt.figure() - ax = fig.add_subplot(111) - - # n_y = len(Y.points) - for i in range(len(X)): - indices = groupings[i] - n_i = len(X[i]) - - for (y_j, x_i_j) in indices: - y = Y[y_j] - if y[0] != y[1]: - if x_i_j < n_i: # not mapped with the diag - x = X[i][x_i_j] - else: # y_j is matched to the diagonal - x = _proj_on_diag(y) - ax.plot([y[0], x[0]], [y[1], x[1]], c='black', - linestyle="dashed") - - ax.scatter(Y[:,0], Y[:,1], color='purple', marker='d', zorder=2) - - for X_i in X: - if X_i.size > 0: - ax.scatter(X_i[:,0], X_i[:,1], marker ='o', zorder=2) - - shift = 0.1 # for improved rendering - try: - xmin = np.min(np.array([np.min(x[:,0]) for x in X if len(x) > 0]) - shift) - xmax = np.max(np.array([np.max(x[:,0]) for x in X if len(x) > 0]) + shift) - ymin = np.min(np.array([np.max(x[:,1]) for x in X if len(x) > 0]) - shift) - ymax = np.max(np.array([np.max(x[:,1]) for x in X if len(x) > 0]) + shift) - except ValueError: # to handle the pecular case where we only average empty diagrams. - xmin, xmax, ymin, ymax = 0, 1, 0, 1 - themin = min(xmin, ymin) - themax = max(xmax, ymax) - ax.set_xlim(themin, themax) - ax.set_ylim(themin, themax) - ax.add_patch(Polygon([[themin,themin], [themax,themin], [themax,themax]], fill=True, color='lightgrey')) - ax.set_xticks([]) - ax.set_yticks([]) - ax.set_aspect('equal', adjustable='box') - ax.set_title("Estimated barycenter") - - plt.show() - -- cgit v1.2.3 From 8c30016a3c56522014254dc571ed4fe81f31e02b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 25 Dec 2019 22:18:20 +0100 Subject: Add Hera as a submodule --- .gitmodules | 3 +++ ext/hera | 1 + 2 files changed, 4 insertions(+) create mode 100644 .gitmodules create mode 160000 ext/hera diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..6e8b3ab1 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ext/hera"] + path = ext/hera + url = https://bitbucket.org/grey_narn/hera.git diff --git a/ext/hera b/ext/hera new file mode 160000 index 00000000..5a59cfad --- /dev/null +++ b/ext/hera @@ -0,0 +1 @@ +Subproject commit 5a59cfad45c155f8af89c2c6d82db2848d52a953 -- cgit v1.2.3 From 15f222eecf3b427c59f09ec3bec17983377d96a2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 25 Dec 2019 22:27:52 +0100 Subject: Copy hera headers in user_version --- src/cmake/modules/GUDHI_user_version_target.cmake | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake index 4fa74330..2527dee9 100644 --- a/src/cmake/modules/GUDHI_user_version_target.cmake +++ b/src/cmake/modules/GUDHI_user_version_target.cmake @@ -56,6 +56,9 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI) +add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E + copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include ${GUDHI_USER_VERSION_DIR}/hera/wasserstein) + set(GUDHI_DIRECTORIES "doc;example;concept;utilities") set(GUDHI_INCLUDE_DIRECTORIES "include/gudhi") @@ -95,4 +98,4 
@@ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) endforeach() endforeach(GUDHI_INCLUDE_DIRECTORY ${GUDHI_INCLUDE_DIRECTORIES}) -endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) \ No newline at end of file +endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) -- cgit v1.2.3 From c2e22942c35e894d5c1ddc429eb32687c61538c8 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Dec 2019 10:22:47 +0100 Subject: Basic binding for wasserstein_distance --- src/cmake/modules/GUDHI_user_version_target.cmake | 2 +- src/python/gudhi/hera.cc | 48 +++++++++++++++++++++++ src/python/setup.py.in | 26 +++++++++++- 3 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 src/python/gudhi/hera.cc diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake index 2527dee9..9a05386f 100644 --- a/src/cmake/modules/GUDHI_user_version_target.cmake +++ b/src/cmake/modules/GUDHI_user_version_target.cmake @@ -57,7 +57,7 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI) add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E - copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include ${GUDHI_USER_VERSION_DIR}/hera/wasserstein) + copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include ${GUDHI_USER_VERSION_DIR}/ext/hera/geom_matching/wasserstein/include) set(GUDHI_DIRECTORIES "doc;example;concept;utilities") diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc new file mode 100644 index 00000000..7cef9425 --- /dev/null +++ b/src/python/gudhi/hera.cc @@ -0,0 +1,48 @@ +#include +#include + +#include + +#include + +#include + +namespace py = pybind11; +typedef py::array_t Dgm; + +namespace hera { +template <> struct DiagramTraits{ + //using Container = void; + using PointType = std::array; + using RealType = double; + + static RealType get_x(const PointType& p) { return std::get<0>(p); } + static RealType get_y(const PointType& p) { return std::get<1>(p); } +}; +} + +double wasserstein_distance( + Dgm d1, + Dgm d2) +{ + py::buffer_info buf1 = d1.request(); + py::buffer_info buf2 = d2.request(); + if(buf1.ndim!=2 || buf1.shape[1]!=2) + throw std::runtime_error("Diagram 1 must be an array of size n x 2"); + if(buf2.ndim!=2 || buf2.shape[1]!=2) + throw std::runtime_error("Diagram 1 must be an array of size n x 2"); + typedef hera::DiagramTraits::PointType Point; + auto p1 = (Point*)buf1.ptr; + auto p2 = (Point*)buf2.ptr; + auto diag1 = boost::make_iterator_range(p1, p1+buf1.shape[0]); + auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]); + + hera::AuctionParams params; + return hera::wasserstein_dist(diag1, diag2, params); +} + +PYBIND11_MODULE(hera, m) { + m.def("wasserstein_distance", &wasserstein_distance, R"pbdoc( + Compute the Wasserstein distance between two diagrams + )pbdoc"); +} diff --git a/src/python/setup.py.in b/src/python/setup.py.in index 3f1d4424..f7ffd146 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -26,6 +26,19 @@ library_dirs=[@GUDHI_PYTHON_LIBRARY_DIRS@] include_dirs = [numpy_get_include(), '@CMAKE_CURRENT_SOURCE_DIR@/gudhi/', @GUDHI_PYTHON_INCLUDE_DIRS@] runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@] +class get_pybind_include(object): + """Helper class to determine the pybind11 include path + The purpose of this class is to postpone importing pybind11 + until it is actually installed, so that the 
``get_include()`` + method can be invoked. """ + + def __init__(self, user=False): + self.user = user + + def __str__(self): + import pybind11 + return pybind11.get_include(self.user) + # Create ext_modules list from module list ext_modules = [] for module in modules: @@ -39,6 +52,15 @@ for module in modules: library_dirs=library_dirs, include_dirs=include_dirs, runtime_library_dirs=runtime_library_dirs,)) +ext_modules.append(Extension( + 'gudhi.hera', + sources = [source_dir + 'hera.cc'], + language = 'c++', + extra_compile_args=extra_compile_args + ['-fvisibility=hidden'], # FIXME + include_dirs = include_dirs + + ['@CMAKE_SOURCE_DIR@/ext/hera/geom_matching/wasserstein/include', + get_pybind_include(False), get_pybind_include(True)] + )) setup( name = 'gudhi', @@ -48,6 +70,6 @@ setup( version='@GUDHI_VERSION@', url='http://gudhi.gforge.inria.fr/', ext_modules = cythonize(ext_modules), - install_requires = ['cython','numpy >= 1.9',], - setup_requires = ['numpy >= 1.9',], + install_requires = ['cython','numpy >= 1.9','pybind11',], + setup_requires = ['numpy >= 1.9','pybind11',], ) -- cgit v1.2.3 From 56cee2efaa26e734c9555b5b0bb9dfbbc4baaed8 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Dec 2019 17:33:51 +0100 Subject: Fix compilation --- src/python/CMakeLists.txt | 1 + src/python/setup.py.in | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index b558d4c4..bec38305 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -86,6 +86,7 @@ if(PYTHONINTERP_FOUND) endif(MSVC) if(CMAKE_COMPILER_IS_GNUCXX) set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-frounding-math', ") + set(GUDHI_PYBIND11_EXTRA_COMPILE_ARGS "${GUDHI_PYBIND11_EXTRA_COMPILE_ARGS}'-fvisibility=hidden', ") endif(CMAKE_COMPILER_IS_GNUCXX) if (CMAKE_CXX_COMPILER_ID MATCHES Intel) set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-fp-model strict', ") diff --git a/src/python/setup.py.in b/src/python/setup.py.in index f7ffd146..2d96c57b 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -26,6 +26,7 @@ library_dirs=[@GUDHI_PYTHON_LIBRARY_DIRS@] include_dirs = [numpy_get_include(), '@CMAKE_CURRENT_SOURCE_DIR@/gudhi/', @GUDHI_PYTHON_INCLUDE_DIRS@] runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@] +# Copied from https://github.com/pybind/python_example/blob/master/setup.py class get_pybind_include(object): """Helper class to determine the pybind11 include path The purpose of this class is to postpone importing pybind11 @@ -52,14 +53,16 @@ for module in modules: library_dirs=library_dirs, include_dirs=include_dirs, runtime_library_dirs=runtime_library_dirs,)) +ext_modules = cythonize(ext_modules) + ext_modules.append(Extension( 'gudhi.hera', sources = [source_dir + 'hera.cc'], language = 'c++', - extra_compile_args=extra_compile_args + ['-fvisibility=hidden'], # FIXME include_dirs = include_dirs + ['@CMAKE_SOURCE_DIR@/ext/hera/geom_matching/wasserstein/include', - get_pybind_include(False), get_pybind_include(True)] + get_pybind_include(False), get_pybind_include(True)], + extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], )) setup( @@ -69,7 +72,7 @@ setup( author_email='gudhi-contact@lists.gforge.inria.fr', version='@GUDHI_VERSION@', url='http://gudhi.gforge.inria.fr/', - ext_modules = cythonize(ext_modules), + ext_modules = ext_modules, install_requires = ['cython','numpy >= 1.9','pybind11',], setup_requires = ['numpy >= 
1.9','pybind11',], ) -- cgit v1.2.3 From 4922f305b7601d9e5d7eb39c73a88ee53bf1ca87 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Dec 2019 18:31:47 +0100 Subject: Update doc --- src/python/doc/wasserstein_distance_user.rst | 14 +++++++++++--- src/python/gudhi/hera.cc | 8 +++++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index a049cfb5..13f6f1af 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -9,12 +9,20 @@ Definition .. include:: wasserstein_distance_sum.inc -This implementation is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport". +Functions +--------- +This implementation is based on ideas from "Large Scale Computation of Means +and Cluster for Persistence Diagrams via Optimal Transport". -Function --------- .. autofunction:: gudhi.wasserstein.wasserstein_distance +This other implementation comes from `Hera +`_ and is based on `"Geometry +Helps to Compare Persistence Diagrams." +`_ by Michael Kerber, Dmitriy +Morozov, and Arnur Nigmetov, at ALENEX 2016. + +.. autofunction:: gudhi.hera.wasserstein_distance Basic example ------------- diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 7cef9425..04f5990f 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -42,7 +42,13 @@ double wasserstein_distance( } PYBIND11_MODULE(hera, m) { - m.def("wasserstein_distance", &wasserstein_distance, R"pbdoc( + m.def("wasserstein_distance", &wasserstein_distance, + py::arg("X"), py::arg("Y"), + R"pbdoc( Compute the Wasserstein distance between two diagrams + + Parameters: + X (n x 2 numpy array): First diagram + Y (n x 2 numpy array): Second diagram )pbdoc"); } -- cgit v1.2.3 From 003b33403ab92e25cab2b9e51b36528d5cc6112c Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Dec 2019 19:30:04 +0100 Subject: Check for pybind11 --- .appveyor.yml | 2 +- .travis.yml | 2 +- .../modules/GUDHI_third_party_libraries.cmake | 1 + src/python/CMakeLists.txt | 51 ++++++++++++---------- 4 files changed, 31 insertions(+), 25 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 4a76ea0a..3a33ed62 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -48,7 +48,7 @@ install: - pip --version - python -m pip install --upgrade pip - pip install -U setuptools numpy matplotlib scipy Cython pytest - - pip install -U POT + - pip install -U POT pybind11 build_script: - mkdir build diff --git a/.travis.yml b/.travis.yml index d6c82e70..4b4c7068 100644 --- a/.travis.yml +++ b/.travis.yml @@ -59,7 +59,7 @@ before_cache: install: - python3 -m pip install --upgrade pip setuptools wheel - python3 -m pip install --user pytest Cython sphinx sphinxcontrib-bibtex sphinx-paramlinks matplotlib numpy scipy scikit-learn - - python3 -m pip install --user POT + - python3 -m pip install --user POT pybind11 script: - rm -rf build diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 24a34150..cb9f9033 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -127,6 +127,7 @@ if( PYTHONINTERP_FOUND ) find_python_module("sphinx") find_python_module("sklearn") find_python_module("ot") + find_python_module("pybind11") endif() if(NOT GUDHI_PYTHON_PATH) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 
bec38305..edb1ba02 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -407,32 +407,37 @@ endif(CGAL_FOUND) if(SCIPY_FOUND) if(SKLEARN_FOUND) if(OT_FOUND) - if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") - # User warning - Sphinx is a static pages generator, and configured to work fine with user_version - # Images and biblio warnings because not found on developper version - if (GUDHI_PYTHON_PATH STREQUAL "src/python") - set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss") - endif() - # sphinx target requires gudhi.so, because conf.py reads gudhi version from it - add_custom_target(sphinx - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx - DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" - COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) + if(PYBIND11_FOUND) + if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") + # User warning - Sphinx is a static pages generator, and configured to work fine with user_version + # Images and biblio warnings because not found on developper version + if (GUDHI_PYTHON_PATH STREQUAL "src/python") + set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss") + endif() + # sphinx target requires gudhi.so, because conf.py reads gudhi version from it + add_custom_target(sphinx + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" + COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) - add_test(NAME sphinx_py_test - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) + add_test(NAME sphinx_py_test + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) - # Set missing or not modules - set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") - else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") + # Set missing or not modules + set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") + else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + else(PYBIND11_FOUND) + message("++ Python documentation module will not be compiled because pybind11 was 
not found") set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + endif(PYBIND11_FOUND) else(OT_FOUND) message("++ Python documentation module will not be compiled because POT was not found") set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") -- cgit v1.2.3 From 7568b34c56e6a6102507df1be0029a0259f2afa7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Dec 2019 20:37:19 +0100 Subject: Checkout submodules in circleci --- .circleci/config.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5e45bc14..51b6c019 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -50,6 +50,8 @@ jobs: - run: name: Build and test python module. Generates and tests the python documentation command: | + git submodule init + git submodule update mkdir build; cd build; cmake -DUSER_VERSION_DIR=version ..; @@ -74,6 +76,8 @@ jobs: - run: name: Generates the C++ documentation with doxygen command: | + git submodule init + git submodule update mkdir build; cd build; cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version ..; @@ -93,4 +97,4 @@ workflows: - tests - utils - python - - doxygen \ No newline at end of file + - doxygen -- cgit v1.2.3 From b8701d847db37b80a58770e00b91494889df00e8 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 27 Dec 2019 00:56:08 +0100 Subject: Expose more options --- src/python/doc/wasserstein_distance_user.rst | 4 ++-- src/python/gudhi/hera.cc | 31 +++++++++++++++++++++------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 13f6f1af..6cd7f3a0 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -17,8 +17,8 @@ and Cluster for Persistence Diagrams via Optimal Transport". .. autofunction:: gudhi.wasserstein.wasserstein_distance This other implementation comes from `Hera -`_ and is based on `"Geometry -Helps to Compare Persistence Diagrams." +`_ (BSD-3-Clause) and is +based on `"Geometry Helps to Compare Persistence Diagrams." `_ by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov, at ALENEX 2016. 
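For context, a hypothetical call that matches the defaults exposed by the binding in the next hunk (assuming the gudhi.hera module is built from this commit):

    import numpy as np
    from gudhi import hera

    X = np.array([[0.0, 2.0], [1.0, 3.0]])
    Y = np.array([[0.0, 2.5]])

    # Defaults of the binding below: q = 1, internal sup-norm, 1% relative error.
    # np.inf is translated to hera's internal infinity encoding by the binding.
    d = hera.wasserstein_distance(X, Y, wasserstein_power=1.0,
                                  internal_p=np.inf, delta=0.01)
    print(d)  # approximate W_1(X, Y)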
diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 04f5990f..898040fb 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -12,7 +12,6 @@ typedef py::array_t Dgm; namespace hera { template <> struct DiagramTraits{ - //using Container = void; using PointType = std::array; using RealType = double; @@ -22,15 +21,17 @@ template <> struct DiagramTraits{ } double wasserstein_distance( - Dgm d1, - Dgm d2) + Dgm d1, Dgm d2, + double wasserstein_power, double internal_p, + double delta) { py::buffer_info buf1 = d1.request(); py::buffer_info buf2 = d2.request(); - if(buf1.ndim!=2 || buf1.shape[1]!=2) - throw std::runtime_error("Diagram 1 must be an array of size n x 2"); - if(buf2.ndim!=2 || buf2.shape[1]!=2) + // shape (n,2) or (0) for empty + if((buf1.ndim!=2 || buf1.shape[1]!=2) && (buf1.ndim!=1 || buf1.shape[0]!=0)) throw std::runtime_error("Diagram 1 must be an array of size n x 2"); + if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0)) + throw std::runtime_error("Diagram 2 must be an array of size n x 2"); typedef hera::DiagramTraits::PointType Point; auto p1 = (Point*)buf1.ptr; auto p2 = (Point*)buf2.ptr; @@ -38,17 +39,33 @@ double wasserstein_distance( auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]); hera::AuctionParams params; + params.wasserstein_power = wasserstein_power; + // hera encodes infinity as -1... + if(std::isinf(internal_p)) internal_p = hera::get_infinity(); + params.internal_p = internal_p; + params.delta = delta; + // The extra parameters are purposedly not exposed for now. return hera::wasserstein_dist(diag1, diag2, params); } PYBIND11_MODULE(hera, m) { m.def("wasserstein_distance", &wasserstein_distance, py::arg("X"), py::arg("Y"), + // Should we name those q, p and d instead? + py::arg("wasserstein_power") = 1, + py::arg("internal_p") = std::numeric_limits::infinity(), + py::arg("delta") = .01, R"pbdoc( - Compute the Wasserstein distance between two diagrams + Compute the Wasserstein distance between two diagrams. Points at infinity are supported. 
Parameters: X (n x 2 numpy array): First diagram Y (n x 2 numpy array): Second diagram + wasserstein_power (float): Wasserstein degree W_q + internal_p (float): Internal Minkowski norm L^p in R^2 + delta (float): Relative error 1+delta + + Returns: + float: Approximate Wasserstein distance W_q(X,Y) )pbdoc"); } -- cgit v1.2.3 From ec8b343dafdb70acc4a948ef737d83a3cc4d9f7b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 27 Dec 2019 12:43:03 +0100 Subject: Handle submodules in appveyor --- .appveyor.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.appveyor.yml b/.appveyor.yml index 3a33ed62..34f42dea 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -39,6 +39,7 @@ init: install: + - git submodule update --init - vcpkg install tbb:x64-windows boost-disjoint-sets:x64-windows boost-serialization:x64-windows boost-date-time:x64-windows boost-system:x64-windows boost-filesystem:x64-windows boost-units:x64-windows boost-thread:x64-windows boost-program-options:x64-windows eigen3:x64-windows mpfr:x64-windows mpir:x64-windows cgal:x64-windows - SET PATH=c:\Tools\vcpkg\installed\x64-windows\bin;%PATH% - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PYTHON%\Library\bin;%PATH% -- cgit v1.2.3 From ef4a688e07e070b190caf267a64fedd607830ee7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 2 Jan 2020 20:25:04 +0100 Subject: Update Hera --- ext/hera | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/hera b/ext/hera index 5a59cfad..9a899718 160000 --- a/ext/hera +++ b/ext/hera @@ -1 +1 @@ -Subproject commit 5a59cfad45c155f8af89c2c6d82db2848d52a953 +Subproject commit 9a89971855acefe39dce0e2adadf53b88ca8f683 -- cgit v1.2.3 From 23ccc50ab3608202452e44f287e4817ffa98227c Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 13 Jan 2020 11:14:37 +0100 Subject: Review all cout and cerr. 
cerr is reserved for warnings/error and cout for traces --- .../include/gudhi/Bitmap_cubical_complex.h | 56 ++++---- .../include/gudhi/Bitmap_cubical_complex_base.h | 34 ++--- ...cal_complex_periodic_boundary_conditions_base.h | 15 +- src/GudhUI/model/Model.h | 2 +- src/Nerve_GIC/include/gudhi/GIC.h | 20 ++- .../include/gudhi/Persistence_heat_maps.h | 58 ++++---- .../include/gudhi/Persistence_intervals.h | 54 ++++---- .../include/gudhi/Persistence_landscape.h | 154 ++++++++++----------- .../include/gudhi/Persistence_landscape_on_grid.h | 120 ++++++++-------- .../include/gudhi/Persistence_vectors.h | 24 ++-- .../include/gudhi/read_persistence_from_file.h | 8 +- src/common/include/gudhi/Debug_utils.h | 2 +- src/common/include/gudhi/reader_utils.h | 6 +- 13 files changed, 274 insertions(+), 279 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h index 37514dee..2f95dff3 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h @@ -69,7 +69,7 @@ class Bitmap_cubical_complex : public T { Bitmap_cubical_complex(const char* perseus_style_file) : T(perseus_style_file), key_associated_to_simplex(this->total_number_of_cells + 1) { if (globalDbg) { - std::cerr << "Bitmap_cubical_complex( const char* perseus_style_file )\n"; + std::cout << "Bitmap_cubical_complex( const char* perseus_style_file )\n"; } for (std::size_t i = 0; i != this->total_number_of_cells; ++i) { this->key_associated_to_simplex[i] = i; @@ -137,7 +137,7 @@ class Bitmap_cubical_complex : public T { **/ static Simplex_handle null_simplex() { if (globalDbg) { - std::cerr << "Simplex_handle null_simplex()\n"; + std::cout << "Simplex_handle null_simplex()\n"; } return std::numeric_limits::max(); } @@ -152,7 +152,7 @@ class Bitmap_cubical_complex : public T { **/ inline unsigned dimension(Simplex_handle sh) const { if (globalDbg) { - std::cerr << "unsigned dimension(const Simplex_handle& sh)\n"; + std::cout << "unsigned dimension(const Simplex_handle& sh)\n"; } if (sh != null_simplex()) return this->get_dimension_of_a_cell(sh); return -1; @@ -163,7 +163,7 @@ class Bitmap_cubical_complex : public T { **/ Filtration_value filtration(Simplex_handle sh) { if (globalDbg) { - std::cerr << "Filtration_value filtration(const Simplex_handle& sh)\n"; + std::cout << "Filtration_value filtration(const Simplex_handle& sh)\n"; } // Returns the filtration value of a simplex. 
if (sh != null_simplex()) return this->data[sh]; @@ -175,7 +175,7 @@ class Bitmap_cubical_complex : public T { **/ static Simplex_key null_key() { if (globalDbg) { - std::cerr << "Simplex_key null_key()\n"; + std::cout << "Simplex_key null_key()\n"; } return std::numeric_limits::max(); } @@ -185,7 +185,7 @@ class Bitmap_cubical_complex : public T { **/ Simplex_key key(Simplex_handle sh) const { if (globalDbg) { - std::cerr << "Simplex_key key(const Simplex_handle& sh)\n"; + std::cout << "Simplex_key key(const Simplex_handle& sh)\n"; } if (sh != null_simplex()) { return this->key_associated_to_simplex[sh]; @@ -198,7 +198,7 @@ class Bitmap_cubical_complex : public T { **/ Simplex_handle simplex(Simplex_key key) { if (globalDbg) { - std::cerr << "Simplex_handle simplex(Simplex_key key)\n"; + std::cout << "Simplex_handle simplex(Simplex_key key)\n"; } if (key != null_key()) { return this->simplex_associated_to_key[key]; @@ -211,7 +211,7 @@ class Bitmap_cubical_complex : public T { **/ void assign_key(Simplex_handle sh, Simplex_key key) { if (globalDbg) { - std::cerr << "void assign_key(Simplex_handle& sh, Simplex_key key)\n"; + std::cout << "void assign_key(Simplex_handle& sh, Simplex_key key)\n"; } if (key == null_key()) return; this->key_associated_to_simplex[sh] = key; @@ -251,7 +251,7 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator operator++() { if (globalDbg) { - std::cerr << "Filtration_simplex_iterator operator++\n"; + std::cout << "Filtration_simplex_iterator operator++\n"; } ++this->position; return (*this); @@ -265,7 +265,7 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator& operator=(const Filtration_simplex_iterator& rhs) { if (globalDbg) { - std::cerr << "Filtration_simplex_iterator operator =\n"; + std::cout << "Filtration_simplex_iterator operator =\n"; } this->b = rhs.b; this->position = rhs.position; @@ -274,21 +274,21 @@ class Bitmap_cubical_complex : public T { bool operator==(const Filtration_simplex_iterator& rhs) const { if (globalDbg) { - std::cerr << "bool operator == ( const Filtration_simplex_iterator& rhs )\n"; + std::cout << "bool operator == ( const Filtration_simplex_iterator& rhs )\n"; } return (this->position == rhs.position); } bool operator!=(const Filtration_simplex_iterator& rhs) const { if (globalDbg) { - std::cerr << "bool operator != ( const Filtration_simplex_iterator& rhs )\n"; + std::cout << "bool operator != ( const Filtration_simplex_iterator& rhs )\n"; } return !(*this == rhs); } Simplex_handle operator*() { if (globalDbg) { - std::cerr << "Simplex_handle operator*()\n"; + std::cout << "Simplex_handle operator*()\n"; } return this->b->simplex_associated_to_key[this->position]; } @@ -314,14 +314,14 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator begin() { if (globalDbg) { - std::cerr << "Filtration_simplex_iterator begin() \n"; + std::cout << "Filtration_simplex_iterator begin() \n"; } return Filtration_simplex_iterator(this->b); } Filtration_simplex_iterator end() { if (globalDbg) { - std::cerr << "Filtration_simplex_iterator end()\n"; + std::cout << "Filtration_simplex_iterator end()\n"; } Filtration_simplex_iterator it(this->b); it.position = this->b->simplex_associated_to_key.size(); @@ -347,7 +347,7 @@ class Bitmap_cubical_complex : public T { **/ Filtration_simplex_range filtration_simplex_range() { if (globalDbg) { - std::cerr << "Filtration_simplex_range filtration_simplex_range()\n"; + std::cout << "Filtration_simplex_range filtration_simplex_range()\n"; } // Returns a 
range over the simplices of the complex in the order of the filtration return Filtration_simplex_range(this); @@ -370,8 +370,8 @@ class Bitmap_cubical_complex : public T { std::pair endpoints(Simplex_handle sh) { std::vector bdry = this->get_boundary_of_a_cell(sh); if (globalDbg) { - std::cerr << "std::pair endpoints( Simplex_handle sh )\n"; - std::cerr << "bdry.size() : " << bdry.size() << "\n"; + std::cout << "std::pair endpoints( Simplex_handle sh )\n"; + std::cout << "bdry.size() : " << bdry.size() << "\n"; } // this method returns two first elements from the boundary of sh. if (bdry.size() < 2) @@ -392,7 +392,7 @@ class Bitmap_cubical_complex : public T { public: Skeleton_simplex_iterator(Bitmap_cubical_complex* b, std::size_t d) : b(b), dimension(d) { if (globalDbg) { - std::cerr << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n"; + std::cout << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n"; } // find the position of the first simplex of a dimension d this->position = 0; @@ -406,7 +406,7 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator operator++() { if (globalDbg) { - std::cerr << "Skeleton_simplex_iterator operator++()\n"; + std::cout << "Skeleton_simplex_iterator operator++()\n"; } // increment the position as long as you did not get to the next element of the dimension dimension. ++this->position; @@ -425,7 +425,7 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator& operator=(const Skeleton_simplex_iterator& rhs) { if (globalDbg) { - std::cerr << "Skeleton_simplex_iterator operator =\n"; + std::cout << "Skeleton_simplex_iterator operator =\n"; } this->b = rhs.b; this->position = rhs.position; @@ -435,21 +435,21 @@ class Bitmap_cubical_complex : public T { bool operator==(const Skeleton_simplex_iterator& rhs) const { if (globalDbg) { - std::cerr << "bool operator ==\n"; + std::cout << "bool operator ==\n"; } return (this->position == rhs.position); } bool operator!=(const Skeleton_simplex_iterator& rhs) const { if (globalDbg) { - std::cerr << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n"; + std::cout << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n"; } return !(*this == rhs); } Simplex_handle operator*() { if (globalDbg) { - std::cerr << "Simplex_handle operator*() \n"; + std::cout << "Simplex_handle operator*() \n"; } return this->position; } @@ -476,14 +476,14 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator begin() { if (globalDbg) { - std::cerr << "Skeleton_simplex_iterator begin()\n"; + std::cout << "Skeleton_simplex_iterator begin()\n"; } return Skeleton_simplex_iterator(this->b, this->dimension); } Skeleton_simplex_iterator end() { if (globalDbg) { - std::cerr << "Skeleton_simplex_iterator end()\n"; + std::cout << "Skeleton_simplex_iterator end()\n"; } Skeleton_simplex_iterator it(this->b, this->dimension); it.position = this->b->data.size(); @@ -500,7 +500,7 @@ class Bitmap_cubical_complex : public T { **/ Skeleton_simplex_range skeleton_simplex_range(unsigned dimension) { if (globalDbg) { - std::cerr << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n"; + std::cout << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n"; } return Skeleton_simplex_range(this, dimension); } @@ -515,7 +515,7 @@ class Bitmap_cubical_complex : public T { template void Bitmap_cubical_complex::initialize_simplex_associated_to_key() { if (globalDbg) { - std::cerr << "void 
Bitmap_cubical_complex::initialize_elements_ordered_according_to_filtration() \n"; + std::cout << "void Bitmap_cubical_complex::initialize_elements_ordered_according_to_filtration() \n"; } this->simplex_associated_to_key = std::vector(this->data.size()); std::iota(std::begin(simplex_associated_to_key), std::end(simplex_associated_to_key), 0); diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 0d6299d2..96036fd4 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -142,7 +142,7 @@ class Bitmap_cubical_complex_base { } if (coface_counter[i] != face_counter[i]) { if (number_of_position_in_which_counters_do_not_agree != -1) { - std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n"; + std::cerr << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n"; throw std::logic_error( "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face."); } @@ -521,11 +521,11 @@ void Bitmap_cubical_complex_base::put_data_to_bins(std::size_t number_of_bins // now put the data into the appropriate bins: for (std::size_t i = 0; i != this->data.size(); ++i) { if (dbg) { - std::cerr << "Before binning : " << this->data[i] << std::endl; + std::cout << "Before binning : " << this->data[i] << std::endl; } this->data[i] = min_max.first + dx * (this->data[i] - min_max.first) / number_of_bins; if (dbg) { - std::cerr << "After binning : " << this->data[i] << std::endl; + std::cout << "After binning : " << this->data[i] << std::endl; } } } @@ -539,11 +539,11 @@ void Bitmap_cubical_complex_base::put_data_to_bins(T diameter_of_bin) { // now put the data into the appropriate bins: for (std::size_t i = 0; i != this->data.size(); ++i) { if (dbg) { - std::cerr << "Before binning : " << this->data[i] << std::endl; + std::cout << "Before binning : " << this->data[i] << std::endl; } this->data[i] = min_max.first + diameter_of_bin * (this->data[i] - min_max.first) / number_of_bins; if (dbg) { - std::cerr << "After binning : " << this->data[i] << std::endl; + std::cout << "After binning : " << this->data[i] << std::endl; } } } @@ -617,7 +617,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus inFiltration >> dimensionOfData; if (dbg) { - std::cerr << "dimensionOfData : " << dimensionOfData << std::endl; + std::cout << "dimensionOfData : " << dimensionOfData << std::endl; } std::vector sizes; @@ -630,7 +630,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus sizes.push_back(size_in_this_dimension); dimensions *= size_in_this_dimension; if (dbg) { - std::cerr << "size_in_this_dimension : " << size_in_this_dimension << std::endl; + std::cout << "size_in_this_dimension : " << size_in_this_dimension << std::endl; } } this->set_up_containers(sizes); @@ -651,7 +651,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus } if (dbg) { - std::cerr << "Cell of an index : " << it.compute_index_in_bitmap() + std::cout << "Cell of an index : " << it.compute_index_in_bitmap() << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap()) << " get the value : " << filtrationLevel << std::endl; } @@ -754,20 +754,20 @@ std::vector Bitmap_cubical_complex_base::get_coboundary_of_a_cel template 
unsigned Bitmap_cubical_complex_base::get_dimension_of_a_cell(std::size_t cell) const { bool dbg = false; - if (dbg) std::cerr << "\n\n\n Computing position o a cell of an index : " << cell << std::endl; + if (dbg) std::cout << "\n\n\n Computing position o a cell of an index : " << cell << std::endl; unsigned dimension = 0; for (std::size_t i = this->multipliers.size(); i != 0; --i) { unsigned position = cell / this->multipliers[i - 1]; if (dbg) { - std::cerr << "i-1 :" << i - 1 << std::endl; - std::cerr << "cell : " << cell << std::endl; - std::cerr << "position : " << position << std::endl; - std::cerr << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl; + std::cout << "i-1 :" << i - 1 << std::endl; + std::cout << "cell : " << cell << std::endl; + std::cout << "position : " << position << std::endl; + std::cout << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl; } if (position % 2 == 1) { - if (dbg) std::cerr << "Nonzero length in this direction \n"; + if (dbg) std::cout << "Nonzero length in this direction \n"; dimension++; } cell = cell % this->multipliers[i - 1]; @@ -803,7 +803,7 @@ void Bitmap_cubical_complex_base::impose_lower_star_filtration() { while (indices_to_consider.size()) { if (dbg) { - std::cerr << "indices_to_consider in this iteration \n"; + std::cout << "indices_to_consider in this iteration \n"; for (std::size_t i = 0; i != indices_to_consider.size(); ++i) { std::cout << indices_to_consider[i] << " "; } @@ -813,14 +813,14 @@ void Bitmap_cubical_complex_base::impose_lower_star_filtration() { std::vector bd = this->get_boundary_of_a_cell(indices_to_consider[i]); for (std::size_t boundaryIt = 0; boundaryIt != bd.size(); ++boundaryIt) { if (dbg) { - std::cerr << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]] + std::cout << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]] << " while of a cell: " << indices_to_consider[i] << " is: " << this->data[indices_to_consider[i]] << std::endl; } if (this->data[bd[boundaryIt]] > this->data[indices_to_consider[i]]) { this->data[bd[boundaryIt]] = this->data[indices_to_consider[i]]; if (dbg) { - std::cerr << "Setting the value of a cell : " << bd[boundaryIt] + std::cout << "Setting the value of a cell : " << bd[boundaryIt] << " to : " << this->data[indices_to_consider[i]] << std::endl; } } diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h index edd794fe..3942dc34 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h @@ -128,7 +128,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c } if (coface_counter[i] != face_counter[i]) { if (number_of_position_in_which_counters_do_not_agree != -1) { - std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n"; + std::cerr << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n"; throw std::logic_error( "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face."); } @@ -237,7 +237,7 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base::Bitmap_cubical_comp if (inFiltration.eof()) break; 
if (dbg) { - std::cerr << "Cell of an index : " << it.compute_index_in_bitmap() + std::cout << "Cell of an index : " << it.compute_index_in_bitmap() << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap()) << " get the value : " << filtrationLevel << std::endl; } @@ -278,7 +278,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas std::size_t cell) const { bool dbg = false; if (dbg) { - std::cerr << "Computations of boundary of a cell : " << cell << std::endl; + std::cout << "Computations of boundary of a cell : " << cell << std::endl; } std::vector boundary_elements; @@ -292,7 +292,6 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas if (position % 2 == 1) { // if there are no periodic boundary conditions in this direction, we do not have to do anything. if (!directions_in_which_periodic_b_cond_are_to_be_imposed[i - 1]) { - // std::cerr << "A\n"; if (sum_of_dimensions % 2) { boundary_elements.push_back(cell - this->multipliers[i - 1]); boundary_elements.push_back(cell + this->multipliers[i - 1]); @@ -301,12 +300,11 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; + std::cout << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; } } else { // in this direction we have to do boundary conditions. Therefore, we need to check if we are not at the end. if (position != 2 * this->sizes[i - 1] - 1) { - // std::cerr << "B\n"; if (sum_of_dimensions % 2) { boundary_elements.push_back(cell - this->multipliers[i - 1]); boundary_elements.push_back(cell + this->multipliers[i - 1]); @@ -315,10 +313,9 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; + std::cout << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; } } else { - // std::cerr << "C\n"; if (sum_of_dimensions % 2) { boundary_elements.push_back(cell - this->multipliers[i - 1]); boundary_elements.push_back(cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1]); @@ -327,7 +324,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cerr << cell - this->multipliers[i - 1] << " " + std::cout << cell - this->multipliers[i - 1] << " " << cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1] << " "; } } diff --git a/src/GudhUI/model/Model.h b/src/GudhUI/model/Model.h index dd9bdaab..f2be944f 100644 --- a/src/GudhUI/model/Model.h +++ b/src/GudhUI/model/Model.h @@ -312,7 +312,7 @@ class Model { std::cout << "Call CHOMP library\n"; int returnValue = system("homsimpl chomp.sim"); if (returnValue != 0) { - std::cout << "homsimpl (from CHOMP) failed. Please check it is installed or available in the PATH." + std::cerr << "homsimpl (from CHOMP) failed. Please check it is installed or available in the PATH." 
<< std::endl; } } diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h index b8169c59..7a08b66f 100644 --- a/src/Nerve_GIC/include/gudhi/GIC.h +++ b/src/Nerve_GIC/include/gudhi/GIC.h @@ -344,7 +344,7 @@ class Cover_complex { if (num_edges(one_skeleton_OFF)) one_skeleton = one_skeleton_OFF; else - std::cout << "No triangulation read in OFF file!" << std::endl; + std::cerr << "No triangulation read in OFF file!" << std::endl; } public: // Set graph from Rips complex. @@ -530,7 +530,7 @@ class Cover_complex { cover_name = "coordinate " + std::to_string(k); } else{ - std::cout << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl; + std::cerr << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl; for (int i = 0; i < n; i++) func.push_back(0.0); functional_cover = true; cover_name = "null"; @@ -563,11 +563,11 @@ class Cover_complex { */ double set_automatic_resolution() { if (!functional_cover) { - std::cout << "Cover needs to come from the preimages of a function." << std::endl; + std::cerr << "Cover needs to come from the preimages of a function." << std::endl; return 0; } if (type != "Nerve" && type != "GIC") { - std::cout << "Type of complex needs to be specified." << std::endl; + std::cerr << "Type of complex needs to be specified." << std::endl; return 0; } @@ -622,11 +622,11 @@ class Cover_complex { */ void set_cover_from_function() { if (resolution_double == -1 && resolution_int == -1) { - std::cout << "Number and/or length of intervals not specified" << std::endl; + std::cerr << "Number and/or length of intervals not specified" << std::endl; return; } if (gain == -1) { - std::cout << "Gain not specified" << std::endl; + std::cerr << "Gain not specified" << std::endl; return; } @@ -991,7 +991,7 @@ class Cover_complex { color_name.append(std::to_string(k)); } else{ - std::cout << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl; + std::cerr << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl; for (int i = 0; i < n; i++) func.push_back(0.0); functional_cover = true; cover_name = "null"; @@ -1213,9 +1213,7 @@ class Cover_complex { */ void compute_distribution(unsigned int N = 100) { unsigned int sz = distribution.size(); - if (sz >= N) { - std::cout << "Already done!" << std::endl; - } else { + if (sz < N) { for (unsigned int i = 0; i < N - sz; i++) { if (verbose) std::cout << "Computing " << i << "th bootstrap, bottleneck distance = "; @@ -1319,7 +1317,7 @@ class Cover_complex { */ void find_simplices() { if (type != "Nerve" && type != "GIC") { - std::cout << "Type of complex needs to be specified." << std::endl; + std::cerr << "Type of complex needs to be specified." 
<< std::endl; return; } diff --git a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h index b1af3503..c0aee9d0 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h +++ b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h @@ -55,9 +55,9 @@ std::vector > create_Gaussian_filter(size_t pixel_radius, do } if (dbg) { - std::cerr << "Kernel initialize \n"; - std::cerr << "pixel_radius : " << pixel_radius << std::endl; - std::cerr << "kernel.size() : " << kernel.size() << std::endl; + std::cout << "Kernel initialized \n"; + std::cout << "pixel_radius : " << pixel_radius << std::endl; + std::cout << "kernel.size() : " << kernel.size() << std::endl; getchar(); } @@ -79,12 +79,12 @@ std::vector > create_Gaussian_filter(size_t pixel_radius, do } if (dbg) { - std::cerr << "Here is the kernel : \n"; + std::cout << "Here is the kernel : \n"; for (size_t i = 0; i != kernel.size(); ++i) { for (size_t j = 0; j != kernel[i].size(); ++j) { - std::cerr << kernel[i][j] << " "; + std::cout << kernel[i][j] << " "; } - std::cerr << std::endl; + std::cout << std::endl; } } return kernel; @@ -290,16 +290,16 @@ class Persistence_heat_maps { bool dbg = false; if (this->heat_map.size() != second.heat_map.size()) { if (dbg) - std::cerr << "this->heat_map.size() : " << this->heat_map.size() + std::cout << "this->heat_map.size() : " << this->heat_map.size() << " \n second.heat_map.size() : " << second.heat_map.size() << std::endl; return false; } if (this->min_ != second.min_) { - if (dbg) std::cerr << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl; + if (dbg) std::cout << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl; return false; } if (this->max_ != second.max_) { - if (dbg) std::cerr << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl; + if (dbg) std::cout << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl; return false; } // in the other case we may assume that the persistence images are defined on the same domain. @@ -322,15 +322,15 @@ class Persistence_heat_maps { bool operator==(const Persistence_heat_maps& rhs) const { bool dbg = false; if (!this->check_if_the_same(rhs)) { - if (dbg) std::cerr << "The domains are not the same \n"; + if (dbg) std::cout << "The domains are not the same \n"; return false; // in this case, the domains are not the same, so the maps cannot be the same.
} for (size_t i = 0; i != this->heat_map.size(); ++i) { for (size_t j = 0; j != this->heat_map[i].size(); ++j) { if (!almost_equal(this->heat_map[i][j], rhs.heat_map[i][j])) { if (dbg) { - std::cerr << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl; - std::cerr << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl; + std::cout << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl; + std::cout << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl; } return false; } @@ -586,14 +586,14 @@ void Persistence_heat_maps::construct(const std::vectorf = f; - if (dbg) std::cerr << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl; + if (dbg) std::cout << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl; if (min_ == max_) { - if (dbg) std::cerr << "min and max parameters will be determined based on intervals \n"; + if (dbg) std::cout << "min and max parameters will be determined based on intervals \n"; // in this case, we want the program to set up the min_ and max_ values by itself. min_ = std::numeric_limits<double>::max(); max_ = -std::numeric_limits<double>::max(); @@ -611,9 +611,9 @@ void Persistence_heat_maps::construct(const std::vector::construct(const std::vectorheat_map = heat_map_; - if (dbg) std::cerr << "Done creating of the heat map, now we will fill in the structure \n"; + if (dbg) std::cout << "Done creating the heat map, now we will fill in the structure \n"; for (size_t pt_nr = 0; pt_nr != intervals_.size(); ++pt_nr) { // compute the value of intervals_[pt_nr] in the grid: @@ -638,9 +638,9 @@ void Persistence_heat_maps::construct(const std::vector((intervals_[pt_nr].second - this->min_) / (this->max_ - this->min_) * number_of_pixels); if (dbg) { - std::cerr << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl; - std::cerr << "x_grid : " << x_grid << std::endl; - std::cerr << "y_grid : " << y_grid << std::endl; + std::cout << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl; + std::cout << "x_grid : " << x_grid << std::endl; + std::cout << "y_grid : " << y_grid << std::endl; } // x_grid and y_grid gives a center of the kernel. We want to have its lower left corner.
To get this, we need to @@ -650,9 +650,9 @@ void Persistence_heat_maps::construct(const std::vectorf(intervals_[pt_nr]); @@ -663,11 +663,11 @@ void Persistence_heat_maps::construct(const std::vector= 0) && (x_grid + i < this->heat_map.size()) && ((y_grid + j) >= 0) && (y_grid + j < this->heat_map.size())) { if (dbg) { - std::cerr << y_grid + j << " " << x_grid + i << std::endl; + std::cout << y_grid + j << " " << x_grid + i << std::endl; } this->heat_map[y_grid + j][x_grid + i] += scaling_value * filter[i][j]; if (dbg) { - std::cerr << "Position : (" << x_grid + i << "," << y_grid + j + std::cout << "Position : (" << x_grid + i << "," << y_grid + j << ") got increased by the value : " << filter[i][j] << std::endl; } } @@ -842,7 +842,7 @@ void Persistence_heat_maps::load_from_file(const char* file in >> this->min_ >> this->max_; if (dbg) { - std::cerr << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl; + std::cout << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl; } std::string temp; @@ -878,7 +878,7 @@ template std::vector Persistence_heat_maps::vectorize(int number_of_function) const { std::vector result; if (!discrete) { - std::cout << "No vectorize method in case of infinite dimensional vectorization" << std::endl; + std::cerr << "No vectorize method in case of infinite dimensional vectorization" << std::endl; return result; } diff --git a/src/Persistence_representations/include/gudhi/Persistence_intervals.h b/src/Persistence_representations/include/gudhi/Persistence_intervals.h index ea4220ea..f02e930e 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_intervals.h +++ b/src/Persistence_representations/include/gudhi/Persistence_intervals.h @@ -293,7 +293,7 @@ std::vector > Persistence_intervals::dominant_interval for (size_t i = 0; i != std::min(where_to_cut, position_length_vector.size()); ++i) { result.push_back(this->intervals[position_length_vector[i].first]); if (dbg) - std::cerr << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second + std::cout << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second << std::endl; } @@ -303,7 +303,7 @@ std::vector > Persistence_intervals::dominant_interval std::vector Persistence_intervals::histogram_of_lengths(size_t number_of_bins) const { bool dbg = false; - if (dbg) std::cerr << "this->intervals.size() : " << this->intervals.size() << std::endl; + if (dbg) std::cout << "this->intervals.size() : " << this->intervals.size() << std::endl; // first find the length of the longest interval: double lengthOfLongest = 0; for (size_t i = 0; i != this->intervals.size(); ++i) { @@ -313,7 +313,7 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of } if (dbg) { - std::cerr << "lengthOfLongest : " << lengthOfLongest << std::endl; + std::cout << "lengthOfLongest : " << lengthOfLongest << std::endl; } // this is a container we will use to store the resulting histogram @@ -330,10 +330,10 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of ++result[position]; if (dbg) { - std::cerr << "i : " << i << std::endl; - std::cerr << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n"; - std::cerr << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl; - std::cerr << "position : " << position << std::endl; + std::cout << "i : " << i << 
std::endl; + std::cout << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n"; + std::cout << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl; + std::cout << "position : " << position << std::endl; getchar(); } } @@ -342,7 +342,7 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of result.resize(number_of_bins); if (dbg) { - for (size_t i = 0; i != result.size(); ++i) std::cerr << result[i] << std::endl; + for (size_t i = 0; i != result.size(); ++i) std::cout << result[i] << std::endl; } return result; } @@ -368,7 +368,7 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do for (size_t i = 0; i != this->intervals.size(); ++i) { if (dbg) { - std::cerr << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; + std::cout << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; } size_t beginIt = 0; @@ -390,8 +390,8 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do } if (dbg) { - std::cerr << "beginIt : " << beginIt << std::endl; - std::cerr << "endIt : " << endIt << std::endl; + std::cout << "beginIt : " << beginIt << std::endl; + std::cout << "endIt : " << endIt << std::endl; } for (size_t pos = beginIt; pos != endIt; ++pos) { @@ -399,11 +399,11 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do (this->intervals[i].second - this->intervals[i].first); } if (dbg) { - std::cerr << "Result at this stage \n"; + std::cout << "Result at this stage \n"; for (size_t aa = 0; aa != result.size(); ++aa) { - std::cerr << result[aa] << " "; + std::cout << result[aa] << " "; } - std::cerr << std::endl; + std::cout << std::endl; } } return result; @@ -455,9 +455,9 @@ inline double compute_euclidean_distance(const std::pair& f, con std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) const { bool dbg = false; if (dbg) { - std::cerr << "Here are the intervals : \n"; + std::cout << "Here are the intervals : \n"; for (size_t i = 0; i != this->intervals.size(); ++i) { - std::cerr << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n"; + std::cout << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n"; } getchar(); } @@ -486,12 +486,12 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) distances_from_diagonal[i] = distanceToDiagonal; if (dbg) { - std::cerr << "Here are the distances form the point : [" << this->intervals[i].first << " , " + std::cout << "Here are the distances from the point : [" << this->intervals[i].first << " , " << this->intervals[i].second << "] in the diagram \n"; for (size_t aa = 0; aa != distancesFromI.size(); ++aa) { - std::cerr << "To : " << i + aa << " : " << distancesFromI[aa] << " "; + std::cout << "To : " << i + aa << " : " << distancesFromI[aa] << " "; } - std::cerr << std::endl; + std::cout << std::endl; getchar(); } @@ -502,18 +502,18 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) } } if (dbg) { - std::cerr << "Here is the distance matrix : \n"; + std::cout << "Here is the distance matrix : \n"; for (size_t i = 0; i != distances.size(); ++i) { for (size_t j = 0; j != distances.size(); ++j) { - std::cerr << distances[i][j] << " "; + std::cout << distances[i][j] << " "; } - std::cerr << std::endl; + std::cout << std::endl; } - std::cerr << std::endl << std::endl << "And here are the distances to the
diagonal : " << std::endl; + std::cout << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl; for (size_t i = 0; i != distances_from_diagonal.size(); ++i) { - std::cerr << distances_from_diagonal[i] << " "; + std::cout << distances_from_diagonal[i] << " "; } - std::cerr << std::endl << std::endl; + std::cout << std::endl << std::endl; getchar(); } @@ -526,13 +526,13 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) if (k > distancesFromI.size()) { if (dbg) { - std::cerr << "There are not enough neighbors in your set. We set the result to plus infty \n"; + std::cout << "There are not enough neighbors in your set. We set the result to plus infty \n"; } result.push_back(std::numeric_limits::max()); } else { if (distances_from_diagonal[i] > distancesFromI[k]) { if (dbg) { - std::cerr << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n"; + std::cout << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n"; } result.push_back(distances_from_diagonal[i]); } else { diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape.h b/src/Persistence_representations/include/gudhi/Persistence_landscape.h index b819ccb6..dc93bb49 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_landscape.h +++ b/src/Persistence_representations/include/gudhi/Persistence_landscape.h @@ -343,7 +343,7 @@ class Persistence_landscape { bool dbg = false; if (dbg) { - std::cerr << "to_average.size() : " << to_average.size() << std::endl; + std::cout << "to_average.size() : " << to_average.size() << std::endl; } std::vector nextLevelMerge(to_average.size()); @@ -357,13 +357,13 @@ class Persistence_landscape { while (nextLevelMerge.size() != 1) { if (dbg) { - std::cerr << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl; + std::cout << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl; } std::vector nextNextLevelMerge; nextNextLevelMerge.reserve(to_average.size()); for (size_t i = 0; i < nextLevelMerge.size(); i = i + 2) { if (dbg) { - std::cerr << "i : " << i << std::endl; + std::cout << "i : " << i << std::endl; } Persistence_landscape* l = new Persistence_landscape; if (i + 1 != nextLevelMerge.size()) { @@ -374,7 +374,7 @@ class Persistence_landscape { nextNextLevelMerge.push_back(l); } if (dbg) { - std::cerr << "After this iteration \n"; + std::cout << "After this iteration \n"; getchar(); } @@ -471,25 +471,25 @@ Persistence_landscape::Persistence_landscape(const char* filename, size_t dimens bool operatorEqualDbg = false; bool Persistence_landscape::operator==(const Persistence_landscape& rhs) const { if (this->land.size() != rhs.land.size()) { - if (operatorEqualDbg) std::cerr << "1\n"; + if (operatorEqualDbg) std::cout << "1\n"; return false; } for (size_t level = 0; level != this->land.size(); ++level) { if (this->land[level].size() != rhs.land[level].size()) { - if (operatorEqualDbg) std::cerr << "this->land[level].size() : " << this->land[level].size() << "\n"; - if (operatorEqualDbg) std::cerr << "rhs.land[level].size() : " << rhs.land[level].size() << "\n"; - if (operatorEqualDbg) std::cerr << "2\n"; + if (operatorEqualDbg) std::cout << "this->land[level].size() : " << this->land[level].size() << "\n"; + if (operatorEqualDbg) std::cout << "rhs.land[level].size() : " << rhs.land[level].size() << "\n"; + if (operatorEqualDbg) std::cout << "2\n"; return false; } for (size_t i = 0; i != this->land[level].size(); ++i) { if 
(!(almost_equal(this->land[level][i].first, rhs.land[level][i].first) && almost_equal(this->land[level][i].second, rhs.land[level][i].second))) { if (operatorEqualDbg) - std::cerr << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second + std::cout << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second << "\n"; if (operatorEqualDbg) - std::cerr << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n"; - if (operatorEqualDbg) std::cerr << "3\n"; + std::cout << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n"; + if (operatorEqualDbg) std::cout << "3\n"; return false; } } @@ -507,7 +507,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( const std::vector >& p, size_t number_of_levels) { bool dbg = false; if (dbg) { - std::cerr << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )" + std::cout << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )" << std::endl; } @@ -517,9 +517,9 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( std::sort(bars.begin(), bars.end(), compare_points_sorting); if (dbg) { - std::cerr << "Bars : \n"; + std::cout << "Bars : \n"; for (size_t i = 0; i != bars.size(); ++i) { - std::cerr << bars[i].first << " " << bars[i].second << "\n"; + std::cout << bars[i].first << " " << bars[i].second << "\n"; } getchar(); } @@ -545,7 +545,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( lambda_n.push_back(characteristicPoints[0]); if (dbg) { - std::cerr << "1 Adding to lambda_n : (" << -std::numeric_limits::max() << " " << 0 << ") , (" + std::cout << "1 Adding to lambda_n : (" << -std::numeric_limits::max() << " " << 0 << ") , (" << minus_length(characteristicPoints[0]) << " " << 0 << ") , (" << characteristicPoints[0].first << " " << characteristicPoints[0].second << ") \n"; } @@ -562,13 +562,13 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(lambda_n[lambda_n.size() - 1]) - minus_length(characteristicPoints[i])) / 2); lambda_n.push_back(point); if (dbg) { - std::cerr << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n"; + std::cout << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n"; } if (dbg) { - std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " + std::cout << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << "\n"; - std::cerr << "point : " << point.first << " " << point.second << "\n"; + std::cout << "point : " << point.first << " " << point.second << "\n"; getchar(); } @@ -577,7 +577,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(point) <= birth_plus_deaths(characteristicPoints[i + p]))) { newCharacteristicPoints.push_back(characteristicPoints[i + p]); if (dbg) { - std::cerr << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " + std::cout << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << ")\n"; getchar(); } @@ -586,7 +586,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( newCharacteristicPoints.push_back(point); if (dbg) { - 
std::cerr << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n"; + std::cout << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n"; } while ((i + p < characteristicPoints.size()) && @@ -594,15 +594,15 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(point) >= birth_plus_deaths(characteristicPoints[i + p]))) { newCharacteristicPoints.push_back(characteristicPoints[i + p]); if (dbg) { - std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " + std::cout << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << "\n"; - std::cerr << "point : " << point.first << " " << point.second << "\n"; - std::cerr << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p]) + std::cout << "point : " << point.first << " " << point.second << "\n"; + std::cout << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p]) << " , " << birth_plus_deaths(characteristicPoints[i + p]) << "\n"; - std::cerr << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point) + std::cout << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point) << "\n"; - std::cerr << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " + std::cout << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << ")\n"; getchar(); } @@ -613,20 +613,20 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( lambda_n.push_back(std::make_pair(birth_plus_deaths(lambda_n[lambda_n.size() - 1]), 0)); lambda_n.push_back(std::make_pair(minus_length(characteristicPoints[i]), 0)); if (dbg) { - std::cerr << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0 + std::cout << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0 << ")\n"; - std::cerr << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n"; + std::cout << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n"; } } lambda_n.push_back(characteristicPoints[i]); if (dbg) { - std::cerr << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " " + std::cout << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; } } else { newCharacteristicPoints.push_back(characteristicPoints[i]); if (dbg) { - std::cerr << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " " + std::cout << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; } } @@ -730,31 +730,31 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou unsigned coordEnd = this->land[level].size() - 2; if (compute_value_at_a_given_pointDbg) { - std::cerr << "Here \n"; - std::cerr << "x : " << x << "\n"; - std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; - std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; + std::cout << "Here \n"; + std::cout << "x : " << x << "\n"; + std::cout << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first 
<< "\n"; + std::cout << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; } // in this case x is outside the support of the landscape, therefore the value of the landscape is 0. if (x <= this->land[level][coordBegin].first) return 0; if (x >= this->land[level][coordEnd].first) return 0; - if (compute_value_at_a_given_pointDbg) std::cerr << "Entering to the while loop \n"; + if (compute_value_at_a_given_pointDbg) std::cout << "Entering to the while loop \n"; while (coordBegin + 1 != coordEnd) { if (compute_value_at_a_given_pointDbg) { - std::cerr << "coordBegin : " << coordBegin << "\n"; - std::cerr << "coordEnd : " << coordEnd << "\n"; - std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; - std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; + std::cout << "coordBegin : " << coordBegin << "\n"; + std::cout << "coordEnd : " << coordEnd << "\n"; + std::cout << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; + std::cout << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; } unsigned newCord = (unsigned)floor((coordEnd + coordBegin) / 2.0); if (compute_value_at_a_given_pointDbg) { - std::cerr << "newCord : " << newCord << "\n"; - std::cerr << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n"; + std::cout << "newCord : " << newCord << "\n"; + std::cout << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n"; std::cin.ignore(); } @@ -771,8 +771,8 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou << this->land[level][coordEnd].first << "\n"; std::cout << "the y coords are : " << this->land[level][coordBegin].second << " a " << this->land[level][coordEnd].second << "\n"; - std::cerr << "coordBegin : " << coordBegin << "\n"; - std::cerr << "coordEnd : " << coordEnd << "\n"; + std::cout << "coordBegin : " << coordBegin << "\n"; + std::cout << "coordEnd : " << coordEnd << "\n"; std::cin.ignore(); } return function_value(this->land[level][coordBegin], this->land[level][coordEnd], x); @@ -943,7 +943,7 @@ void Persistence_landscape::load_landscape_from_file(const char* filename) { lineSS >> endd; landscapeAtThisLevel.push_back(std::make_pair(beginn, endd)); if (dbg) { - std::cerr << "Reading a point : " << beginn << " , " << endd << std::endl; + std::cout << "Reading a point : " << beginn << " , " << endd << std::endl; } } else { if (dbg) { @@ -985,8 +985,8 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap if (operation_on_pair_of_landscapesDBG) { for (size_t i = 0; i != std::min(land1.land.size(), land2.land.size()); ++i) { - std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; - std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; + std::cout << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; + std::cout << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; } getchar(); } @@ -997,12 +997,12 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap size_t q = 0; while ((p + 1 < land1.land[i].size()) && (q + 1 < land2.land[i].size())) { if (operation_on_pair_of_landscapesDBG) { - std::cerr << "p : " << p << "\n"; - std::cerr << "q : " << q << "\n"; - std::cerr << "land1.land.size() : " << land1.land.size() << std::endl; - std::cerr 
<< "land2.land.size() : " << land2.land.size() << std::endl; - std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; - std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; + std::cout << "p : " << p << "\n"; + std::cout << "q : " << q << "\n"; + std::cout << "land1.land.size() : " << land1.land.size() << std::endl; + std::cout << "land2.land.size() : " << land2.land.size() << std::endl; + std::cout << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; + std::cout << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; std::cout << "land1.land[i][p].first : " << land1.land[i][p].first << "\n"; std::cout << "land2.land[i][q].first : " << land2.land[i][q].first << "\n"; } @@ -1110,20 +1110,20 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, const Persistence_landscape& pl2) { bool dbg = false; - if (dbg) std::cerr << " compute_maximal_distance_non_symmetric \n"; + if (dbg) std::cout << " compute_maximal_distance_non_symmetric \n"; // this distance is not symmetric. It compute ONLY distance between inflection points of pl1 and pl2. double maxDist = 0; size_t minimalNumberOfLevels = std::min(pl1.land.size(), pl2.land.size()); for (size_t level = 0; level != minimalNumberOfLevels; ++level) { if (dbg) { - std::cerr << "Level : " << level << std::endl; - std::cerr << "PL1 : \n"; + std::cout << "Level : " << level << std::endl; + std::cout << "PL1 : \n"; for (size_t i = 0; i != pl1.land[level].size(); ++i) { - std::cerr << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n"; + std::cout << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n"; } - std::cerr << "PL2 : \n"; + std::cout << "PL2 : \n"; for (size_t i = 0; i != pl2.land[level].size(); ++i) { - std::cerr << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n"; + std::cout << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n"; } std::cin.ignore(); } @@ -1143,24 +1143,24 @@ double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, if (maxDist <= val) maxDist = val; if (dbg) { - std::cerr << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << "," + std::cout << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << "," << pl2.land[level][p2Count + 1].first << "] \n"; - std::cerr << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; - std::cerr << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : " + std::cout << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; + std::cout << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : " << function_value(pl2.land[level][p2Count], pl2.land[level][p2Count + 1], pl1.land[level][i].first) << std::endl; - std::cerr << "val : " << val << std::endl; + std::cout << "val : " << val << std::endl; std::cin.ignore(); } } } - if (dbg) std::cerr << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl; + if (dbg) std::cout << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl; if (minimalNumberOfLevels < pl1.land.size()) { for (size_t level = minimalNumberOfLevels; level != pl1.land.size(); ++level) { for (size_t i = 0; i != pl1.land[level].size(); ++i) { - if (dbg) std::cerr << "pl1[level][i].second : 
" << pl1.land[level][i].second << std::endl; + if (dbg) std::cout << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; if (maxDist < pl1.land[level][i].second) maxDist = pl1.land[level][i].second; } } @@ -1181,7 +1181,7 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const lan = lan.abs(); if (dbg) { - std::cerr << "Abs of difference ; " << lan << std::endl; + std::cout << "Abs of difference ; " << lan << std::endl; getchar(); } @@ -1189,17 +1189,17 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const // \int_{- \infty}^{+\infty}| first-second |^p double result; if (p != 1) { - if (dbg) std::cerr << "Power != 1, compute integral to the power p\n"; + if (dbg) std::cout << "Power != 1, compute integral to the power p\n"; result = lan.compute_integral_of_landscape(p); } else { - if (dbg) std::cerr << "Power = 1, compute integral \n"; + if (dbg) std::cout << "Power = 1, compute integral \n"; result = lan.compute_integral_of_landscape(); } // (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p) return pow(result, 1.0 / p); } else { // p == infty - if (dbg) std::cerr << "Power = infty, compute maximum \n"; + if (dbg) std::cout << "Power = infty, compute maximum \n"; return lan.compute_maximum(); } } @@ -1220,7 +1220,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ for (size_t level = 0; level != std::min(l1.size(), l2.size()); ++level) { if (dbg) { - std::cerr << "Computing inner product for a level : " << level << std::endl; + std::cout << "Computing inner product for a level : " << level << std::endl; getchar(); } auto&& l1_land_level = l1.land[level]; @@ -1267,14 +1267,14 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ result += contributionFromThisPart; if (dbg) { - std::cerr << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first + std::cout << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first << " , " << l1_land_level[l1It + 1].first << std::endl; - std::cerr << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first + std::cout << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first << " , " << l2_land_level[l2It + 1].first << std::endl; - std::cerr << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl; - std::cerr << "x1 : " << x1 << " , x2 : " << x2 << std::endl; - std::cerr << "contributionFromThisPart : " << contributionFromThisPart << std::endl; - std::cerr << "result : " << result << std::endl; + std::cout << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl; + std::cout << "x1 : " << x1 << " , x2 : " << x2 << std::endl; + std::cout << "contributionFromThisPart : " << contributionFromThisPart << std::endl; + std::cout << "result : " << result << std::endl; getchar(); } @@ -1290,11 +1290,11 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ // in this case, we increment both: ++l2It; if (dbg) { - std::cerr << "Incrementing both \n"; + std::cout << "Incrementing both \n"; } } else { if (dbg) { - std::cerr << "Incrementing first \n"; + std::cout << "Incrementing first \n"; } } ++l1It; @@ -1302,7 +1302,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ // in this case we increment l2It ++l2It; if (dbg) { - std::cerr << "Incrementing second \n"; + std::cout << "Incrementing 
second \n"; } } diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h index 68bce336..b17fc0a5 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h +++ b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h @@ -155,9 +155,9 @@ class Persistence_landscape_on_grid { double dx = (this->grid_max - this->grid_min) / static_cast(this->values_of_landscapes.size() - 1); if (dbg) { - std::cerr << "this->grid_max : " << this->grid_max << std::endl; - std::cerr << "this->grid_min : " << this->grid_min << std::endl; - std::cerr << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl; + std::cout << "this->grid_max : " << this->grid_max << std::endl; + std::cout << "this->grid_min : " << this->grid_min << std::endl; + std::cout << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl; getchar(); } @@ -169,14 +169,14 @@ class Persistence_landscape_on_grid { if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level]; if (dbg) { - std::cerr << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size() + std::cout << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size() << " , level : " << level << std::endl; if (this->values_of_landscapes[i].size() > level) - std::cerr << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl; - std::cerr << "previous_y : " << previous_y << std::endl; - std::cerr << "current_y : " << current_y << std::endl; - std::cerr << "dx : " << dx << std::endl; - std::cerr << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl; + std::cout << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl; + std::cout << "previous_y : " << previous_y << std::endl; + std::cout << "current_y : " << current_y << std::endl; + std::cout << "dx : " << dx << std::endl; + std::cout << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl; } result += 0.5 * dx * (previous_y + current_y); @@ -213,10 +213,10 @@ class Persistence_landscape_on_grid { if (this->values_of_landscapes[0].size() > level) previous_y = this->values_of_landscapes[0][level]; if (dbg) { - std::cerr << "dx : " << dx << std::endl; - std::cerr << "previous_x : " << previous_x << std::endl; - std::cerr << "previous_y : " << previous_y << std::endl; - std::cerr << "power : " << p << std::endl; + std::cout << "dx : " << dx << std::endl; + std::cout << "previous_x : " << previous_x << std::endl; + std::cout << "previous_y : " << previous_y << std::endl; + std::cout << "power : " << p << std::endl; getchar(); } @@ -225,7 +225,7 @@ class Persistence_landscape_on_grid { double current_y = 0; if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level]; - if (dbg) std::cerr << "current_y : " << current_y << std::endl; + if (dbg) std::cout << "current_y : " << current_y << std::endl; if (current_y == previous_y) continue; @@ -235,7 +235,7 @@ class Persistence_landscape_on_grid { double b = coef.second; if (dbg) { - std::cerr << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x + std::cout << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" 
<< current_x << "," << current_y << ") is : " << a << "x+" << b << std::endl; } @@ -249,14 +249,14 @@ class Persistence_landscape_on_grid { } result += value_to_add; if (dbg) { - std::cerr << "Increasing result by : " << value_to_add << std::endl; - std::cerr << "result : " << result << std::endl; + std::cout << "Increasing result by : " << value_to_add << std::endl; + std::cout << "result : " << result << std::endl; getchar(); } previous_x = current_x; previous_y = current_y; } - if (dbg) std::cerr << "The total result is : " << result << std::endl; + if (dbg) std::cout << "The total result is : " << result << std::endl; return result; } @@ -297,10 +297,10 @@ class Persistence_landscape_on_grid { size_t position = size_t((x - this->grid_min) / dx); if (dbg) { - std::cerr << "This is a procedure compute_value_at_a_given_point \n"; - std::cerr << "level : " << level << std::endl; - std::cerr << "x : " << x << std::endl; - std::cerr << "position : " << position << std::endl; + std::cout << "This is a procedure compute_value_at_a_given_point \n"; + std::cout << "level : " << level << std::endl; + std::cout << "x : " << x << std::endl; + std::cout << "position : " << position << std::endl; } // check if we are not exactly in the grid point: if (almost_equal(position * dx + this->grid_min, x)) { @@ -432,23 +432,23 @@ class Persistence_landscape_on_grid { bool operator==(const Persistence_landscape_on_grid& rhs) const { bool dbg = true; if (this->values_of_landscapes.size() != rhs.values_of_landscapes.size()) { - if (dbg) std::cerr << "values_of_landscapes of incompatible sizes\n"; + if (dbg) std::cout << "values_of_landscapes of incompatible sizes\n"; return false; } if (!almost_equal(this->grid_min, rhs.grid_min)) { - if (dbg) std::cerr << "grid_min not equal\n"; + if (dbg) std::cout << "grid_min not equal\n"; return false; } if (!almost_equal(this->grid_max, rhs.grid_max)) { - if (dbg) std::cerr << "grid_max not equal\n"; + if (dbg) std::cout << "grid_max not equal\n"; return false; } for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) { for (size_t aa = 0; aa != this->values_of_landscapes[i].size(); ++aa) { if (!almost_equal(this->values_of_landscapes[i][aa], rhs.values_of_landscapes[i][aa])) { if (dbg) { - std::cerr << "Problem in the position : " << i << " of values_of_landscapes. \n"; - std::cerr << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl; + std::cout << "Problem in the position : " << i << " of values_of_landscapes. 
\n"; + std::cout << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl; } return false; } @@ -615,7 +615,7 @@ class Persistence_landscape_on_grid { double previous_y_l1 = 0; double previous_y_l2 = 0; for (size_t i = 0; i != l1.values_of_landscapes.size(); ++i) { - if (dbg) std::cerr << "i : " << i << std::endl; + if (dbg) std::cout << "i : " << i << std::endl; double current_x = previous_x + dx; double current_y_l1 = 0; @@ -625,11 +625,11 @@ class Persistence_landscape_on_grid { if (l2.values_of_landscapes[i].size() > level) current_y_l2 = l2.values_of_landscapes[i][level]; if (dbg) { - std::cerr << "previous_x : " << previous_x << std::endl; - std::cerr << "previous_y_l1 : " << previous_y_l1 << std::endl; - std::cerr << "current_y_l1 : " << current_y_l1 << std::endl; - std::cerr << "previous_y_l2 : " << previous_y_l2 << std::endl; - std::cerr << "current_y_l2 : " << current_y_l2 << std::endl; + std::cout << "previous_x : " << previous_x << std::endl; + std::cout << "previous_y_l1 : " << previous_y_l1 << std::endl; + std::cout << "current_y_l1 : " << current_y_l1 << std::endl; + std::cout << "previous_y_l2 : " << previous_y_l2 << std::endl; + std::cout << "current_y_l2 : " << current_y_l2 << std::endl; } std::pair l1_coords = compute_parameters_of_a_line(std::make_pair(previous_x, previous_y_l1), @@ -646,11 +646,11 @@ class Persistence_landscape_on_grid { double d = l2_coords.second; if (dbg) { - std::cerr << "Here are the formulas for a line: \n"; - std::cerr << "a : " << a << std::endl; - std::cerr << "b : " << b << std::endl; - std::cerr << "c : " << c << std::endl; - std::cerr << "d : " << d << std::endl; + std::cout << "Here are the formulas for a line: \n"; + std::cout << "a : " << a << std::endl; + std::cout << "b : " << b << std::endl; + std::cout << "c : " << c << std::endl; + std::cout << "d : " << d << std::endl; } // now, to compute the inner product in this interval we need to compute the integral of (ax+b)(cx+d) = acx^2 + @@ -663,11 +663,11 @@ class Persistence_landscape_on_grid { (a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x); if (dbg) { - std::cerr << "Value of the integral on the left end i.e. : " << previous_x << " is : " + std::cout << "Value of the integral on the left end i.e. : " << previous_x << " is : " << a * c / 3 * previous_x * previous_x * previous_x + (a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x << std::endl; - std::cerr << "Value of the integral on the right end i.e. : " << current_x << " is " + std::cout << "Value of the integral on the right end i.e. 
: " << current_x << " is " << a * c / 3 * current_x * current_x * current_x + (a * d + b * c) / 2 * current_x * current_x + b * d * current_x << std::endl; @@ -676,8 +676,8 @@ class Persistence_landscape_on_grid { result += added_value; if (dbg) { - std::cerr << "added_value : " << added_value << std::endl; - std::cerr << "result : " << result << std::endl; + std::cout << "added_value : " << added_value << std::endl; + std::cout << "result : " << result << std::endl; getchar(); } @@ -703,8 +703,8 @@ class Persistence_landscape_on_grid { // time: if (dbg) { - std::cerr << "first : " << first << std::endl; - std::cerr << "second : " << second << std::endl; + std::cout << "first : " << first << std::endl; + std::cout << "second : " << second << std::endl; getchar(); } @@ -712,14 +712,14 @@ class Persistence_landscape_on_grid { Persistence_landscape_on_grid lan = first - second; if (dbg) { - std::cerr << "Difference : " << lan << std::endl; + std::cout << "Difference : " << lan << std::endl; } //| first-second |: lan.abs(); if (dbg) { - std::cerr << "Abs : " << lan << std::endl; + std::cout << "Abs : " << lan << std::endl; } if (p < std::numeric_limits::max()) { @@ -727,18 +727,18 @@ class Persistence_landscape_on_grid { double result; if (p != 1) { if (dbg) { - std::cerr << "p : " << p << std::endl; + std::cout << "p : " << p << std::endl; getchar(); } result = lan.compute_integral_of_landscape(p); if (dbg) { - std::cerr << "integral : " << result << std::endl; + std::cout << "integral : " << result << std::endl; getchar(); } } else { result = lan.compute_integral_of_landscape(); if (dbg) { - std::cerr << "integral, without power : " << result << std::endl; + std::cout << "integral, without power : " << result << std::endl; getchar(); } } @@ -820,7 +820,7 @@ class Persistence_landscape_on_grid { this->grid_max = (to_average[0])->grid_max; if (dbg) { - std::cerr << "Computations of average. The data from the current landscape have been cleared. We are ready to do " + std::cout << "Computations of average. The data from the current landscape have been cleared. We are ready to do " "the computations. \n"; } @@ -835,7 +835,7 @@ class Persistence_landscape_on_grid { this->values_of_landscapes[grid_point] = std::vector(maximal_size_of_vector); if (dbg) { - std::cerr << "We are considering the point : " << grid_point + std::cout << "We are considering the point : " << grid_point << " of the grid. In this point, there are at most : " << maximal_size_of_vector << " nonzero landscape functions \n"; } @@ -931,12 +931,12 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto size_t number_of_points_, unsigned number_of_levels) { bool dbg = false; if (dbg) { - std::cerr << "Here is the procedure : set_up_values_of_landscapes. The parameters are : grid_min_ : " << grid_min_ + std::cout << "Here is the procedure : set_up_values_of_landscapes. 
The parameters are : grid_min_ : " << grid_min_ << ", grid_max_ : " << grid_max_ << ", number_of_points_ : " << number_of_points_ << ", number_of_levels: " << number_of_levels << std::endl; - std::cerr << "Here are the intervals at our disposal : \n"; + std::cout << "Here are the intervals at our disposal : \n"; for (size_t i = 0; i != p.size(); ++i) { - std::cerr << p[i].first << " , " << p[i].second << std::endl; + std::cout << p[i].first << " , " << p[i].second << std::endl; } } @@ -976,17 +976,17 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto size_t grid_interval_midpoint = (size_t)(0.5 * (grid_interval_begin + grid_interval_end)); if (dbg) { - std::cerr << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl; + std::cout << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl; - std::cerr << "grid_interval_begin : " << grid_interval_begin << std::endl; - std::cerr << "grid_interval_end : " << grid_interval_end << std::endl; - std::cerr << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl; + std::cout << "grid_interval_begin : " << grid_interval_begin << std::endl; + std::cout << "grid_interval_end : " << grid_interval_end << std::endl; + std::cout << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl; } double landscape_value = dx; for (size_t i = grid_interval_begin + 1; i < grid_interval_midpoint; ++i) { if (dbg) { - std::cerr << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value + std::cout << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value << std::endl; } if (number_of_levels != std::numeric_limits::max()) { @@ -1044,7 +1044,7 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto } if (dbg) { - std::cerr << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value + std::cout << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value << std::endl; } } diff --git a/src/Persistence_representations/include/gudhi/Persistence_vectors.h b/src/Persistence_representations/include/gudhi/Persistence_vectors.h index 6776f4a3..be985909 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_vectors.h +++ b/src/Persistence_representations/include/gudhi/Persistence_vectors.h @@ -360,9 +360,9 @@ template void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap(size_t where_to_cut) { bool dbg = false; if (dbg) { - std::cerr << "Here are the intervals : \n"; + std::cout << "Here are the intervals : \n"; for (size_t i = 0; i != this->intervals.size(); ++i) { - std::cerr << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; + std::cout << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; } } where_to_cut = std::min( @@ -385,14 +385,14 @@ void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap 0.5 * (this->intervals[j].first + this->intervals[j].second))))); if (dbg) { - std::cerr << "Value : " << value << std::endl; - std::cerr << "heap.front() : " << heap.front() << std::endl; + std::cout << "Value : " << value << std::endl; + std::cout << "heap.front() : " << heap.front() << std::endl; getchar(); } if (-value < heap.front()) { if (dbg) { - std::cerr << "Replacing : " << heap.front() << " with : " << -value << std::endl; + std::cout << "Replacing : " << heap.front() << " 
with : " << -value << std::endl; getchar(); } // remove the first element from the heap @@ -431,7 +431,7 @@ void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap } if (dbg) { - std::cerr << "This is the heap after all the operations :\n"; + std::cout << "This is the heap after all the operations :\n"; for (size_t i = 0; i != heap.size(); ++i) { std::cout << heap[i] << " "; } @@ -519,11 +519,11 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra bool dbg = false; if (dbg) { - std::cerr << "Entering double Vector_distances_in_diagram::distance( const Abs_Topological_data_with_distances* " + std::cout << "Entering double Vector_distances_in_diagram::distance( const Abs_Topological_data_with_distances* " "second , double power ) procedure \n"; - std::cerr << "Power : " << power << std::endl; - std::cerr << "This : " << *this << std::endl; - std::cerr << "second : " << second_ << std::endl; + std::cout << "Power : " << power << std::endl; + std::cout << "This : " << *this << std::endl; + std::cout << "second : " << second_ << std::endl; } double result = 0; @@ -531,7 +531,7 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra ++i) { if (power == 1) { if (dbg) { - std::cerr << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] + std::cout << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] << " | : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]) << std::endl; } @@ -545,7 +545,7 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra result = fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]); } if (dbg) { - std::cerr << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] + std::cout << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] << " : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]) << std::endl; } diff --git a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h index 5c2d2038..8b348fd1 100644 --- a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h +++ b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h @@ -50,7 +50,7 @@ std::vector > read_persistence_intervals_in_one_dimens final_barcode.reserve(barcode_initial.size()); if (dbg) { - std::cerr << "Here are the intervals that we read from the file : \n"; + std::cout << "Here are the intervals that we read from the file : \n"; for (size_t i = 0; i != barcode_initial.size(); ++i) { std::cout << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl; } @@ -59,7 +59,7 @@ std::vector > read_persistence_intervals_in_one_dimens for (size_t i = 0; i != barcode_initial.size(); ++i) { if (dbg) { - std::cout << "COnsidering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second + std::cout << "Considering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl; } @@ -91,11 +91,11 @@ std::vector > read_persistence_intervals_in_one_dimens } if (dbg) { - std::cerr << "Here are the final bars that we are sending further : \n"; + std::cout << "Here are the final bars that we are sending further : \n"; for (size_t i = 0; i != final_barcode.size(); ++i) { 
std::cout << final_barcode[i].first << " " << final_barcode[i].second << std::endl; } - std::cerr << "final_barcode.size() : " << final_barcode.size() << std::endl; + std::cout << "final_barcode.size() : " << final_barcode.size() << std::endl; getchar(); } diff --git a/src/common/include/gudhi/Debug_utils.h b/src/common/include/gudhi/Debug_utils.h index 38abc06d..d4e66d8d 100644 --- a/src/common/include/gudhi/Debug_utils.h +++ b/src/common/include/gudhi/Debug_utils.h @@ -27,7 +27,7 @@ #define GUDHI_CHECK_code(CODE) #endif -#define PRINT(a) std::cerr << #a << ": " << (a) << " (DISP)" << std::endl +#define PRINT(a) std::cout << #a << ": " << (a) << " (DISP)" << std::endl // #define DBG_VERBOSE #ifdef DBG_VERBOSE diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h index db31bf5c..ac9e987b 100644 --- a/src/common/include/gudhi/reader_utils.h +++ b/src/common/include/gudhi/reader_utils.h @@ -272,12 +272,12 @@ std::vector> read_lower_triangular_matrix_from_csv in.close(); #ifdef DEBUG_TRACES - std::cerr << "Here is the matrix we read : \n"; + std::cout << "Here is the matrix we read : \n"; for (size_t i = 0; i != result.size(); ++i) { for (size_t j = 0; j != result[i].size(); ++j) { - std::cerr << result[i][j] << " "; + std::cout << result[i][j] << " "; } - std::cerr << std::endl; + std::cout << std::endl; } #endif // DEBUG_TRACES -- cgit v1.2.3 From 587a845289a4e29014f67d4c3379b2b4d6b1f102 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 14 Jan 2020 14:40:41 +0100 Subject: print errors to stderr --- ...lpha_complex_diagram_persistence_from_off_file_example.py | 3 ++- .../example/alpha_rips_persistence_bottleneck_distance.py | 3 ++- ...ness_complex_diagram_persistence_from_off_file_example.py | 3 ++- ...ness_complex_diagram_persistence_from_off_file_example.py | 3 ++- ..._complex_barcode_persistence_from_perseus_file_example.py | 3 ++- ...rips_complex_diagram_persistence_from_off_file_example.py | 3 ++- ...angential_complex_plain_homology_from_off_file_example.py | 3 ++- src/python/gudhi/alpha_complex.pyx | 2 ++ src/python/gudhi/cubical_complex.pyx | 9 ++++++--- src/python/gudhi/nerve_gic.pyx | 12 +++++++----- src/python/gudhi/off_reader.pyx | 4 +++- src/python/gudhi/periodic_cubical_complex.pyx | 8 +++++--- src/python/test/test_subsampling.py | 1 - 13 files changed, 37 insertions(+), 20 deletions(-) diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py index 4079a469..6afaf533 100755 --- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. 
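The same one-line change recurs in every example script below: regular results keep flowing to stdout, while messages about unusable input move to stderr, so they still reach the terminal when stdout is piped to a file. A minimal sketch of the convention, with the helper name chosen here purely for illustration and the message taken verbatim from these diffs:

import sys

def reject(path):
    # Error text goes to stderr: running `python example.py data.off > out.txt`
    # keeps out.txt clean and still shows the complaint on the terminal.
    print(path, "is not a valid OFF file", file=sys.stderr)
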
@@ -64,6 +65,6 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py index d5c33ec8..7b4aa3e7 100755 --- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py +++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py @@ -3,6 +3,7 @@ import gudhi import argparse import math +import sys """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. @@ -100,6 +101,6 @@ with open(args.file, "r") as f: print(message) else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py index 4903667e..f61d692b 100755 --- a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. @@ -79,6 +80,6 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py index 339a8577..aaa03dad 100755 --- a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. @@ -78,6 +79,6 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py index c692e66f..97bfd49f 100755 --- a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py +++ b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. 
@@ -73,4 +74,4 @@ if is_file_perseus(args.file): gudhi.plot_persistence_barcode(diag) plot.show() else: - print(args.file, "is not a valid perseus style file") + print(args.file, "is not a valid perseus style file", file=sys.stderr) diff --git a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py index c757aca7..5d8f057b 100755 --- a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. @@ -68,6 +69,6 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py index f0df2189..77ac2ea7 100755 --- a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py +++ b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py @@ -2,6 +2,7 @@ import argparse import matplotlib.pyplot as plot +import sys import gudhi """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. @@ -60,6 +61,6 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file") + print(args.file, "is not a valid OFF file", file=sys.stderr) f.close() diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx index db11416c..4ff37437 100644 --- a/src/python/gudhi/alpha_complex.pyx +++ b/src/python/gudhi/alpha_complex.pyx @@ -7,12 +7,14 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from __future__ import print_function from cython cimport numeric from libcpp.vector cimport vector from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool from libc.stdint cimport intptr_t +import sys import os from gudhi.simplex_tree cimport * diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 92ff6411..28913a32 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -7,11 +7,13 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from __future__ import print_function from cython cimport numeric from libcpp.vector cimport vector from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool +import sys import os import numpy as np @@ -87,10 +89,11 @@ cdef class CubicalComplex: if os.path.isfile(perseus_file): self.thisptr = new Bitmap_cubical_complex_base_interface(str.encode(perseus_file)) else: - print("file " + perseus_file + " not found.") + print("file " + perseus_file + " not found.", file=sys.stderr) else: print("CubicalComplex can be constructed from dimensions and " - "top_dimensional_cells or from a Perseus-style file name.") + "top_dimensional_cells or from a Perseus-style file name.", + file=sys.stderr) def __dealloc__(self): if self.thisptr != NULL: @@ -199,5 +202,5 @@ cdef class CubicalComplex: intervals_result = 
self.pcohptr.intervals_in_dimension(dimension) else: print("intervals_in_dim function requires persistence function" - " to be launched first.") + " to be launched first.", file=sys.stderr) return np.array(intervals_result) diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx index 68c06432..5eb9be0d 100644 --- a/src/python/gudhi/nerve_gic.pyx +++ b/src/python/gudhi/nerve_gic.pyx @@ -7,11 +7,13 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from __future__ import print_function from cython cimport numeric from libcpp.vector cimport vector from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool +import sys import os from libc.stdint cimport intptr_t @@ -182,7 +184,7 @@ cdef class CoverComplex: if os.path.isfile(off_file): return self.thisptr.read_point_cloud(str.encode(off_file)) else: - print("file " + off_file + " not found.") + print("file " + off_file + " not found.", file=sys.stderr) return False def set_automatic_resolution(self): @@ -214,7 +216,7 @@ cdef class CoverComplex: if os.path.isfile(color_file_name): self.thisptr.set_color_from_file(str.encode(color_file_name)) else: - print("file " + color_file_name + " not found.") + print("file " + color_file_name + " not found.", file=sys.stderr) def set_color_from_range(self, color): """Computes the function used to color the nodes of the simplicial @@ -235,7 +237,7 @@ cdef class CoverComplex: if os.path.isfile(cover_file_name): self.thisptr.set_cover_from_file(str.encode(cover_file_name)) else: - print("file " + cover_file_name + " not found.") + print("file " + cover_file_name + " not found.", file=sys.stderr) def set_cover_from_function(self): """Creates a cover C from the preimages of the function f. @@ -268,7 +270,7 @@ cdef class CoverComplex: if os.path.isfile(func_file_name): self.thisptr.set_function_from_file(str.encode(func_file_name)) else: - print("file " + func_file_name + " not found.") + print("file " + func_file_name + " not found.", file=sys.stderr) def set_function_from_range(self, function): """Creates the function f from a vector stored in memory. 
@@ -309,7 +311,7 @@ cdef class CoverComplex: if os.path.isfile(graph_file_name): self.thisptr.set_graph_from_file(str.encode(graph_file_name)) else: - print("file " + graph_file_name + " not found.") + print("file " + graph_file_name + " not found.", file=sys.stderr) def set_graph_from_OFF(self): """Creates a graph G from the triangulation given by the input OFF diff --git a/src/python/gudhi/off_reader.pyx b/src/python/gudhi/off_reader.pyx index 58f05db8..ef8f420a 100644 --- a/src/python/gudhi/off_reader.pyx +++ b/src/python/gudhi/off_reader.pyx @@ -7,9 +7,11 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from __future__ import print_function from cython cimport numeric from libcpp.vector cimport vector from libcpp.string cimport string +import sys import os __author__ = "Vincent Rouvreau" @@ -32,6 +34,6 @@ def read_points_from_off_file(off_file=''): if os.path.isfile(off_file): return read_points_from_OFF_file(str.encode(off_file)) else: - print("file " + off_file + " not found.") + print("file " + off_file + " not found.", file=sys.stderr) return [] diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index b5dece10..4ec06524 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -7,11 +7,13 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from __future__ import print_function from cython cimport numeric from libcpp.vector cimport vector from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool +import sys import os import numpy as np @@ -95,12 +97,12 @@ cdef class PeriodicCubicalComplex: if os.path.isfile(perseus_file): self.thisptr = new Periodic_cubical_complex_base_interface(str.encode(perseus_file)) else: - print("file " + perseus_file + " not found.") + print("file " + perseus_file + " not found.", file=sys.stderr) else: print("CubicalComplex can be constructed from dimensions, " "top_dimensional_cells and periodic_dimensions, or from " "top_dimensional_cells and periodic_dimensions or from " - "a Perseus-style file name.") + "a Perseus-style file name.", file=sys.stderr) def __dealloc__(self): if self.thisptr != NULL: @@ -209,5 +211,5 @@ cdef class PeriodicCubicalComplex: intervals_result = self.pcohptr.intervals_in_dimension(dimension) else: print("intervals_in_dim function requires persistence function" - " to be launched first.") + " to be launched first.", file=sys.stderr) return np.array(intervals_result) diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py index fe0985fa..31f64e32 100755 --- a/src/python/test/test_subsampling.py +++ b/src/python/test/test_subsampling.py @@ -120,7 +120,6 @@ def test_simple_pick_n_random_points(): # Go furter than point set on purpose for iter in range(1, 10): sub_set = gudhi.pick_n_random_points(points=point_set, nb_points=iter) - print(5) for sub in sub_set: found = False for point in point_set: -- cgit v1.2.3 From 436a7fbe36a9de6a969afd5978c3d496773a8690 Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 16 Jan 2020 15:46:41 -0500 Subject: added wrapper functions --- src/python/gudhi/cubical_complex.pyx | 29 ++++++++- src/python/gudhi/simplex_tree.pxd | 1 + src/python/gudhi/simplex_tree.pyx | 28 +++++++- .../include/Persistent_cohomology_interface.h | 76 ++++++++++++++++++++++ 4 files changed, 132 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/cubical_complex.pyx 
b/src/python/gudhi/cubical_complex.pyx
index cbeda014..5562e8a7 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -31,6 +31,7 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
     cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
         Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max)
         vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
+        vector[pair[int, pair[pair[double, int], pair[double, int]]]] get_persistence_cubical_generators(int homology_coeff_field, double min_persistence)
         vector[int] betti_numbers()
         vector[int] persistent_betti_numbers(double from_value, double to_value)
         vector[pair[double,double]] intervals_in_dimension(int dimension)
@@ -85,7 +86,7 @@ cdef class CubicalComplex:
         elif ((dimensions is None) and (top_dimensional_cells is None)
             and (perseus_file != '')):
             if os.path.isfile(perseus_file):
-                self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
+                self.thisptr = new Bitmap_cubical_complex_base_interface(str.encode(perseus_file))
             else:
                 print("file " + perseus_file + " not found.")
         else:
@@ -145,6 +146,32 @@ cdef class CubicalComplex:
             persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         return persistence_result

+    def persistence_generators(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+        """This function returns the persistence of the simplicial complex.
+
+        :param homology_coeff_field: The homology coefficient field. Must be a
+            prime number. Default value is 11.
+        :type homology_coeff_field: int.
+        :param min_persistence: The minimum persistence value to take into
+            account (strictly greater than min_persistence). Default value is
+            0.0.
+            Sets min_persistence to -1.0 to see all values.
+        :type min_persistence: float.
+        :param persistence_dim_max: If true, the persistent homology for the
+            maximal dimension in the complex is computed. If false, it is
+            ignored. Default is false.
+        :type persistence_dim_max: bool
+        :returns: The persistence of the simplicial complex, together with the corresponding generators, i.e., the positive and negative top-dimensional cells.
+        :rtype: list of pairs(dimension, pair(index of positive top-dimensional cell, index of negative top-dimensional cell))
+        """
+        if self.pcohptr != NULL:
+            del self.pcohptr
+        self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
+        cdef vector[pair[int, pair[pair[double, int], pair[double, int]]]] persistence_result
+        if self.pcohptr != NULL:
+            persistence_result = self.pcohptr.get_persistence_cubical_generators(homology_coeff_field, min_persistence)
+        return persistence_result
+
     def betti_numbers(self):
         """This function returns the Betti numbers of the complex.
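For readers trying the new binding, a minimal sketch of how persistence_generators() could be called once a GUDHI build with this patch is installed; the 2x2 bitmap and the tuple layout are assumptions inferred from the Cython declaration above, not part of the patch:

import gudhi

# Hypothetical usage of the persistence_generators() wrapper added above;
# assumes a GUDHI build that includes this patch.
cc = gudhi.CubicalComplex(dimensions=[2, 2],
                          top_dimensional_cells=[1., 2., 3., 4.])
# Each entry mirrors vector[pair[int, pair[pair[double, int], pair[double, int]]]]:
# (dimension, ((birth, positive cell index), (death, negative cell index))).
for dim, ((birth, pos_cell), (death, neg_cell)) in cc.persistence_generators():
    print(dim, (birth, death), (pos_cell, neg_cell))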
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 1066d44b..9e52a8aa 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -48,6 +48,7 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
    cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
        Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max)
        vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
+       vector[pair[int, pair[pair[double, vector[int]], pair[double, vector[int]]]]] get_persistence_generators(int homology_coeff_field, double min_persistence)
        vector[int] betti_numbers()
        vector[int] persistent_betti_numbers(double from_value, double to_value)
        vector[pair[double,double]] intervals_in_dimension(int dimension)
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index b18627c4..8cc58f8f 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -412,6 +412,32 @@ cdef class SimplexTree:
             persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         return persistence_result

+    def persistence_generators(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+        """This function returns the persistence of the simplicial complex.
+
+        :param homology_coeff_field: The homology coefficient field. Must be a
+            prime number. Default value is 11.
+        :type homology_coeff_field: int.
+        :param min_persistence: The minimum persistence value to take into
+            account (strictly greater than min_persistence). Default value is
+            0.0.
+            Sets min_persistence to -1.0 to see all values.
+        :type min_persistence: float.
+        :param persistence_dim_max: If true, the persistent homology for the
+            maximal dimension in the complex is computed. If false, it is
+            ignored. Default is false.
+        :type persistence_dim_max: bool
+        :returns: The persistence of the simplicial complex, together with the corresponding generators, i.e., the positive and negative simplices.
+        :rtype: list of pairs(dimension, pair(positive_simplex, negative_simplex))
+        """
+        if self.pcohptr != NULL:
+            del self.pcohptr
+        self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
+        cdef vector[pair[int, pair[pair[double, vector[int]], pair[double, vector[int]]]]] persistence_result
+        if self.pcohptr != NULL:
+            persistence_result = self.pcohptr.get_persistence_generators(homology_coeff_field, min_persistence)
+        return persistence_result
+
     def betti_numbers(self):
         """This function returns the Betti numbers of the simplicial complex.
@@ -508,7 +534,7 @@ cdef class SimplexTree: """ if self.pcohptr != NULL: if persistence_file != '': - self.pcohptr.write_output_diagram(persistence_file.encode('utf-8')) + self.pcohptr.write_output_diagram(str.encode(persistence_file)) else: print("persistence_file must be specified") else: diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 8c79e6f3..774eb56a 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -73,6 +73,82 @@ persistent_cohomology::Persistent_cohomology>, std::pair>>>> get_persistence_generators(int homology_coeff_field, + double min_persistence) { + persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); + persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); + + // Custom sort and output persistence + cmp_intervals_by_dim_then_length cmp(stptr_); + auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); + std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); + + std::vector>, std::pair>>>> persistence; + for (auto pair : persistent_pairs) { + std::vector splx0, splx1; + for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))){splx0.push_back(vertex);} + if (isfinite(stptr_->filtration(get<1>(pair)))){ for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))){splx1.push_back(vertex);}} + persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), std::make_pair(std::make_pair(stptr_->filtration(get<0>(pair)), splx0), std::make_pair(stptr_->filtration(get<1>(pair)), splx1)))); + } + return persistence; + } + + void top_dimensional_cofaces(std::vector & cofaces, int splx){ + if (stptr_->dimension(stptr_->simplex(splx)) == stptr_->dimension()){cofaces.push_back(stptr_->simplex(splx));} + else{ for (auto v : stptr_->coboundary_simplex_range(stptr_->simplex(splx))){top_dimensional_cofaces(cofaces, stptr_->key(v));} } + } + + std::vector, std::pair>>> get_persistence_cubical_generators(int homology_coeff_field, + double min_persistence) { + + // Gather all top-dimensional cells and store their simplex handles + std::vector max_splx; for (auto splx : stptr_->filtration_simplex_range()){ if (stptr_->dimension(splx) == stptr_->dimension()) max_splx.push_back(splx); } + // Sort these simplex handles and compute the ordering function + // This function allows to go directly from the simplex handle to the position of the corresponding top-dimensional cell in the input data + std::map order; std::sort(max_splx.begin(), max_splx.end()); for (int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i)); + + persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); + persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); + + // Custom sort and output persistence + cmp_intervals_by_dim_then_length cmp(stptr_); + auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); + std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); + + std::vector, std::pair>>> persistence; + for (auto pair : persistent_pairs) { + + int splx0, splx1; + + double f0 = stptr_->filtration(get<0>(pair)); + // Recursively get the top-dimensional cells / cofaces associated to the persistence generator + std::vector faces0; top_dimensional_cofaces(faces0, stptr_->key(get<0>(pair))); 
+ // Find the top-dimensional cell / coface with the same filtration value + int cf; for (int i = 0; i < faces0.size(); i++){ if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} + // Retrieve the index of the corresponding top-dimensional cell in the input data + splx0 = order[faces0[cf]]; + + if (isfinite(stptr_->filtration(get<1>(pair)))){ + double f1 = stptr_->filtration(get<1>(pair)); + // Recursively get the top-dimensional cells / cofaces associated to the persistence generator + std::vector faces1; top_dimensional_cofaces(faces1, stptr_->key(get<1>(pair))); + // Find the top-dimensional cell / coface with the same filtration value + int cf; for (int i = 0; i < faces0.size(); i++){ if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} + // Retrieve the index of the corresponding top-dimensional cell in the input data + splx1 = order[faces1[cf]]; + } + + persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), std::make_pair(std::make_pair(stptr_->filtration(get<0>(pair)), splx0), std::make_pair(stptr_->filtration(get<1>(pair)), splx1)))); + } + return persistence; + } + std::vector, std::vector>> persistence_pairs() { auto pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); -- cgit v1.2.3 From 5694670b3e20f0cb935a751614ef12b6009a60c0 Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 16 Jan 2020 15:58:15 -0500 Subject: fix to detect infinite persistence --- src/python/include/Persistent_cohomology_interface.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 774eb56a..acc32b21 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -124,16 +124,15 @@ persistent_cohomology::Persistent_cohomology, std::pair>>> persistence; for (auto pair : persistent_pairs) { - int splx0, splx1; - double f0 = stptr_->filtration(get<0>(pair)); // Recursively get the top-dimensional cells / cofaces associated to the persistence generator std::vector faces0; top_dimensional_cofaces(faces0, stptr_->key(get<0>(pair))); // Find the top-dimensional cell / coface with the same filtration value int cf; for (int i = 0; i < faces0.size(); i++){ if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} // Retrieve the index of the corresponding top-dimensional cell in the input data - splx0 = order[faces0[cf]]; + int splx0 = order[faces0[cf]]; + int splx1 = -1; if (isfinite(stptr_->filtration(get<1>(pair)))){ double f1 = stptr_->filtration(get<1>(pair)); // Recursively get the top-dimensional cells / cofaces associated to the persistence generator -- cgit v1.2.3 From c89df405c77bb7270db1a7d8f0e49bc22c1b010d Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 16 Jan 2020 16:17:38 -0500 Subject: fix typo + coboundary error --- src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h | 1 + src/python/include/Persistent_cohomology_interface.h | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h index 37514dee..bf09532e 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h @@ -340,6 +340,7 @@ class Bitmap_cubical_complex : public T { * that provides ranges for the Boundary_simplex_iterator. 
**/ Boundary_simplex_range boundary_simplex_range(Simplex_handle sh) { return this->get_boundary_of_a_cell(sh); } + Boundary_simplex_range coboundary_simplex_range(Simplex_handle sh) { return this->get_coboundary_of_a_cell(sh); } /** * filtration_simplex_range creates an object of a Filtration_simplex_range class diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index acc32b21..0ad14477 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -108,7 +108,7 @@ persistent_cohomology::Persistent_cohomology max_splx; for (auto splx : stptr_->filtration_simplex_range()){ if (stptr_->dimension(splx) == stptr_->dimension()) max_splx.push_back(splx); } // Sort these simplex handles and compute the ordering function // This function allows to go directly from the simplex handle to the position of the corresponding top-dimensional cell in the input data - std::map order; std::sort(max_splx.begin(), max_splx.end()); for (int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i)); + std::map order; std::sort(max_splx.begin(), max_splx.end()); for (unsigned int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i)); persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); @@ -128,7 +128,7 @@ persistent_cohomology::Persistent_cohomology faces0; top_dimensional_cofaces(faces0, stptr_->key(get<0>(pair))); // Find the top-dimensional cell / coface with the same filtration value - int cf; for (int i = 0; i < faces0.size(); i++){ if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} + int cf; for (unsigned int i = 0; i < faces0.size(); i++){if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} // Retrieve the index of the corresponding top-dimensional cell in the input data int splx0 = order[faces0[cf]]; @@ -138,7 +138,7 @@ persistent_cohomology::Persistent_cohomology faces1; top_dimensional_cofaces(faces1, stptr_->key(get<1>(pair))); // Find the top-dimensional cell / coface with the same filtration value - int cf; for (int i = 0; i < faces0.size(); i++){ if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} + int cf; for (unsigned int i = 0; i < faces1.size(); i++){if (stptr_->filtration(faces1[i]) == f1){cf = i; break;}} // Retrieve the index of the corresponding top-dimensional cell in the input data splx1 = order[faces1[cf]]; } -- cgit v1.2.3 From 85ceea9512634a62664208cd2d0f1ce48bafa171 Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 16 Jan 2020 17:02:55 -0500 Subject: added wasserstein class --- .../diagram_vectorizations_distances_kernels.py | 7 ++- src/python/gudhi/representations/metrics.py | 59 ++++++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index 119072eb..66c32cc2 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -9,7 +9,7 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet TopologicalVector, DiagramScaler, BirthPersistenceTransform,\ PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\ - SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel + SlicedWassersteinKernel, BottleneckDistance, 
WassersteinDistance, PersistenceFisherKernel D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]]) diags = [D] @@ -117,6 +117,11 @@ X = SW.fit(diags) Y = SW.transform(diags2) print("SW kernel is " + str(Y[0][0])) +W = WassersteinDistance(order=2, internal_p=2) +X = W.fit(diags) +Y = W.transform(diags2) +print("Wasserstein distance is " + str(Y[0][0])) + W = BottleneckDistance(epsilon=.001) X = W.fit(diags) Y = W.transform(diags2) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 5f9ec6ab..290c1d07 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -10,6 +10,7 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances +from gudhi.wasserstein import wasserstein_distance try: from .. import bottleneck_distance USE_GUDHI = True @@ -145,6 +146,64 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): return Xfit +class WassersteinDistance(BaseEstimator, TransformerMixin): + """ + This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams. + """ + def __init__(self, order=2, internal_p=2): + """ + Constructor for the WassersteinDistance class. + + Parameters: + order (int): exponent for Wasserstein, default value is 2., see :func:`gudhi.wasserstein.wasserstein_distance`. + internal_p (int): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is 2 (euclidean norm), see :func:`gudhi.wasserstein.wasserstein_distance`. + """ + self.order, self.internal_p = order, internal_p + + def fit(self, X, y=None): + """ + Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**. + + Parameters: + X (list of n x 2 numpy arrays): input persistence diagrams. + y (n x 1 array): persistence diagram labels (unused). + """ + self.diagrams_ = X + return self + + def transform(self, X): + """ + Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams. + + Parameters: + X (list of n x 2 numpy arrays): input persistence diagrams. + + Returns: + numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances. + """ + num_diag1 = len(X) + + #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): + if X is self.diagrams_: + matrix = np.zeros((num_diag1, num_diag1)) + + for i in range(num_diag1): + for j in range(i+1, num_diag1): + matrix[i,j] = wasserstein_distance(X[i], X[j], self.order, self.internal_p) + matrix[j,i] = matrix[i,j] + + else: + num_diag2 = len(self.diagrams_) + matrix = np.zeros((num_diag1, num_diag2)) + + for i in range(num_diag1): + for j in range(num_diag2): + matrix[i,j] = wasserstein_distance(X[i], self.diagrams_[j], self.order, self.internal_p) + + Xfit = matrix + + return Xfit + class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. 
See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
-- cgit v1.2.3

From 19562b27182dcfa6ed262002c2bc8934382f5a53 Mon Sep 17 00:00:00 2001
From: MathieuCarriere
Date: Thu, 16 Jan 2020 21:26:02 -0500
Subject: get rid of persistence_generators and modified name for cubical complex

---
 src/python/gudhi/cubical_complex.pyx | 8 +++---
 src/python/gudhi/simplex_tree.pxd | 1 -
 src/python/gudhi/simplex_tree.pyx | 26 -------------------
 .../include/Persistent_cohomology_interface.h | 29 +++-------------------
 4 files changed, 8 insertions(+), 56 deletions(-)

diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index 5562e8a7..8ea31486 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -31,7 +31,7 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
     cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
         Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max)
         vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
-        vector[pair[int, pair[pair[double, int], pair[double, int]]]] get_persistence_cubical_generators(int homology_coeff_field, double min_persistence)
+        vector[pair[int, pair[pair[double, int], pair[double, int]]]] get_cofaces_of_cubical_persistence_pairs(int homology_coeff_field, double min_persistence)
         vector[int] betti_numbers()
         vector[int] persistent_betti_numbers(double from_value, double to_value)
         vector[pair[double,double]] intervals_in_dimension(int dimension)
@@ -146,7 +146,7 @@ cdef class CubicalComplex:
             persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         return persistence_result

-    def persistence_generators(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+    def cofaces_of_cubical_persistence_pairs(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
         """This function returns the persistence of the simplicial complex.

         :param homology_coeff_field: The homology coefficient field. Must be a
@@ -161,7 +161,7 @@ cdef class CubicalComplex:
             maximal dimension in the complex is computed. If false, it is
             ignored. Default is false.
         :type persistence_dim_max: bool
-        :returns: The persistence of the simplicial complex, together with the corresponding generators, i.e., the positive and negative top-dimensional cells.
+        :returns: The persistence of the simplicial complex, together with the cofaces of the corresponding generators, i.e., the top-dimensional cells/cofaces of the positive and negative simplices.
:rtype: list of pairs(dimension, pair(index of positive top-dimensional cell, index of negative top-dimensional cell))
         """
         if self.pcohptr != NULL:
@@ -169,7 +169,7 @@ cdef class CubicalComplex:
         self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
         cdef vector[pair[int, pair[pair[double, int], pair[double, int]]]] persistence_result
         if self.pcohptr != NULL:
-            persistence_result = self.pcohptr.get_persistence_cubical_generators(homology_coeff_field, min_persistence)
+            persistence_result = self.pcohptr.get_cofaces_of_cubical_persistence_pairs(homology_coeff_field, min_persistence)
         return persistence_result

     def betti_numbers(self):
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 9e52a8aa..1066d44b 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -48,7 +48,6 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
    cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
        Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max)
        vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
-       vector[pair[int, pair[pair[double, vector[int]], pair[double, vector[int]]]]] get_persistence_generators(int homology_coeff_field, double min_persistence)
        vector[int] betti_numbers()
        vector[int] persistent_betti_numbers(double from_value, double to_value)
        vector[pair[double,double]] intervals_in_dimension(int dimension)
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 8cc58f8f..85d25492 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -412,32 +412,6 @@ cdef class SimplexTree:
             persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         return persistence_result

-    def persistence_generators(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
-        """This function returns the persistence of the simplicial complex.
-
-        :param homology_coeff_field: The homology coefficient field. Must be a
-            prime number. Default value is 11.
-        :type homology_coeff_field: int.
-        :param min_persistence: The minimum persistence value to take into
-            account (strictly greater than min_persistence). Default value is
-            0.0.
-            Sets min_persistence to -1.0 to see all values.
-        :type min_persistence: float.
-        :param persistence_dim_max: If true, the persistent homology for the
-            maximal dimension in the complex is computed. If false, it is
-            ignored. Default is false.
-        :type persistence_dim_max: bool
-        :returns: The persistence of the simplicial complex, together with the corresponding generators, i.e., the positive and negative simplices.
-        :rtype: list of pairs(dimension, pair(positive_simplex, negative_simplex))
-        """
-        if self.pcohptr != NULL:
-            del self.pcohptr
-        self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
-        cdef vector[pair[int, pair[pair[double, vector[int]], pair[double, vector[int]]]]] persistence_result
-        if self.pcohptr != NULL:
-            persistence_result = self.pcohptr.get_persistence_generators(homology_coeff_field, min_persistence)
-        return persistence_result
-
     def betti_numbers(self):
         """This function returns the Betti numbers of the simplicial complex.
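After this rename, only CubicalComplex keeps the generator-style entry point; a sketch of the renamed call, under the same assumptions as the earlier snippet:

import gudhi

# Sketch: SimplexTree no longer exposes persistence_generators() after this
# commit; the cubical-only method now carries the more accurate name.
cc = gudhi.CubicalComplex(dimensions=[2, 2],
                          top_dimensional_cells=[1., 2., 3., 4.])
pairs = cc.cofaces_of_cubical_persistence_pairs()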
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 0ad14477..1a1e716e 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -73,36 +73,15 @@ persistent_cohomology::Persistent_cohomology>, std::pair>>>> get_persistence_generators(int homology_coeff_field, - double min_persistence) { - persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); - persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); - - // Custom sort and output persistence - cmp_intervals_by_dim_then_length cmp(stptr_); - auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); - std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); - - std::vector>, std::pair>>>> persistence; - for (auto pair : persistent_pairs) { - std::vector splx0, splx1; - for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))){splx0.push_back(vertex);} - if (isfinite(stptr_->filtration(get<1>(pair)))){ for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))){splx1.push_back(vertex);}} - persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), std::make_pair(std::make_pair(stptr_->filtration(get<0>(pair)), splx0), std::make_pair(stptr_->filtration(get<1>(pair)), splx1)))); - } - return persistence; - } - void top_dimensional_cofaces(std::vector & cofaces, int splx){ if (stptr_->dimension(stptr_->simplex(splx)) == stptr_->dimension()){cofaces.push_back(stptr_->simplex(splx));} else{ for (auto v : stptr_->coboundary_simplex_range(stptr_->simplex(splx))){top_dimensional_cofaces(cofaces, stptr_->key(v));} } } - std::vector, std::pair>>> get_persistence_cubical_generators(int homology_coeff_field, - double min_persistence) { + std::vector, std::pair>>> get_cofaces_of_cubical_persistence_pairs(int homology_coeff_field, + double min_persistence) { + + // Warning: this function is meant to be used with CubicalComplex only!! 
// Gather all top-dimensional cells and store their simplex handles std::vector max_splx; for (auto splx : stptr_->filtration_simplex_range()){ if (stptr_->dimension(splx) == stptr_->dimension()) max_splx.push_back(splx); } -- cgit v1.2.3 From 62e92e64bd97ec0bd26c31e071228f7d7c78b0e5 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Thu, 16 Jan 2020 21:29:55 -0500 Subject: fixed typo for CubicalComplex --- src/python/gudhi/cubical_complex.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 8ea31486..bd432834 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -86,7 +86,7 @@ cdef class CubicalComplex: elif ((dimensions is None) and (top_dimensional_cells is None) and (perseus_file != '')): if os.path.isfile(perseus_file): - self.thisptr = new Bitmap_cubical_complex_base_interface(str.encode(perseus_file)) + self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8')) else: print("file " + perseus_file + " not found.") else: -- cgit v1.2.3 From 6a6bed7ca21c1ffcf6de9ed09c2a6512ecb66585 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 17 Jan 2020 15:37:03 +0100 Subject: improving doc output --- src/python/doc/barycenter_sum.inc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/doc/barycenter_sum.inc b/src/python/doc/barycenter_sum.inc index afac07d7..da2bdd84 100644 --- a/src/python/doc/barycenter_sum.inc +++ b/src/python/doc/barycenter_sum.inc @@ -7,11 +7,11 @@ | :figclass: align-center | Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is | :Introduced in: GUDHI 3.1.0 | | | defined as a minimizer of the variance functional, that is of | | | Illustration of Frechet mean between persistence | :math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`. | :Copyright: MIT | - | diagrams. | where :math:`d_2` denotes the Wasserstein-2 distance between persis- | | - | | tence diagrams. | | + | diagrams. | where :math:`d_2` denotes the Wasserstein-2 distance between | | + | | persistence diagrams. | | | | It is known to exist and is generically unique. However, an exact | | - | | computation is in general untractable. Current implementation avai- | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | - | | -lable is based on [Turner et al, 2014], and uses an EM-scheme to | | + | | computation is in general untractable. Current implementation | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | + | | available is based on [Turner et al, 2014], and uses an EM-scheme to | | | | provide a local minimum of the variance functional (somewhat similar | | | | to the Lloyd algorithm to estimate a solution to the k-means | | | | problem). 
The local minimum returned depends on the initialization of| |
-- cgit v1.2.3

From a7f3167ffb465bd6d1e3b9e40bc6f1c35daf87fc Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Mon, 20 Jan 2020 16:43:37 +0100
Subject: Simplify the pybind11 code

---
 src/python/doc/wasserstein_distance_user.rst | 5 +++--
 src/python/gudhi/hera.cc | 19 +++++--------------
 2 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index 6cd7f3a0..355ad247 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -11,8 +11,9 @@ Definition

 Functions
 ---------
-This implementation is based on ideas from "Large Scale Computation of Means
-and Cluster for Persistence Diagrams via Optimal Transport".
+This implementation uses the Python Optimal Transport library and is based on
+ideas from "Large Scale Computation of Means and Cluster for Persistence
+Diagrams via Optimal Transport".

 .. autofunction:: gudhi.wasserstein.wasserstein_distance

diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc
index 898040fb..61f0da10 100644
--- a/src/python/gudhi/hera.cc
+++ b/src/python/gudhi/hera.cc
@@ -10,16 +10,6 @@ namespace py = pybind11;

 typedef py::array_t<double> Dgm;

-namespace hera {
-template <> struct DiagramTraits<Dgm>{
-  using PointType = std::array<double, 2>;
-  using RealType = double;
-
-  static RealType get_x(const PointType& p) { return std::get<0>(p); }
-  static RealType get_y(const PointType& p) { return std::get<1>(p); }
-};
-}
-
 double wasserstein_distance(
     Dgm d1, Dgm d2,
     double wasserstein_power, double internal_p,
@@ -32,7 +22,7 @@ double wasserstein_distance(
     throw std::runtime_error("Diagram 1 must be an array of size n x 2");
   if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0))
     throw std::runtime_error("Diagram 2 must be an array of size n x 2");
-  typedef hera::DiagramTraits<Dgm>::PointType Point;
+  typedef std::array<double, 2> Point;
   auto p1 = (Point*)buf1.ptr;
   auto p2 = (Point*)buf2.ptr;
   auto diag1 = boost::make_iterator_range(p1, p1+buf1.shape[0]);
   auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]);
@@ -52,16 +42,17 @@ PYBIND11_MODULE(hera, m) {
       m.def("wasserstein_distance", &wasserstein_distance,
           py::arg("X"), py::arg("Y"),
           // Should we name those q, p and d instead?
-          py::arg("wasserstein_power") = 1,
+          py::arg("order") = 1,
           py::arg("internal_p") = std::numeric_limits<double>::infinity(),
           py::arg("delta") = .01,
           R"pbdoc(
-        Compute the Wasserstein distance between two diagrams.
+        Compute the Wasserstein distance between two diagrams.
+        Points at infinity are supported.
Parameters: X (n x 2 numpy array): First diagram Y (n x 2 numpy array): Second diagram - wasserstein_power (float): Wasserstein degree W_q + order (float): Wasserstein exponent W_q internal_p (float): Internal Minkowski norm L^p in R^2 delta (float): Relative error 1+delta -- cgit v1.2.3 From 1783c047302414bbcd6ff4f7c73dcc5a6501fd81 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Jan 2020 17:51:28 +0100 Subject: Share tests for wasserstein_distance --- src/python/test/test_wasserstein_distance.py | 59 ++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 43dda77e..46a7079f 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -8,41 +8,66 @@ - YYYY/MM Author: Description of the modification """ -from gudhi.wasserstein import wasserstein_distance +from gudhi.wasserstein import wasserstein_distance as pot +from gudhi.hera import wasserstein_distance as hera import numpy as np __author__ = "Theo Lacombe" __copyright__ = "Copyright (C) 2019 Inria" __license__ = "MIT" - -def test_basic_wasserstein(): +def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True): diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]) diag2 = np.array([[2.8, 4.45], [9.5, 14.1]]) diag3 = np.array([[0, 2], [4, 6]]) diag4 = np.array([[0, 3], [4, 8]]) - emptydiag = np.array([[]]) + emptydiag = np.array([]) + + # We just need to handle positive numbers here + def approx(a, b): + f = 1 + delta + return a <= b*f and b <= a*f assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=1.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=1.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=2.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=2.) == 0. - assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == 2. - assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == 4. + assert approx(wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.), 2.) + assert approx(wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.), 4.) + + assert approx(wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.), 5.) # thank you Pythagorician triplets + assert approx(wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.), 2.5) + assert approx(wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.), 3.5355339059327378) + + assert approx(wasserstein_distance(diag1, diag2, internal_p=2., order=1.) , 1.4453593023967701) + assert approx(wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74), 0.9772734057168739) + + assert approx(wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863), 3.141592214572228) + + assert approx(wasserstein_distance(diag3, diag4, internal_p=1., order=1.), 3.) + assert approx(wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.), 3.) # no diag matching here + assert approx(wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.), np.sqrt(5)) + assert approx(wasserstein_distance(diag3, diag4, internal_p=1., order=2.), np.sqrt(5)) + assert approx(wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.), np.sqrt(5)) + + if(not test_infinity): + return - assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == 5. 
# thank you Pythagorician triplets - assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == 2.5 - assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == 3.5355339059327378 + diag5 = np.array([[0, 3], [4, np.inf]]) + diag6 = np.array([[7, 8], [4, 6], [3, np.inf]]) - assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == 1.4453593023967701 - assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == 0.9772734057168739 + assert wasserstein_distance(diag4, diag5) == np.inf + assert approx(wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf), 4.) - assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == 3.141592214572228 +def hera_wrap(delta): + def fun(*kargs,**kwargs): + return hera(*kargs,**kwargs,delta=delta) + return fun - assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == 3. - assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == 3. # no diag matching here - assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == np.sqrt(5) - assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == np.sqrt(5) - assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == np.sqrt(5) +def test_wasserstein_distance_pot(): + _basic_wasserstein(pot, 1e-15, False) +def test_wasserstein_distance_hera(): + _basic_wasserstein(hera_wrap(1e-12), 1e-12) + _basic_wasserstein(hera_wrap(.1), .1) -- cgit v1.2.3 From a02397ba04d707dc79736ce2f598ebb74459bf90 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Jan 2020 18:20:45 +0100 Subject: Mention submodules in README --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 167a38b3..f7e3d70c 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,15 @@ The GUDHI library is a generic open source C++ library, with a Python interface, for Topological Data Analysis (TDA) and Higher Dimensional Geometry Understanding. The library offers state-of-the-art data structures and algorithms to construct simplicial complexes and compute persistent homology. +# Source code + +We recommend that users get official releases from [the GUDHI website](https://gudhi.inria.fr/). + +For potential contributors, to fully checkout GUDHI, after cloning the git repository, you may also need to checkout its submodules using +```sh +git submodule update --init +``` + # Compilation and installation To install GUDHI, you can follow the [C++ compilation procedure](https://gudhi.inria.fr/doc/latest/installation.html), the [Python compilation procedure](https://gudhi.inria.fr/python/latest/installation.html), use our [conda-forge package](https://gudhi.inria.fr/conda/), or [go with Docker](https://gudhi.inria.fr/dockerfile/). 
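The shared tests above exercise the same contract against both Wasserstein backends; a small comparison sketch (the diagrams are illustrative, and delta is hera's relative-error bound documented in its pybind11 docstring):

import numpy as np
from gudhi.wasserstein import wasserstein_distance as pot_distance
from gudhi.hera import wasserstein_distance as hera_distance

diag1 = np.array([[2.7, 3.7], [9.6, 14.0]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])

exact = pot_distance(diag1, diag2, order=1., internal_p=2.)
approx = hera_distance(diag1, diag2, order=1., internal_p=2., delta=0.01)
# hera returns a (1 + delta)-approximation of the exact distance
print(exact, approx)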
-- cgit v1.2.3 From 4c8e4549818bb033b148632abba4eae9ae9407c3 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Jan 2020 19:17:14 +0100 Subject: Add pybind11 to Dockerfile_gudhi_installation --- Dockerfile_gudhi_installation | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation index 9fe20730..76b2628b 100644 --- a/Dockerfile_gudhi_installation +++ b/Dockerfile_gudhi_installation @@ -42,6 +42,7 @@ RUN apt-get install -y make \ python3-pip \ python3-pytest \ python3-tk \ + python3-pybind11 \ libfreetype6-dev \ pkg-config \ curl @@ -62,4 +63,4 @@ RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudh && make all test install \ && cmake -DWITH_GUDHI_PYTHON=ON . \ && cd python \ -&& python3 setup.py install \ No newline at end of file +&& python3 setup.py install -- cgit v1.2.3 From a20fdcaf9bf23f88cba2a2c5906d515fd785dc8a Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 24 Jan 2020 12:15:14 +0100 Subject: Modern cmake for boost approach --- src/Alpha_complex/utilities/CMakeLists.txt | 4 ++-- src/Bottleneck_distance/example/CMakeLists.txt | 2 +- src/Cech_complex/benchmark/CMakeLists.txt | 2 +- src/Cech_complex/example/CMakeLists.txt | 2 +- src/Cech_complex/utilities/CMakeLists.txt | 2 +- src/Contraction/example/CMakeLists.txt | 2 +- src/Persistent_cohomology/benchmark/CMakeLists.txt | 2 +- src/Persistent_cohomology/example/CMakeLists.txt | 8 ++++---- src/Rips_complex/utilities/CMakeLists.txt | 8 ++++---- src/Simplex_tree/example/CMakeLists.txt | 2 +- src/Witness_complex/utilities/CMakeLists.txt | 4 ++-- src/cmake/modules/GUDHI_boost_test.cmake | 2 +- src/cmake/modules/GUDHI_third_party_libraries.cmake | 6 +++--- 13 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/Alpha_complex/utilities/CMakeLists.txt b/src/Alpha_complex/utilities/CMakeLists.txt index 57b92942..a3b0cc24 100644 --- a/src/Alpha_complex/utilities/CMakeLists.txt +++ b/src/Alpha_complex/utilities/CMakeLists.txt @@ -2,7 +2,7 @@ project(Alpha_complex_utilities) if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) add_executable (alpha_complex_persistence alpha_complex_persistence.cpp) - target_link_libraries(alpha_complex_persistence ${CGAL_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY}) + target_link_libraries(alpha_complex_persistence ${CGAL_LIBRARY} Boost::program_options) if (TBB_FOUND) target_link_libraries(alpha_complex_persistence ${TBB_LIBRARIES}) @@ -23,7 +23,7 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) install(TARGETS alpha_complex_persistence DESTINATION bin) add_executable(alpha_complex_3d_persistence alpha_complex_3d_persistence.cpp) - target_link_libraries(alpha_complex_3d_persistence ${CGAL_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY}) + target_link_libraries(alpha_complex_3d_persistence ${CGAL_LIBRARY} Boost::program_options) if (TBB_FOUND) target_link_libraries(alpha_complex_3d_persistence ${TBB_LIBRARIES}) endif(TBB_FOUND) diff --git a/src/Bottleneck_distance/example/CMakeLists.txt b/src/Bottleneck_distance/example/CMakeLists.txt index 3d65963a..9839c59d 100644 --- a/src/Bottleneck_distance/example/CMakeLists.txt +++ b/src/Bottleneck_distance/example/CMakeLists.txt @@ -13,7 +13,7 @@ endif (NOT CGAL_VERSION VERSION_LESS 4.11.0) if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) add_executable (alpha_rips_persistence_bottleneck_distance alpha_rips_persistence_bottleneck_distance.cpp) - target_link_libraries(alpha_rips_persistence_bottleneck_distance 
${Boost_PROGRAM_OPTIONS_LIBRARY})
+  target_link_libraries(alpha_rips_persistence_bottleneck_distance Boost::program_options)

   if (TBB_FOUND)
     target_link_libraries(alpha_rips_persistence_bottleneck_distance ${TBB_LIBRARIES})
diff --git a/src/Cech_complex/benchmark/CMakeLists.txt b/src/Cech_complex/benchmark/CMakeLists.txt
index b7697764..c04bca53 100644
--- a/src/Cech_complex/benchmark/CMakeLists.txt
+++ b/src/Cech_complex/benchmark/CMakeLists.txt
@@ -5,7 +5,7 @@ project(Cech_complex_benchmark)
 file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)

 add_executable(cech_complex_benchmark cech_complex_benchmark.cpp)
-target_link_libraries(cech_complex_benchmark ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY})
+target_link_libraries(cech_complex_benchmark Boost::filesystem)

 if (TBB_FOUND)
   target_link_libraries(cech_complex_benchmark ${TBB_LIBRARIES})
diff --git a/src/Cech_complex/example/CMakeLists.txt b/src/Cech_complex/example/CMakeLists.txt
index ab391215..98757988 100644
--- a/src/Cech_complex/example/CMakeLists.txt
+++ b/src/Cech_complex/example/CMakeLists.txt
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 2.6)
 project(Cech_complex_examples)

 add_executable ( Cech_complex_example_step_by_step cech_complex_step_by_step.cpp )
-target_link_libraries(Cech_complex_example_step_by_step ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries(Cech_complex_example_step_by_step Boost::program_options)
 if (TBB_FOUND)
   target_link_libraries(Cech_complex_example_step_by_step ${TBB_LIBRARIES})
 endif()
diff --git a/src/Cech_complex/utilities/CMakeLists.txt b/src/Cech_complex/utilities/CMakeLists.txt
index 30b99729..253d7304 100644
--- a/src/Cech_complex/utilities/CMakeLists.txt
+++ b/src/Cech_complex/utilities/CMakeLists.txt
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 2.6)
 project(Cech_complex_utilities)

 add_executable(cech_persistence cech_persistence.cpp)
-target_link_libraries(cech_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
+target_link_libraries(cech_persistence Boost::program_options)

 if (TBB_FOUND)
   target_link_libraries(cech_persistence ${TBB_LIBRARIES})
diff --git a/src/Contraction/example/CMakeLists.txt b/src/Contraction/example/CMakeLists.txt
index f0dc885d..4740a2d1 100644
--- a/src/Contraction/example/CMakeLists.txt
+++ b/src/Contraction/example/CMakeLists.txt
@@ -4,7 +4,7 @@ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
   add_executable(RipsContraction Rips_contraction.cpp)

   add_executable(GarlandHeckbert Garland_heckbert.cpp)
-  target_link_libraries(GarlandHeckbert ${Boost_TIMER_LIBRARY})
+  target_link_libraries(GarlandHeckbert Boost::timer)

   add_test(NAME Contraction_example_tore3D_0.2 COMMAND $<TARGET_FILE:RipsContraction>
   "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "0.2")
diff --git a/src/Persistent_cohomology/benchmark/CMakeLists.txt b/src/Persistent_cohomology/benchmark/CMakeLists.txt
index 2bb3b0c7..f38cc543 100644
--- a/src/Persistent_cohomology/benchmark/CMakeLists.txt
+++ b/src/Persistent_cohomology/benchmark/CMakeLists.txt
@@ -3,7 +3,7 @@ project(Persistent_cohomology_benchmark)
 if(GMP_FOUND)
   if(GMPXX_FOUND)
     add_executable ( performance_rips_persistence EXCLUDE_FROM_ALL performance_rips_persistence.cpp )
-    target_link_libraries(performance_rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY} ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
+    target_link_libraries(performance_rips_persistence Boost::program_options ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
     if (TBB_FOUND)
       target_link_libraries(performance_rips_persistence ${TBB_LIBRARIES})
     endif(TBB_FOUND)
diff --git
a/src/Persistent_cohomology/example/CMakeLists.txt b/src/Persistent_cohomology/example/CMakeLists.txt index 94ec13c5..4c08cd68 100644 --- a/src/Persistent_cohomology/example/CMakeLists.txt +++ b/src/Persistent_cohomology/example/CMakeLists.txt @@ -5,13 +5,13 @@ add_executable(plain_homology plain_homology.cpp) add_executable(persistence_from_simple_simplex_tree persistence_from_simple_simplex_tree.cpp) add_executable(rips_persistence_step_by_step rips_persistence_step_by_step.cpp) -target_link_libraries(rips_persistence_step_by_step ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(rips_persistence_step_by_step Boost::program_options) add_executable(rips_persistence_via_boundary_matrix rips_persistence_via_boundary_matrix.cpp) -target_link_libraries(rips_persistence_via_boundary_matrix ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(rips_persistence_via_boundary_matrix Boost::program_options) add_executable(persistence_from_file persistence_from_file.cpp) -target_link_libraries(persistence_from_file ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(persistence_from_file Boost::program_options) if (TBB_FOUND) target_link_libraries(plain_homology ${TBB_LIBRARIES}) @@ -43,7 +43,7 @@ if(GMP_FOUND) if(GMPXX_FOUND) add_executable(rips_multifield_persistence rips_multifield_persistence.cpp ) target_link_libraries(rips_multifield_persistence - ${Boost_PROGRAM_OPTIONS_LIBRARY} ${GMPXX_LIBRARIES} ${GMP_LIBRARIES}) + Boost::program_options ${GMPXX_LIBRARIES} ${GMP_LIBRARIES}) if (TBB_FOUND) target_link_libraries(rips_multifield_persistence ${TBB_LIBRARIES}) endif(TBB_FOUND) diff --git a/src/Rips_complex/utilities/CMakeLists.txt b/src/Rips_complex/utilities/CMakeLists.txt index 4b565628..d2448d7b 100644 --- a/src/Rips_complex/utilities/CMakeLists.txt +++ b/src/Rips_complex/utilities/CMakeLists.txt @@ -1,16 +1,16 @@ project(Rips_complex_utilities) add_executable(rips_distance_matrix_persistence rips_distance_matrix_persistence.cpp) -target_link_libraries(rips_distance_matrix_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(rips_distance_matrix_persistence Boost::program_options) add_executable(rips_persistence rips_persistence.cpp) -target_link_libraries(rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(rips_persistence Boost::program_options) add_executable(rips_correlation_matrix_persistence rips_correlation_matrix_persistence.cpp) -target_link_libraries(rips_correlation_matrix_persistence ${Boost_SYSTEM_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(rips_correlation_matrix_persistence Boost::system Boost::program_options) add_executable(sparse_rips_persistence sparse_rips_persistence.cpp) -target_link_libraries(sparse_rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(sparse_rips_persistence Boost::program_options) if (TBB_FOUND) target_link_libraries(rips_distance_matrix_persistence ${TBB_LIBRARIES}) diff --git a/src/Simplex_tree/example/CMakeLists.txt b/src/Simplex_tree/example/CMakeLists.txt index f99b164c..6ba518fa 100644 --- a/src/Simplex_tree/example/CMakeLists.txt +++ b/src/Simplex_tree/example/CMakeLists.txt @@ -32,7 +32,7 @@ endif() if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) add_executable ( Simplex_tree_example_cech_complex_cgal_mini_sphere_3d cech_complex_cgal_mini_sphere_3d.cpp ) - target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d ${Boost_PROGRAM_OPTIONS_LIBRARY} ${CGAL_LIBRARY}) + 
target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d Boost::program_options ${CGAL_LIBRARY}) if (TBB_FOUND) target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d ${TBB_LIBRARIES}) endif() diff --git a/src/Witness_complex/utilities/CMakeLists.txt b/src/Witness_complex/utilities/CMakeLists.txt index 3ee0c2f6..d986d2d1 100644 --- a/src/Witness_complex/utilities/CMakeLists.txt +++ b/src/Witness_complex/utilities/CMakeLists.txt @@ -4,10 +4,10 @@ project(Witness_complex_utilities) if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) add_executable ( Witness_complex_strong_witness_persistence strong_witness_persistence.cpp ) - target_link_libraries(Witness_complex_strong_witness_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY}) + target_link_libraries(Witness_complex_strong_witness_persistence Boost::program_options) add_executable ( Witness_complex_weak_witness_persistence weak_witness_persistence.cpp ) - target_link_libraries(Witness_complex_weak_witness_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY}) + target_link_libraries(Witness_complex_weak_witness_persistence Boost::program_options) if (TBB_FOUND) target_link_libraries(Witness_complex_strong_witness_persistence ${TBB_LIBRARIES}) diff --git a/src/cmake/modules/GUDHI_boost_test.cmake b/src/cmake/modules/GUDHI_boost_test.cmake index c3b29883..3b9da78f 100644 --- a/src/cmake/modules/GUDHI_boost_test.cmake +++ b/src/cmake/modules/GUDHI_boost_test.cmake @@ -19,7 +19,7 @@ else (WITH_GUDHI_BOOST_TEST_COVERAGE) endif(WITH_GUDHI_BOOST_TEST_COVERAGE) function(gudhi_add_boost_test unitary_test) - target_link_libraries(${unitary_test} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}) + target_link_libraries(${unitary_test} Boost::unit_test_framework) add_test(NAME ${unitary_test} COMMAND $ ${GUDHI_UT_LOG_FORMAT} ${GUDHI_UT_LOG_SINK} ${GUDHI_UT_LOG_LEVEL} ${GUDHI_UT_REPORT_LEVEL}) diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 24a34150..4d121eca 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -1,6 +1,6 @@ # This files manage third party libraries required by GUDHI -find_package(Boost 1.56.0 REQUIRED COMPONENTS system filesystem unit_test_framework program_options thread) +find_package(Boost 1.56.0 REQUIRED COMPONENTS system filesystem unit_test_framework program_options thread timer) if(NOT Boost_FOUND) message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.") @@ -82,8 +82,8 @@ add_definitions( -DBOOST_ALL_DYN_LINK ) # problem on Mac with boost_system and boost_thread add_definitions( -DBOOST_SYSTEM_NO_DEPRECATED ) -INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS}) -LINK_DIRECTORIES(${Boost_LIBRARY_DIRS}) +#INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS}) +#LINK_DIRECTORIES(${Boost_LIBRARY_DIRS}) message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS}) message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS}) -- cgit v1.2.3 From a604e92c73d4f83281dd62e180a9ec6a70d3bd00 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 24 Jan 2020 14:26:40 +0100 Subject: Remove boost timer as not used --- src/Contraction/example/CMakeLists.txt | 1 - src/cmake/modules/GUDHI_third_party_libraries.cmake | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Contraction/example/CMakeLists.txt b/src/Contraction/example/CMakeLists.txt index 4740a2d1..c5d31aca 100644 --- a/src/Contraction/example/CMakeLists.txt +++ 
b/src/Contraction/example/CMakeLists.txt @@ -4,7 +4,6 @@ if (NOT CGAL_VERSION VERSION_LESS 4.11.0) add_executable(RipsContraction Rips_contraction.cpp) add_executable(GarlandHeckbert Garland_heckbert.cpp) - target_link_libraries(GarlandHeckbert Boost::timer) add_test(NAME Contraction_example_tore3D_0.2 COMMAND $<TARGET_FILE:RipsContraction> "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "0.2") diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 4d121eca..10b2b56a 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -1,6 +1,6 @@ # This files manage third party libraries required by GUDHI -find_package(Boost 1.56.0 REQUIRED COMPONENTS system filesystem unit_test_framework program_options thread timer) +find_package(Boost 1.56.0 REQUIRED COMPONENTS system filesystem unit_test_framework program_options thread) if(NOT Boost_FOUND) message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.") -- cgit v1.2.3 From 22c946ecc9594fc496d641b70a19643057295dcf Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 24 Jan 2020 15:48:16 +0100 Subject: According to the documentation, cmake 3.5 is required for modern boost finding --- CMakeLists.txt | 2 +- src/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5dcc6803..a9f7f989 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.5) project(GUDHIdev) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 561aa049..0e799a3a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.5) project(GUDHI) -- cgit v1.2.3 From 4c0e6e4144dd3cf6da9600fd4b9bbcac5e664b73 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Sun, 26 Jan 2020 02:54:35 -0500 Subject: Add extended persistence function --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 71 +++++++++++++++++++++++++++ src/python/gudhi/simplex_tree.pxd | 2 + src/python/gudhi/simplex_tree.pyx | 14 ++++++ 3 files changed, 87 insertions(+) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 76608008..4786b244 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -125,6 +125,8 @@ class Simplex_tree { private: typedef typename Dictionary::iterator Dictionary_it; typedef typename Dictionary_it::value_type Dit_value_t; + double minval_; + double maxval_; struct return_first { Vertex_handle operator()(const Dit_value_t& p_sh) const { @@ -1465,6 +1467,75 @@ class Simplex_tree { } } + /** \brief Retrieve good values for extended persistence, and separate the diagram into the ordinary, relative, extended+ and extended- subdiagrams. extend_filtration() must be called first. + * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration() and this->get_persistence(). + * @return A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-.
+ */ + std::vector<std::vector<std::pair<int, std::pair<double, double>>>> convert(const std::vector<std::pair<int, std::pair<double, double>>>& dgm){ + std::vector<std::vector<std::pair<int, std::pair<double, double>>>> new_dgm(4); double x, y; + for(unsigned int i = 0; i < dgm.size(); i++){ int h = dgm[i].first; double px = dgm[i].second.first; double py = dgm[i].second.second; + if(std::isinf(py)) continue; + else{ + if ((px <= -1) & (py <= -1)){x = minval_ + (maxval_-minval_)*(px + 2); y = minval_ + (maxval_-minval_)*(py + 2); new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y))); } + if ((px >= 1) & (py >= 1)){x = minval_ - (maxval_-minval_)*(px - 2); y = minval_ - (maxval_-minval_)*(py - 2); new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y))); } + if ((px <= -1) & (py >= 1)){x = minval_ + (maxval_-minval_)*(px + 2); y = minval_ - (maxval_-minval_)*(py - 2); + if (x <= y) new_dgm[2].push_back(std::make_pair(h, std::make_pair(x,y))); + else new_dgm[3].push_back(std::make_pair(h, std::make_pair(x,y))); + } + } + } + return new_dgm; + } + + /** \brief Extend filtration for computing extended persistence. + */ + void extend_filtration() { + + // Compute maximum and minimum of filtration values + int maxvert = -std::numeric_limits<int>::infinity(); + std::vector<double> filt; + for (auto sh : this->complex_simplex_range()) {if (this->dimension(sh) == 0){filt.push_back(this->filtration(sh)); maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert);}} + minval_ = *std::min_element(filt.begin(), filt.end()); + maxval_ = *std::max_element(filt.begin(), filt.end()); + maxvert += 1; + + // Compute vectors of integers corresponding to the Simplex handles + std::vector<std::vector<int> > splxs; + for (auto sh : this->complex_simplex_range()) { + std::vector<int> vr; for (auto vh : this->simplex_vertex_range(sh)){vr.push_back(vh);} + splxs.push_back(vr); + } + + // Add point for coning the simplicial complex + int count = this->num_simplices(); + std::vector<int> cone; cone.push_back(maxvert); auto ins = this->insert_simplex(cone, -3); this->assign_key(ins.first, count); count++; + + // For each simplex + for (auto vr : splxs){ + // Create cone on simplex + auto sh = this->find(vr); vr.push_back(maxvert); + if (this->dimension(sh) == 0){ + // Assign ascending value between -2 and -1 to vertex + double v = this->filtration(sh); + this->assign_filtration(sh, -2 + (v-minval_)/(maxval_-minval_)); + // Assign descending value between 1 and 2 to cone on vertex + auto ins = this->insert_simplex(vr, 2 - (v-minval_)/(maxval_-minval_)); + this->assign_key(ins.first, count); + } + else{ + // Assign value -3 to simplex and cone on simplex + this->assign_filtration(sh, -3); + auto ins = this->insert_simplex(vr, -3); + this->assign_key(ins.first, count); + } + count++; + } + + this->make_filtration_non_decreasing(); this->initialize_filtration(); + + } + + private: Vertex_handle null_vertex_; /** \brief Total number of simplices in the complex, without the empty simplex.*/ diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 1066d44b..39f2a45f 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -43,6 +43,8 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": void remove_maximal_simplex(vector[int] simplex) bool prune_above_filtration(double filtration) bool make_filtration_non_decreasing() + void extend_filtration() + vector[vector[pair[int, pair[double, double]]]] convert(vector[pair[int, pair[double, double]]]) cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>": diff 
--git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index b18627c4..cfab14f4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -386,6 +386,20 @@ cdef class SimplexTree: """ return self.get_ptr().make_filtration_non_decreasing() + def extend_filtration(self): + """ This function extends filtration for computing extended persistence. + """ + return self.get_ptr().extend_filtration() + + def convert(self, dgm): + """This function retrieves good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. Need extend_filtration to be called first! + + :param dgm: Persistence diagram obtained after calling this->extend_filtration and this->get_persistence. + :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. + """ + return self.get_ptr().convert(dgm) + + def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): """This function returns the persistence of the simplicial complex. -- cgit v1.2.3 From bf4b4eeda9762ed9e99c2b24f19331fa0111fcfe Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 27 Jan 2020 10:43:16 +0100 Subject: Code review: Use std::clog instead of std::cout --- .../benchmark/Alpha_complex_3d_benchmark.cpp | 50 ++--- .../example/Alpha_complex_3d_from_points.cpp | 12 +- .../example/Alpha_complex_from_off.cpp | 2 +- .../example/Alpha_complex_from_points.cpp | 12 +- .../example/Fast_alpha_complex_from_off.cpp | 2 +- .../Weighted_alpha_complex_3d_from_points.cpp | 12 +- src/Alpha_complex/include/gudhi/Alpha_complex.h | 32 +-- src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 24 +- .../test/Alpha_complex_3d_unit_test.cpp | 34 +-- src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 74 +++--- .../test/Periodic_alpha_complex_3d_unit_test.cpp | 16 +- .../test/Weighted_alpha_complex_3d_unit_test.cpp | 20 +- ...eighted_periodic_alpha_complex_3d_unit_test.cpp | 20 +- .../utilities/alpha_complex_3d_persistence.cpp | 28 +-- .../utilities/alpha_complex_persistence.cpp | 30 +-- .../example/Random_bitmap_cubical_complex.cpp | 2 +- .../include/gudhi/Bitmap_cubical_complex.h | 56 ++--- .../include/gudhi/Bitmap_cubical_complex_base.h | 36 +-- ...cal_complex_periodic_boundary_conditions_base.h | 10 +- src/Bitmap_cubical_complex/test/Bitmap_test.cpp | 14 +- .../utilities/cubical_complex_persistence.cpp | 4 +- .../periodic_cubical_complex_persistence.cpp | 4 +- .../doc/Intro_bottleneck_distance.h | 2 +- .../alpha_rips_persistence_bottleneck_distance.cpp | 32 +-- .../example/bottleneck_basic_example.cpp | 4 +- .../utilities/bottleneck_distance.cpp | 4 +- .../benchmark/cech_complex_benchmark.cpp | 24 +- .../example/cech_complex_example_from_points.cpp | 12 +- .../example/cech_complex_step_by_step.cpp | 28 +-- .../include/gudhi/Cech_complex_blocker.h | 4 +- src/Cech_complex/test/test_cech_complex.cpp | 50 ++--- src/Cech_complex/utilities/cech_persistence.cpp | 28 +-- src/Contraction/example/Garland_heckbert.cpp | 6 +- src/Contraction/example/Rips_contraction.cpp | 10 +- src/Contraction/include/gudhi/Edge_contraction.h | 10 +- src/GudhUI/model/Model.h | 62 +++--- src/GudhUI/utils/Bar_code_persistence.h | 4 +- src/GudhUI/utils/Critical_points.h | 2 +- src/GudhUI/utils/Rips_builder.h | 6 +- src/GudhUI/view/View_parameter.h | 6 +- src/Hasse_complex/include/gudhi/Hasse_complex.h | 6 +- src/Nerve_GIC/example/CoordGIC.cpp | 10 
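
For reference, the arithmetic behind the constants -3, [-2,-1] and [1,2] used by extend_filtration() and convert() in the commit above, written out from the hunks. With $m$ denoting minval_ and $M$ denoting maxval_, a vertex $v$ with input value $f(v)$ is stored in the ascending copy at

$$\hat f_{\mathrm{up}}(v) = -2 + \frac{f(v) - m}{M - m} \in [-2, -1],$$

its cone copy at

$$\hat f_{\mathrm{down}}(v) = 2 - \frac{f(v) - m}{M - m} \in [1, 2],$$

and every positive-dimensional simplex at $-3$, a sentinel that make_filtration_non_decreasing() then lifts to the value of its largest face. convert() inverts the two affine maps, decoding a coordinate $p \le -1$ as $x = m + (M - m)(p + 2)$ and a coordinate $p \ge 1$ as $x = m - (M - m)(p - 2)$, and sorts each point by the quadrant of $(p_x, p_y)$: both coordinates $\le -1$ gives Ordinary, both $\ge 1$ gives Relative, and $p_x \le -1$ with $p_y \ge 1$ gives Extended ($+$ when the decoded pair satisfies $x \le y$, $-$ otherwise). Infinite points, which belong to the one class surviving the whole coned filtration, are skipped.
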
+- src/Nerve_GIC/example/FuncGIC.cpp | 10 +- src/Nerve_GIC/include/gudhi/GIC.h | 56 ++--- src/Nerve_GIC/utilities/Nerve.cpp | 10 +- src/Nerve_GIC/utilities/VoronoiGIC.cpp | 10 +- .../example/persistence_heat_maps.cpp | 8 +- .../example/persistence_intervals.cpp | 40 ++-- .../example/persistence_landscape.cpp | 24 +- .../example/persistence_landscape_on_grid.cpp | 22 +- .../example/persistence_vectors.cpp | 10 +- .../example/sliced_wasserstein.cpp | 8 +- .../include/gudhi/Persistence_heat_maps.h | 64 +++--- .../include/gudhi/Persistence_intervals.h | 56 ++--- .../include/gudhi/Persistence_landscape.h | 222 +++++++++--------- .../include/gudhi/Persistence_landscape_on_grid.h | 122 +++++----- .../include/gudhi/Persistence_vectors.h | 30 +-- .../include/gudhi/read_persistence_from_file.h | 16 +- .../average_persistence_heat_maps.cpp | 6 +- .../compute_distance_of_persistence_heat_maps.cpp | 10 +- ...ute_scalar_product_of_persistence_heat_maps.cpp | 10 +- ...h_m_weighted_by_arctan_of_their_persistence.cpp | 6 +- ...te_p_h_m_weighted_by_distance_from_diagonal.cpp | 6 +- ...ate_p_h_m_weighted_by_squared_diag_distance.cpp | 6 +- .../create_persistence_heat_maps.cpp | 6 +- .../persistence_heat_maps/create_pssk.cpp | 6 +- .../plot_persistence_heat_map.cpp | 4 +- ...te_birth_death_range_in_persistence_diagram.cpp | 8 +- .../compute_bottleneck_distance.cpp | 10 +- .../compute_number_of_dominant_intervals.cpp | 8 +- .../plot_histogram_of_intervals_lengths.cpp | 6 +- .../plot_persistence_Betti_numbers.cpp | 4 +- .../plot_persistence_intervals.cpp | 2 +- .../persistence_landscapes/average_landscapes.cpp | 6 +- .../compute_distance_of_landscapes.cpp | 10 +- .../compute_scalar_product_of_landscapes.cpp | 10 +- .../persistence_landscapes/create_landscapes.cpp | 6 +- .../persistence_landscapes/plot_landscapes.cpp | 4 +- .../average_landscapes_on_grid.cpp | 6 +- .../compute_distance_of_landscapes_on_grid.cpp | 10 +- ...ompute_scalar_product_of_landscapes_on_grid.cpp | 10 +- .../create_landscapes_on_grid.cpp | 6 +- .../plot_landscapes_on_grid.cpp | 4 +- .../average_persistence_vectors.cpp | 6 +- .../compute_distance_of_persistence_vectors.cpp | 14 +- ...mpute_scalar_product_of_persistence_vectors.cpp | 10 +- .../create_persistence_vectors.cpp | 6 +- .../plot_persistence_vectors.cpp | 4 +- .../benchmark/performance_rips_persistence.cpp | 36 +-- .../example/custom_persistence_sort.cpp | 26 +-- .../example/persistence_from_file.cpp | 40 ++-- .../persistence_from_simple_simplex_tree.cpp | 46 ++-- .../example/plain_homology.cpp | 8 +- .../example/rips_multifield_persistence.cpp | 34 +-- .../example/rips_persistence_step_by_step.cpp | 28 +-- .../rips_persistence_via_boundary_matrix.cpp | 28 +-- .../include/gudhi/Persistent_cohomology.h | 2 +- .../test/betti_numbers_unit_test.cpp | 30 +-- .../test/persistent_cohomology_unit_test.cpp | 28 +-- ...persistent_cohomology_unit_test_multi_field.cpp | 8 +- ...e_one_skeleton_rips_from_correlation_matrix.cpp | 12 +- ...mple_one_skeleton_rips_from_distance_matrix.cpp | 12 +- .../example_one_skeleton_rips_from_points.cpp | 12 +- ..._rips_complex_from_csv_distance_matrix_file.cpp | 2 +- .../example/example_rips_complex_from_off_file.cpp | 2 +- src/Rips_complex/example/example_sparse_rips.cpp | 2 +- src/Rips_complex/test/test_rips_complex.cpp | 108 ++++----- .../rips_correlation_matrix_persistence.cpp | 28 +-- .../utilities/rips_distance_matrix_persistence.cpp | 28 +-- src/Rips_complex/utilities/rips_persistence.cpp | 28 +-- .../utilities/sparse_rips_persistence.cpp | 28 +-- 
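
To see the new entry points end to end, a minimal C++ sketch, not part of the patch: the toy complex, its vertex values, and the loop that re-assembles a (dimension, (birth, death)) diagram from GUDHI's existing Persistent_cohomology engine are illustrative assumptions; only extend_filtration() and convert() come from the commit above.

// Minimal usage sketch (not from the patch): extended persistence via the new
// Simplex_tree API. The toy complex and its vertex values are invented for
// illustration; the pair-assembly loop below plays the role of get_persistence
// in the Python interface.
#include <gudhi/Simplex_tree.h>
#include <gudhi/Persistent_cohomology.h>

#include <iostream>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

int main() {
  using Simplex_tree = Gudhi::Simplex_tree<>;
  using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
  using Persistent_cohomology =
      Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;

  // A path graph 0 - 1 - 2 carrying a height-like function on its vertices.
  Simplex_tree st;
  std::vector<int> v0{0}, v1{1}, v2{2}, e01{0, 1}, e12{1, 2};
  st.insert_simplex_and_subfaces(e01);
  st.insert_simplex_and_subfaces(e12);
  st.assign_filtration(st.find(v0), 0.0);
  st.assign_filtration(st.find(v1), 1.0);
  st.assign_filtration(st.find(v2), 0.5);

  // Cone the complex and rescale: ascending copy in [-2,-1], cones in [1,2].
  // extend_filtration() re-initializes the filtration itself, so persistence
  // can be computed right away.
  st.extend_filtration();

  Persistent_cohomology pcoh(st);
  pcoh.init_coefficients(11);
  pcoh.compute_persistent_cohomology();

  // Re-assemble the (dimension, (birth, death)) pairs that convert() expects.
  std::vector<std::pair<int, std::pair<double, double>>> dgm;
  for (auto const& interval : pcoh.get_persistent_pairs()) {
    auto birth_sh = std::get<0>(interval);
    auto death_sh = std::get<1>(interval);
    double death = (death_sh == st.null_simplex())
                       ? std::numeric_limits<double>::infinity()
                       : st.filtration(death_sh);
    dgm.emplace_back(st.dimension(birth_sh),
                     std::make_pair(st.filtration(birth_sh), death));
  }

  // Decode back to the original scale and split into the four subdiagrams.
  auto subdgms = st.convert(dgm);
  const char* names[] = {"Ordinary", "Relative", "Extended+", "Extended-"};
  for (int k = 0; k < 4; k++) {
    std::clog << names[k] << ":" << std::endl;
    for (auto const& pt : subdgms[k])
      std::clog << "  dim " << pt.first << " : (" << pt.second.first << ", "
                << pt.second.second << ")" << std::endl;
  }
  return 0;
}

In the Python binding added by the same commit, the equivalent flow would be st.extend_filtration(), then dgm = st.persistence(), then subdgms = st.convert(dgm).
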
.../example/cech_complex_cgal_mini_sphere_3d.cpp | 30 +-- ...e_alpha_shapes_3_simplex_tree_from_off_file.cpp | 68 +++--- .../example/graph_expansion_with_blocker.cpp | 22 +- src/Simplex_tree/example/mini_simplex_tree.cpp | 4 +- src/Simplex_tree/example/simple_simplex_tree.cpp | 138 ++++++------ .../example/simplex_tree_from_cliques_of_graph.cpp | 56 ++--- src/Simplex_tree/include/gudhi/Simplex_tree.h | 12 +- .../test/simplex_tree_ctor_and_move_unit_test.cpp | 32 +-- .../simplex_tree_graph_expansion_unit_test.cpp | 88 ++++---- .../simplex_tree_iostream_operator_unit_test.cpp | 46 ++-- .../test/simplex_tree_remove_unit_test.cpp | 154 ++++++------- src/Simplex_tree/test/simplex_tree_unit_test.cpp | 248 ++++++++++----------- .../example/Skeleton_blocker_from_simplices.cpp | 16 +- .../example/Skeleton_blocker_iteration.cpp | 8 +- .../example/Skeleton_blocker_link.cpp | 10 +- .../include/gudhi/Skeleton_blocker.h | 20 +- .../test/test_skeleton_blocker_complex.cpp | 88 ++++---- .../test_skeleton_blocker_geometric_complex.cpp | 16 +- .../test/test_skeleton_blocker_simplifiable.cpp | 92 ++++---- .../example/example_spatial_searching.cpp | 20 +- .../example/example_choose_n_farthest_points.cpp | 4 +- src/Subsampling/example/example_custom_kernel.cpp | 6 +- .../example/example_pick_n_random_points.cpp | 4 +- .../example/example_sparsify_point_set.cpp | 4 +- src/Subsampling/test/test_pick_n_random_points.cpp | 4 +- src/Subsampling/test/test_sparsify_point_set.cpp | 6 +- .../test/test_tangential_complex.cpp | 10 +- src/Toplex_map/benchmark/benchmark_tm.cpp | 20 +- src/Toplex_map/example/simple_toplex_map.cpp | 54 ++--- src/Toplex_map/test/lazy_toplex_map_unit_test.cpp | 76 +++---- src/Toplex_map/test/toplex_map_unit_test.cpp | 50 ++--- .../example/example_nearest_landmark_table.cpp | 2 +- .../example/example_strong_witness_complex_off.cpp | 8 +- .../example/example_witness_complex_off.cpp | 8 +- .../example/example_witness_complex_sphere.cpp | 8 +- .../test/test_euclidean_simple_witness_complex.cpp | 16 +- .../test/test_simple_witness_complex.cpp | 4 +- .../utilities/strong_witness_persistence.cpp | 32 +-- .../utilities/weak_witness_persistence.cpp | 32 +-- src/Witness_complex/utilities/witnesscomplex.md | 4 +- .../Graph_simplicial_complex_benchmark.cpp | 8 +- .../example/example_CGAL_3D_points_off_reader.cpp | 2 +- .../example/example_CGAL_points_off_reader.cpp | 6 +- src/common/include/gudhi/Clock.h | 4 +- src/common/include/gudhi/Debug_utils.h | 10 +- src/common/include/gudhi/Points_3D_off_io.h | 8 +- src/common/include/gudhi/Points_off_io.h | 8 +- src/common/include/gudhi/Unitary_tests_utils.h | 4 +- src/common/include/gudhi/distance_functions.h | 2 +- src/common/include/gudhi/reader_utils.h | 16 +- .../include/gudhi/writing_persistence_to_file.h | 2 +- src/common/test/test_distance_matrix_reader.cpp | 16 +- .../test/test_persistence_intervals_reader.cpp | 124 +++++------ 164 files changed, 2014 insertions(+), 2014 deletions(-) diff --git a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp index 99ad94b9..e7d85686 100644 --- a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp +++ b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp @@ -19,7 +19,7 @@ std::ofstream results_csv("results.csv"); template void benchmark_points_on_torus_dD(const std::string& msg) { - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"" << msg << "\";" << std::endl; results_csv << 
"\"nb_points\";" @@ -29,7 +29,7 @@ void benchmark_points_on_torus_dD(const std::string& msg) { using K = CGAL::Epick_d>; for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) { - std::cout << " Alpha complex dD on torus with " << nb_points << " points." << std::endl; + std::clog << " Alpha complex dD on torus with " << nb_points << " points." << std::endl; std::vector points_on_torus = Gudhi::generate_points_on_torus_3D(nb_points, 1.0, 0.5); std::vector points; @@ -41,26 +41,26 @@ void benchmark_points_on_torus_dD(const std::string& msg) { ac_create_clock.begin(); Gudhi::alpha_complex::Alpha_complex alpha_complex_from_points(points); ac_create_clock.end(); - std::cout << ac_create_clock; + std::clog << ac_create_clock; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_points_on_torus_dD - complex creation"); st_create_clock.begin(); alpha_complex_from_points.create_complex(complex); st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_points_on_torus_dD - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_points_on_torus_dD - nb simplices = " << complex.num_simplices() << std::endl; } } template void benchmark_points_on_torus_3D(const std::string& msg) { using K = CGAL::Epick_d>; - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"" << msg << "\";" << std::endl; results_csv << "\"nb_points\";" @@ -69,7 +69,7 @@ void benchmark_points_on_torus_3D(const std::string& msg) { << "\"complex_creation_time(sec.)\";" << std::endl; for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) { - std::cout << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl; + std::clog << " Alpha complex 3d on torus with " << nb_points << " points." 
<< std::endl; std::vector points_on_torus = Gudhi::generate_points_on_torus_3D(nb_points, 1.0, 0.5); std::vector points; @@ -81,19 +81,19 @@ void benchmark_points_on_torus_3D(const std::string& msg) { ac_create_clock.begin(); Alpha_complex_3d alpha_complex_from_points(points); ac_create_clock.end(); - std::cout << ac_create_clock; + std::clog << ac_create_clock; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_points_on_torus_3D - complex creation"); st_create_clock.begin(); alpha_complex_from_points.create_complex(complex); st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl; } } @@ -101,7 +101,7 @@ template void benchmark_weighted_points_on_torus_3D(const std::string& msg) { using K = CGAL::Epick_d>; - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"" << msg << "\";" << std::endl; results_csv << "\"nb_points\";" @@ -112,7 +112,7 @@ void benchmark_weighted_points_on_torus_3D(const std::string& msg) { CGAL::Random random(8); for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) { - std::cout << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl; + std::clog << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl; std::vector points_on_torus = Gudhi::generate_points_on_torus_3D(nb_points, 1.0, 0.5); using Point = typename Weighted_alpha_complex_3d::Bare_point_3; @@ -128,25 +128,25 @@ void benchmark_weighted_points_on_torus_3D(const std::string& msg) { ac_create_clock.begin(); Weighted_alpha_complex_3d alpha_complex_from_points(points); ac_create_clock.end(); - std::cout << ac_create_clock; + std::clog << ac_create_clock; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_weighted_points_on_torus_3D - complex creation"); st_create_clock.begin(); alpha_complex_from_points.create_complex(complex); st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_weighted_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_weighted_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl; } } template void benchmark_periodic_points(const std::string& msg) { - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"" << msg << "\";" << std::endl; results_csv << "\"nb_points\";" @@ -157,7 +157,7 @@ void benchmark_periodic_points(const std::string& msg) { CGAL::Random random(8); for (double nb_points = 10.; nb_points <= 40.; nb_points += 10.) { - std::cout << " Periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." << std::endl; + std::clog << " Periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." 
<< std::endl; using Point = typename Periodic_alpha_complex_3d::Point_3; std::vector points; @@ -174,25 +174,25 @@ void benchmark_periodic_points(const std::string& msg) { ac_create_clock.begin(); Periodic_alpha_complex_3d alpha_complex_from_points(points, 0., 0., 0., nb_points, nb_points, nb_points); ac_create_clock.end(); - std::cout << ac_create_clock; + std::clog << ac_create_clock; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_periodic_points - complex creation"); st_create_clock.begin(); alpha_complex_from_points.create_complex(complex); st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << nb_points * nb_points * nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_periodic_points - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_periodic_points - nb simplices = " << complex.num_simplices() << std::endl; } } template void benchmark_weighted_periodic_points(const std::string& msg) { - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"" << msg << "\";" << std::endl; results_csv << "\"nb_points\";" @@ -203,7 +203,7 @@ void benchmark_weighted_periodic_points(const std::string& msg) { CGAL::Random random(8); for (double nb_points = 10.; nb_points <= 40.; nb_points += 10.) { - std::cout << " Weighted periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." + std::clog << " Weighted periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." << std::endl; using Point = typename Weighted_periodic_alpha_complex_3d::Bare_point_3; @@ -224,19 +224,19 @@ void benchmark_weighted_periodic_points(const std::string& msg) { ac_create_clock.begin(); Weighted_periodic_alpha_complex_3d alpha_complex_from_points(points, 0., 0., 0., nb_points, nb_points, nb_points); ac_create_clock.end(); - std::cout << ac_create_clock; + std::clog << ac_create_clock; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_weighted_periodic_points - complex creation"); st_create_clock.begin(); alpha_complex_from_points.create_complex(complex); st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << nb_points * nb_points * nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_weighted_periodic_points - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_weighted_periodic_points - nb simplices = " << complex.num_simplices() << std::endl; } } diff --git a/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp b/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp index 0e359a27..a2c85138 100644 --- a/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp +++ b/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp @@ -38,18 +38,18 @@ int main(int argc, char **argv) { // ---------------------------------------------------------------------------- // Display information about the alpha complex // ---------------------------------------------------------------------------- - std::cout << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() + std::clog << "Alpha complex is of dimension " << simplex.dimension() << " - " << 
simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." << std::endl; - std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : simplex.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : simplex.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " + std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << std::endl; } } return 0; diff --git a/src/Alpha_complex/example/Alpha_complex_from_off.cpp b/src/Alpha_complex/example/Alpha_complex_from_off.cpp index 220a66de..dba1710e 100644 --- a/src/Alpha_complex/example/Alpha_complex_from_off.cpp +++ b/src/Alpha_complex/example/Alpha_complex_from_off.cpp @@ -30,7 +30,7 @@ int main(int argc, char **argv) { ouput_file_stream.open(std::string(argv[3])); streambuffer = ouput_file_stream.rdbuf(); } else { - streambuffer = std::cout.rdbuf(); + streambuffer = std::clog.rdbuf(); } Gudhi::Simplex_tree<> simplex; diff --git a/src/Alpha_complex/example/Alpha_complex_from_points.cpp b/src/Alpha_complex/example/Alpha_complex_from_points.cpp index 6526ca3a..c79535bf 100644 --- a/src/Alpha_complex/example/Alpha_complex_from_points.cpp +++ b/src/Alpha_complex/example/Alpha_complex_from_points.cpp @@ -35,18 +35,18 @@ int main() { // ---------------------------------------------------------------------------- // Display information about the alpha complex // ---------------------------------------------------------------------------- - std::cout << "Alpha complex is of dimension " << simplex.dimension() << + std::clog << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." 
<< std::endl; - std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : simplex.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : simplex.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << simplex.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] "; + std::clog << std::endl; } } return 0; diff --git a/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp index f181005a..64728470 100644 --- a/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp +++ b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp @@ -35,7 +35,7 @@ int main(int argc, char **argv) { ouput_file_stream.open(std::string(argv[3])); streambuffer = ouput_file_stream.rdbuf(); } else { - streambuffer = std::cout.rdbuf(); + streambuffer = std::clog.rdbuf(); } Gudhi::Simplex_tree<> simplex; diff --git a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp index fcf80802..c044194e 100644 --- a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp +++ b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp @@ -34,18 +34,18 @@ int main(int argc, char **argv) { // ---------------------------------------------------------------------------- // Display information about the alpha complex // ---------------------------------------------------------------------------- - std::cout << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() + std::clog << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." 
<< std::endl; - std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : simplex.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : simplex.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " + std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << std::endl; } } return 0; diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h index f2a05e95..0839ae6c 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h @@ -237,7 +237,7 @@ class Alpha_complex { for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) { if (!triangulation_->is_infinite(*vit)) { #ifdef DEBUG_TRACES - std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl; + std::clog << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl; #endif // DEBUG_TRACES vertex_handle_to_iterator_[vit->data()] = vit; } @@ -296,19 +296,19 @@ class Alpha_complex { ++cit) { Vector_vertex vertexVector; #ifdef DEBUG_TRACES - std::cout << "Simplex_tree insertion "; + std::clog << "Simplex_tree insertion "; #endif // DEBUG_TRACES for (auto vit = cit->vertices_begin(); vit != cit->vertices_end(); ++vit) { if (*vit != nullptr) { #ifdef DEBUG_TRACES - std::cout << " " << (*vit)->data(); + std::clog << " " << (*vit)->data(); #endif // DEBUG_TRACES // Vector of vertex construction for simplex_tree structure vertexVector.push_back((*vit)->data()); } } #ifdef DEBUG_TRACES - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES // Insert each simplex and its subfaces in the simplex tree - filtration is NaN complex.insert_simplex_and_subfaces(vertexVector, std::numeric_limits::quiet_NaN()); @@ -327,16 +327,16 @@ class Alpha_complex { if (decr_dim == f_simplex_dim) { pointVector.clear(); #ifdef DEBUG_TRACES - std::cout << "Sigma of dim " << decr_dim << " is"; + std::clog << "Sigma of dim " << decr_dim << " is"; #endif // DEBUG_TRACES for (auto vertex : complex.simplex_vertex_range(f_simplex)) { pointVector.push_back(get_point(vertex)); #ifdef DEBUG_TRACES - std::cout << " " << vertex; + std::clog << " " << vertex; #endif // DEBUG_TRACES } #ifdef DEBUG_TRACES - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma) if (std::isnan(complex.filtration(f_simplex))) { @@ -355,7 +355,7 @@ class Alpha_complex { } complex.assign_filtration(f_simplex, alpha_complex_filtration); #ifdef DEBUG_TRACES - std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl; + std::clog << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl; #endif // DEBUG_TRACES } // No need to propagate further, unweighted points all have value 0 @@ -387,13 +387,13 @@ class Alpha_complex { // ### Foreach Tau face of Sigma for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) { #ifdef DEBUG_TRACES - std::cout << " | --------------------------------------------------\n"; - std::cout << " | Tau "; + std::clog << " | 
--------------------------------------------------\n"; + std::clog << " | Tau "; for (auto vertex : complex.simplex_vertex_range(f_boundary)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << "is a face of Sigma\n"; - std::cout << " | isnan(complex.filtration(Tau)=" << std::isnan(complex.filtration(f_boundary)) << std::endl; + std::clog << "is a face of Sigma\n"; + std::clog << " | isnan(complex.filtration(Tau)=" << std::isnan(complex.filtration(f_boundary)) << std::endl; #endif // DEBUG_TRACES // ### If filt(Tau) is not NaN if (!std::isnan(complex.filtration(f_boundary))) { @@ -402,7 +402,7 @@ class Alpha_complex { complex.filtration(f_simplex)); complex.assign_filtration(f_boundary, alpha_complex_filtration); #ifdef DEBUG_TRACES - std::cout << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << complex.filtration(f_boundary) << std::endl; + std::clog << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << complex.filtration(f_boundary) << std::endl; #endif // DEBUG_TRACES // ### Else } else { @@ -432,7 +432,7 @@ class Alpha_complex { bool is_gab = is_gabriel(pointVector.begin(), pointVector.end(), point_for_gabriel) != CGAL::ON_BOUNDED_SIDE; #ifdef DEBUG_TRACES - std::cout << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl; + std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl; #endif // DEBUG_TRACES // ### If Tau is not Gabriel of Sigma if (false == is_gab) { @@ -440,7 +440,7 @@ class Alpha_complex { Filtration_value alpha_complex_filtration = complex.filtration(f_simplex); complex.assign_filtration(f_boundary, alpha_complex_filtration); #ifdef DEBUG_TRACES - std::cout << " | filt(Tau) = filt(Sigma) = " << complex.filtration(f_boundary) << std::endl; + std::clog << " | filt(Tau) = filt(Sigma) = " << complex.filtration(f_boundary) << std::endl; #endif // DEBUG_TRACES } } diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h index 7f96c94c..c40beba0 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h @@ -472,7 +472,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ alpha_shape_3_ptr_->filtration_with_alpha_values(dispatcher); #ifdef DEBUG_TRACES - std::cout << "filtration_with_alpha_values returns : " << objects.size() << " objects" << std::endl; + std::clog << "filtration_with_alpha_values returns : " << objects.size() << " objects" << std::endl; #endif // DEBUG_TRACES using Alpha_value_iterator = typename std::vector::const_iterator; @@ -484,7 +484,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ if (const Cell_handle* cell = CGAL::object_cast(&object_iterator)) { for (auto i = 0; i < 4; i++) { #ifdef DEBUG_TRACES - std::cout << "from cell[" << i << "] - Point coordinates (" << (*cell)->vertex(i)->point() << ")" + std::clog << "from cell[" << i << "] - Point coordinates (" << (*cell)->vertex(i)->point() << ")" << std::endl; #endif // DEBUG_TRACES vertex_list.push_back((*cell)->vertex(i)); @@ -496,7 +496,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ for (auto i = 0; i < 4; i++) { if ((*facet).second != i) { #ifdef DEBUG_TRACES - std::cout << "from facet=[" << i << "] - Point coordinates (" << (*facet).first->vertex(i)->point() << ")" + std::clog << "from facet=[" << i << "] - Point coordinates (" << 
(*facet).first->vertex(i)->point() << ")" << std::endl; #endif // DEBUG_TRACES vertex_list.push_back((*facet).first->vertex(i)); @@ -508,7 +508,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ } else if (const Edge* edge = CGAL::object_cast(&object_iterator)) { for (auto i : {(*edge).second, (*edge).third}) { #ifdef DEBUG_TRACES - std::cout << "from edge[" << i << "] - Point coordinates (" << (*edge).first->vertex(i)->point() << ")" + std::clog << "from edge[" << i << "] - Point coordinates (" << (*edge).first->vertex(i)->point() << ")" << std::endl; #endif // DEBUG_TRACES vertex_list.push_back((*edge).first->vertex(i)); @@ -519,7 +519,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ } else if (const Alpha_vertex_handle* vertex = CGAL::object_cast(&object_iterator)) { #ifdef DEBUG_TRACES count_vertices++; - std::cout << "from vertex - Point coordinates (" << (*vertex)->point() << ")" << std::endl; + std::clog << "from vertex - Point coordinates (" << (*vertex)->point() << ")" << std::endl; #endif // DEBUG_TRACES vertex_list.push_back((*vertex)); } @@ -531,7 +531,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ // alpha shape not found Complex_vertex_handle vertex = map_cgal_simplex_tree.size(); #ifdef DEBUG_TRACES - std::cout << "Point (" << the_alpha_shape_vertex->point() << ") not found - insert new vertex id " << vertex + std::clog << "Point (" << the_alpha_shape_vertex->point() << ") not found - insert new vertex id " << vertex << std::endl; #endif // DEBUG_TRACES the_simplex.push_back(vertex); @@ -540,7 +540,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ // alpha shape found Complex_vertex_handle vertex = the_map_iterator->second; #ifdef DEBUG_TRACES - std::cout << "Point (" << the_alpha_shape_vertex->point() << ") found as vertex id " << vertex << std::endl; + std::clog << "Point (" << the_alpha_shape_vertex->point() << ") found as vertex id " << vertex << std::endl; #endif // DEBUG_TRACES the_simplex.push_back(vertex); } @@ -549,7 +549,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ Filtration_value filtr = Value_from_iterator::perform(alpha_value_iterator); #ifdef DEBUG_TRACES - std::cout << "filtration = " << filtr << std::endl; + std::clog << "filtration = " << filtr << std::endl; #endif // DEBUG_TRACES complex.insert_simplex(the_simplex, static_cast(filtr)); GUDHI_CHECK(alpha_value_iterator != alpha_values.end(), "CGAL provided more simplices than values"); @@ -557,10 +557,10 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_ } #ifdef DEBUG_TRACES - std::cout << "vertices \t" << count_vertices << std::endl; - std::cout << "edges \t\t" << count_edges << std::endl; - std::cout << "facets \t\t" << count_facets << std::endl; - std::cout << "cells \t\t" << count_cells << std::endl; + std::clog << "vertices \t" << count_vertices << std::endl; + std::clog << "edges \t\t" << count_edges << std::endl; + std::clog << "facets \t\t" << count_facets << std::endl; + std::clog << "cells \t\t" << count_cells << std::endl; #endif // DEBUG_TRACES // -------------------------------------------------------------------------------------------- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension diff --git a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp index 
cd698a27..a4ecb6ad 100644 --- a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp +++ b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp @@ -54,7 +54,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { // ----------------- // Fast version // ----------------- - std::cout << "Fast alpha complex 3d" << std::endl; + std::clog << "Fast alpha complex 3d" << std::endl; std::vector points = get_points(); Fast_alpha_complex_3d alpha_complex(points); @@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { // ----------------- // Exact version // ----------------- - std::cout << "Exact alpha complex 3d" << std::endl; + std::clog << "Exact alpha complex 3d" << std::endl; std::vector exact_points = get_points(); Exact_alpha_complex_3d exact_alpha_complex(exact_points); @@ -105,13 +105,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { // --------------------- // Compare both versions // --------------------- - std::cout << "Exact Alpha complex 3d is of dimension " << exact_stree.dimension() << " - Fast is " + std::clog << "Exact Alpha complex 3d is of dimension " << exact_stree.dimension() << " - Fast is " << stree.dimension() << std::endl; BOOST_CHECK(exact_stree.dimension() == stree.dimension()); - std::cout << "Exact Alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Fast is " + std::clog << "Exact Alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Fast is " << stree.num_simplices() << std::endl; BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices()); - std::cout << "Exact Alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Fast is " + std::clog << "Exact Alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Fast is " << stree.num_vertices() << std::endl; BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices()); @@ -119,18 +119,18 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { while (sh != stree.filtration_simplex_range().end()) { std::vector simplex; std::vector exact_simplex; - std::cout << "Fast ( "; + std::clog << "Fast ( "; for (auto vertex : stree.simplex_vertex_range(*sh)) { simplex.push_back(vertex); - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> [" << stree.filtration(*sh) << "] "; + std::clog << ") -> [" << stree.filtration(*sh) << "] "; // Find it in the exact structure auto sh_exact = exact_stree.find(simplex); BOOST_CHECK(sh_exact != exact_stree.null_simplex()); - std::cout << " versus [" << exact_stree.filtration(sh_exact) << "] " << std::endl; + std::clog << " versus [" << exact_stree.filtration(sh_exact) << "] " << std::endl; // Exact and non-exact version is not exactly the same due to float comparison GUDHI_TEST_FLOAT_EQUALITY_CHECK(exact_stree.filtration(sh_exact), stree.filtration(*sh)); @@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { // ----------------- // Safe version // ----------------- - std::cout << "Safe alpha complex 3d" << std::endl; + std::clog << "Safe alpha complex 3d" << std::endl; std::vector safe_points = get_points(); Safe_alpha_complex_3d safe_alpha_complex(safe_points); @@ -165,13 +165,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { // --------------------- // Compare both versions // --------------------- - std::cout << "Safe Alpha complex 3d is of dimension " << safe_stree.dimension() << " - Fast is " + std::clog << "Safe Alpha complex 3d is of dimension " << safe_stree.dimension() << " - Fast is " << stree.dimension() << std::endl; 
BOOST_CHECK(safe_stree.dimension() == stree.dimension()); - std::cout << "Safe Alpha complex 3d num_simplices " << safe_stree.num_simplices() << " - Fast is " + std::clog << "Safe Alpha complex 3d num_simplices " << safe_stree.num_simplices() << " - Fast is " << stree.num_simplices() << std::endl; BOOST_CHECK(safe_stree.num_simplices() == stree.num_simplices()); - std::cout << "Safe Alpha complex 3d num_vertices " << safe_stree.num_vertices() << " - Fast is " + std::clog << "Safe Alpha complex 3d num_vertices " << safe_stree.num_vertices() << " - Fast is " << stree.num_vertices() << std::endl; BOOST_CHECK(safe_stree.num_vertices() == stree.num_vertices()); @@ -179,18 +179,18 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) { while (safe_sh != stree.filtration_simplex_range().end()) { std::vector simplex; std::vector exact_simplex; - std::cout << "Fast ( "; + std::clog << "Fast ( "; for (auto vertex : stree.simplex_vertex_range(*safe_sh)) { simplex.push_back(vertex); - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> [" << stree.filtration(*safe_sh) << "] "; + std::clog << ") -> [" << stree.filtration(*safe_sh) << "] "; // Find it in the exact structure auto sh_exact = safe_stree.find(simplex); BOOST_CHECK(sh_exact != safe_stree.null_simplex()); - std::cout << " versus [" << safe_stree.filtration(sh_exact) << "] " << std::endl; + std::clog << " versus [" << safe_stree.filtration(sh_exact) << "] " << std::endl; // Exact and non-exact version is not exactly the same due to float comparison GUDHI_TEST_FLOAT_EQUALITY_CHECK(safe_stree.filtration(sh_exact), stree.filtration(*safe_sh), 1e-15); diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp index 27b671dd..da1d8004 100644 --- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp +++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp @@ -48,7 +48,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of // ---------------------------------------------------------------------------- std::string off_file_name("alphacomplexdoc.off"); double max_alpha_square_value = 60.0; - std::cout << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" << + std::clog << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" << max_alpha_square_value << "==========" << std::endl; Gudhi::alpha_complex::Alpha_complex alpha_complex_from_file(off_file_name); @@ -56,29 +56,29 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of Gudhi::Simplex_tree<> simplex_tree_60; BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_60, max_alpha_square_value)); - std::cout << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl; + std::clog << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl; BOOST_CHECK(simplex_tree_60.dimension() == 2); - std::cout << "simplex_tree_60.num_vertices()=" << simplex_tree_60.num_vertices() << std::endl; + std::clog << "simplex_tree_60.num_vertices()=" << simplex_tree_60.num_vertices() << std::endl; BOOST_CHECK(simplex_tree_60.num_vertices() == 7); - std::cout << "simplex_tree_60.num_simplices()=" << simplex_tree_60.num_simplices() << std::endl; + std::clog << "simplex_tree_60.num_simplices()=" << simplex_tree_60.num_simplices() << std::endl; BOOST_CHECK(simplex_tree_60.num_simplices() == 25); max_alpha_square_value = 59.0; - std::cout << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" << + std::clog << 
"========== OFF FILE NAME = " << off_file_name << " - alpha²=" << max_alpha_square_value << "==========" << std::endl; Gudhi::Simplex_tree<> simplex_tree_59; BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_59, max_alpha_square_value)); - std::cout << "simplex_tree_59.dimension()=" << simplex_tree_59.dimension() << std::endl; + std::clog << "simplex_tree_59.dimension()=" << simplex_tree_59.dimension() << std::endl; BOOST_CHECK(simplex_tree_59.dimension() == 2); - std::cout << "simplex_tree_59.num_vertices()=" << simplex_tree_59.num_vertices() << std::endl; + std::clog << "simplex_tree_59.num_vertices()=" << simplex_tree_59.num_vertices() << std::endl; BOOST_CHECK(simplex_tree_59.num_vertices() == 7); - std::cout << "simplex_tree_59.num_simplices()=" << simplex_tree_59.num_simplices() << std::endl; + std::clog << "simplex_tree_59.num_simplices()=" << simplex_tree_59.num_simplices() << std::endl; BOOST_CHECK(simplex_tree_59.num_simplices() == 23); } @@ -115,30 +115,30 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) { // ---------------------------------------------------------------------------- Gudhi::alpha_complex::Alpha_complex alpha_complex_from_points(points); - std::cout << "========== Alpha_complex_from_points ==========" << std::endl; + std::clog << "========== Alpha_complex_from_points ==========" << std::endl; Gudhi::Simplex_tree<> simplex_tree; BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree)); // Another way to check num_simplices - std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; int num_simplices = 0; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { num_simplices++; - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << std::endl; } BOOST_CHECK(num_simplices == 15); - std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; + std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; BOOST_CHECK(simplex_tree.num_simplices() == 15); - std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; + std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; BOOST_CHECK(simplex_tree.dimension() == 3); - std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; + std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; BOOST_CHECK(simplex_tree.num_vertices() == points.size()); for (auto f_simplex : simplex_tree.filtration_simplex_range()) { @@ -162,22 +162,22 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) { } Point_4 p0 = alpha_complex_from_points.get_point(0); - std::cout << "alpha_complex_from_points.get_point(0)=" << p0 << std::endl; + std::clog << "alpha_complex_from_points.get_point(0)=" << p0 << std::endl; BOOST_CHECK(4 == p0.dimension()); BOOST_CHECK(is_point_in_list(points, p0)); Point_4 p1 = alpha_complex_from_points.get_point(1); - std::cout << "alpha_complex_from_points.get_point(1)=" << p1 << std::endl; + std::clog << "alpha_complex_from_points.get_point(1)=" 
<< p1 << std::endl; BOOST_CHECK(4 == p1.dimension()); BOOST_CHECK(is_point_in_list(points, p1)); Point_4 p2 = alpha_complex_from_points.get_point(2); - std::cout << "alpha_complex_from_points.get_point(2)=" << p2 << std::endl; + std::clog << "alpha_complex_from_points.get_point(2)=" << p2 << std::endl; BOOST_CHECK(4 == p2.dimension()); BOOST_CHECK(is_point_in_list(points, p2)); Point_4 p3 = alpha_complex_from_points.get_point(3); - std::cout << "alpha_complex_from_points.get_point(3)=" << p3 << std::endl; + std::clog << "alpha_complex_from_points.get_point(3)=" << p3 << std::endl; BOOST_CHECK(4 == p3.dimension()); BOOST_CHECK(is_point_in_list(points, p3)); @@ -194,24 +194,24 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) { BOOST_CHECK(modified); // Another way to check num_simplices - std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl; num_simplices = 0; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { num_simplices++; - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << std::endl; } BOOST_CHECK(num_simplices == 10); - std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; + std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; BOOST_CHECK(simplex_tree.num_simplices() == 10); - std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; + std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; BOOST_CHECK(simplex_tree.dimension() == 1); - std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; + std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; BOOST_CHECK(simplex_tree.num_vertices() == 4); for (auto f_simplex : simplex_tree.filtration_simplex_range()) { @@ -231,7 +231,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) { } BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, list_of_kernel_variants) { - std::cout << "========== Alpha_complex_from_empty_points ==========" << std::endl; + std::clog << "========== Alpha_complex_from_empty_points ==========" << std::endl; // ---------------------------------------------------------------------------- // Init of an empty list of points @@ -249,13 +249,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, lis Gudhi::Simplex_tree<> simplex_tree; BOOST_CHECK(!alpha_complex_from_points.create_complex(simplex_tree)); - std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; + std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl; BOOST_CHECK(simplex_tree.num_simplices() == 0); - std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; + std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl; BOOST_CHECK(simplex_tree.dimension() == -1); - std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; + std::clog << "simplex_tree.num_vertices()=" << 
simplex_tree.num_vertices() << std::endl; BOOST_CHECK(simplex_tree.num_vertices() == points.size()); } @@ -264,7 +264,7 @@ using Exact_kernel_2 = CGAL::Epeck_d< CGAL::Dimension_tag<2> >; using list_of_kernel_2_variants = boost::mpl::list; BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel, list_of_kernel_2_variants) { - std::cout << "========== Alpha_complex_with_duplicated_points ==========" << std::endl; + std::clog << "========== Alpha_complex_with_duplicated_points ==========" << std::endl; using Point = typename TestedKernel::Point_d; using Vector_of_points = std::vector; @@ -287,14 +287,14 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel // ---------------------------------------------------------------------------- // Init of an alpha complex from the list of points // ---------------------------------------------------------------------------- - std::cout << "Init" << std::endl; + std::clog << "Init" << std::endl; Gudhi::alpha_complex::Alpha_complex alpha_complex_from_points(points); Gudhi::Simplex_tree<> simplex_tree; - std::cout << "create_complex" << std::endl; + std::clog << "create_complex" << std::endl; BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree)); - std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() + std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl; BOOST_CHECK(simplex_tree.num_vertices() < points.size()); } diff --git a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp index 731763fa..9eef920b 100644 --- a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp +++ b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp @@ -43,14 +43,14 @@ typedef boost::mpl::list p_points; // Not important, this is not what we want to check p_points.push_back(Bare_point_3(0.0, 0.0, 0.0)); - std::cout << "Check exception throw in debug mode" << std::endl; + std::clog << "Check exception throw in debug mode" << std::endl; // Check it throws an exception when the cuboid is not iso BOOST_CHECK_THROW(Periodic_alpha_complex_3d periodic_alpha_complex(p_points, 0., 0., 0., 0.9, 1., 1.), std::invalid_argument); @@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) { // --------------------- // Fast periodic version // --------------------- - std::cout << "Fast periodic alpha complex 3d" << std::endl; + std::clog << "Fast periodic alpha complex 3d" << std::endl; using Creator = CGAL::Creator_uniform_3; CGAL::Random random(7); @@ -106,7 +106,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) { // ---------------------- // Exact periodic version // ---------------------- - std::cout << "Exact periodic alpha complex 3d" << std::endl; + std::clog << "Exact periodic alpha complex 3d" << std::endl; std::vector e_p_points; @@ -122,13 +122,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) { // --------------------- // Compare both versions // --------------------- - std::cout << "Exact periodic alpha complex 3d is of dimension " << exact_stree.dimension() << " - Non exact is " + std::clog << "Exact periodic alpha complex 3d is of dimension " << exact_stree.dimension() << " - Non exact is " << stree.dimension() << std::endl; BOOST_CHECK(exact_stree.dimension() == stree.dimension()); - std::cout << "Exact periodic alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Non exact is " + std::clog << "Exact periodic alpha complex 3d num_simplices 
" << exact_stree.num_simplices() << " - Non exact is " << stree.num_simplices() << std::endl; BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices()); - std::cout << "Exact periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Non exact is " + std::clog << "Exact periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Non exact is " << stree.num_vertices() << std::endl; BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices()); @@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) { // ---------------------- // Safe periodic version // ---------------------- - std::cout << "Safe periodic alpha complex 3d" << std::endl; + std::clog << "Safe periodic alpha complex 3d" << std::endl; std::vector s_p_points; diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp index 8035f6e8..6b31bea6 100644 --- a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp +++ b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp @@ -55,13 +55,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_throw, Weighted_alpha_compl // weights size is different from w_points size to make weighted Alpha_complex_3d throw in debug mode std::vector weights = {0.01, 0.005, 0.006, 0.01, 0.009, 0.001}; - std::cout << "Check exception throw in debug mode" << std::endl; + std::clog << "Check exception throw in debug mode" << std::endl; BOOST_CHECK_THROW(Weighted_alpha_complex_3d wac(w_points, weights), std::invalid_argument); } #endif BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, weighted_variants_type_list) { - std::cout << "Weighted alpha complex 3d from points and weights" << std::endl; + std::clog << "Weighted alpha complex 3d from points and weights" << std::endl; using Bare_point_3 = typename Weighted_alpha_complex_3d::Bare_point_3; std::vector w_points; w_points.push_back(Bare_point_3(0.0, 0.0, 0.0)); @@ -78,7 +78,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, Gudhi::Simplex_tree<> stree; alpha_complex_p_a_w.create_complex(stree); - std::cout << "Weighted alpha complex 3d from weighted points" << std::endl; + std::clog << "Weighted alpha complex 3d from weighted points" << std::endl; using Weighted_point_3 = typename Weighted_alpha_complex_3d::Weighted_point_3; std::vector weighted_points; @@ -112,13 +112,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, // --------------------- // Compare both versions // --------------------- - std::cout << "Weighted alpha complex 3d is of dimension " << stree_bis.dimension() << " - versus " + std::clog << "Weighted alpha complex 3d is of dimension " << stree_bis.dimension() << " - versus " << stree.dimension() << std::endl; BOOST_CHECK(stree_bis.dimension() == stree.dimension()); - std::cout << "Weighted alpha complex 3d num_simplices " << stree_bis.num_simplices() << " - versus " + std::clog << "Weighted alpha complex 3d num_simplices " << stree_bis.num_simplices() << " - versus " << stree.num_simplices() << std::endl; BOOST_CHECK(stree_bis.num_simplices() == stree.num_simplices()); - std::cout << "Weighted alpha complex 3d num_vertices " << stree_bis.num_vertices() << " - versus " + std::clog << "Weighted alpha complex 3d num_vertices " << stree_bis.num_vertices() << " - versus " << stree.num_vertices() << std::endl; BOOST_CHECK(stree_bis.num_vertices() == stree.num_vertices()); @@ -127,18 +127,18 
@@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, std::vector simplex; std::vector exact_simplex; #ifdef DEBUG_TRACES - std::cout << " ( "; + std::clog << " ( "; #endif for (auto vertex : stree.simplex_vertex_range(*sh)) { simplex.push_back(vertex); #ifdef DEBUG_TRACES - std::cout << vertex << " "; + std::clog << vertex << " "; #endif } #ifdef DEBUG_TRACES - std::cout << ") -> " + std::clog << ") -> " << "[" << stree.filtration(*sh) << "] "; - std::cout << std::endl; + std::clog << std::endl; #endif // Find it in the exact structure diff --git a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp index b09e92d5..610b9f3d 100644 --- a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp +++ b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp @@ -45,7 +45,7 @@ typedef boost::mpl::list; CGAL::Random random(7); @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe p_weights.push_back(random.get_double(0., 0.01)); } - std::cout << "Cuboid is not iso exception" << std::endl; + std::clog << "Cuboid is not iso exception" << std::endl; // Check it throws an exception when the cuboid is not iso BOOST_CHECK_THROW( Weighted_periodic_alpha_complex_3d wp_alpha_complex(wp_points, p_weights, -1., -1., -1., 0.9, 1., 1.), @@ -83,7 +83,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe Weighted_periodic_alpha_complex_3d wp_alpha_complex(wp_points, p_weights, -1., -1., -1., 1., 1., 1.1), std::invalid_argument); - std::cout << "0 <= point.weight() < 1/64 * domain_size * domain_size exception" << std::endl; + std::clog << "0 <= point.weight() < 1/64 * domain_size * domain_size exception" << std::endl; // Weights must be in range ]0, 1/64 = 0.015625[ double temp = p_weights[25]; p_weights[25] = 1.0; @@ -97,7 +97,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe std::invalid_argument); p_weights[14] = temp; - std::cout << "wp_points and p_weights size exception" << std::endl; + std::clog << "wp_points and p_weights size exception" << std::endl; // Weights and points must have the same size // + 1 p_weights.push_back(1e-10); @@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) { // --------------------- // Fast weighted periodic version // --------------------- - std::cout << "Fast weighted periodic alpha complex 3d" << std::endl; + std::clog << "Fast weighted periodic alpha complex 3d" << std::endl; using Creator = CGAL::Creator_uniform_3; CGAL::Random random(7); @@ -140,7 +140,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) { // ---------------------- // Exact weighted periodic version // ---------------------- - std::cout << "Exact weighted periodic alpha complex 3d" << std::endl; + std::clog << "Exact weighted periodic alpha complex 3d" << std::endl; std::vector e_p_points; @@ -156,13 +156,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) { // --------------------- // Compare both versions // --------------------- - std::cout << "Exact weighted periodic alpha complex 3d is of dimension " << exact_stree.dimension() + std::clog << "Exact weighted periodic alpha complex 3d is of dimension " << exact_stree.dimension() << " - Non exact is " << stree.dimension() << std::endl; BOOST_CHECK(exact_stree.dimension() == stree.dimension()); - std::cout << "Exact weighted periodic alpha complex 3d 
num_simplices " << exact_stree.num_simplices() + std::clog << "Exact weighted periodic alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Non exact is " << stree.num_simplices() << std::endl; BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices()); - std::cout << "Exact weighted periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() + std::clog << "Exact weighted periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Non exact is " << stree.num_vertices() << std::endl; BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices()); @@ -189,7 +189,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) { // ---------------------- // Safe weighted periodic version // ---------------------- - std::cout << "Safe weighted periodic alpha complex 3d" << std::endl; + std::clog << "Safe weighted periodic alpha complex 3d" << std::endl; std::vector s_p_points; diff --git a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp index 929fc2e8..e93c412e 100644 --- a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp +++ b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp @@ -225,7 +225,7 @@ int main(int argc, char **argv) { // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); - std::cout << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl; + std::clog << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl; // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree, true); // initializes the coefficient field for homology @@ -237,7 +237,7 @@ int main(int argc, char **argv) { if (output_file_diag.empty()) { pcoh.output_diagram(); } else { - std::cout << "Result in file: " << output_file_diag << std::endl; + std::clog << "Result in file: " << output_file_diag << std::endl; std::ofstream out(output_file_diag); pcoh.output_diagram(out); out.close(); @@ -266,7 +266,7 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool "cuboid-file,c", po::value(&cuboid_file), "Name of file describing the periodic domain. Format is:\n min_hx min_hy min_hz\n max_hx max_hy max_hz")( "output-file,o", po::value(&output_file_diag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-alpha-square-value,r", po::value(&alpha_square_max_value) ->default_value(std::numeric_limits::infinity()), @@ -288,18 +288,18 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool po::notify(vm); if (vm.count("help") || !vm.count("input-file") || !vm.count("weight-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a 3D Alpha complex defined on a set of input points.\n"; - std::cout << "3D Alpha complex can be safe (by default) exact or fast, weighted and/or periodic\n\n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients.\n\n"; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a 3D Alpha complex defined on a set of input points.\n"; + std::clog << "3D Alpha complex can be safe (by default) exact or fast, weighted and/or periodic\n\n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients.\n\n"; - std::cout << "Usage: " << argv[0] << " [options] input-file weight-file\n\n"; - std::cout << visible << std::endl; + std::clog << "Usage: " << argv[0] << " [options] input-file weight-file\n\n"; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp index 486347cc..be60ff78 100644 --- a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp +++ b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp @@ -72,13 +72,13 @@ int main(int argc, char **argv) { // ---------------------------------------------------------------------------- // Display information about the alpha complex // ---------------------------------------------------------------------------- - std::cout << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() + std::clog << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." 
<< std::endl; // Sort the simplices in the order of the filtration simplex.initialize_filtration(); - std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl; + std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl; // Compute the persistence diagram of the complex Gudhi::persistent_cohomology::Persistent_cohomology pcoh( simplex); @@ -91,7 +91,7 @@ int main(int argc, char **argv) { if (output_file_diag.empty()) { pcoh.output_diagram(); } else { - std::cout << "Result in file: " << output_file_diag << std::endl; + std::clog << "Result in file: " << output_file_diag << std::endl; std::ofstream out(output_file_diag); pcoh.output_diagram(out); out.close(); @@ -114,7 +114,7 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool "fast,f", po::bool_switch(&fast), "To activate fast version of Alpha complex (default is false, not available if exact is set)")( "output-file,o", po::value(&output_file_diag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. Default print in std::clog")( "max-alpha-square-value,r", po::value(&alpha_square_max_value) ->default_value(std::numeric_limits::infinity()), "Maximal alpha square value for the Alpha complex construction.")( @@ -135,17 +135,17 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of an Alpha complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of an Alpha complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp b/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp index 46ea8f2e..e5512418 100644 --- a/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp +++ b/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp @@ -21,7 +21,7 @@ int main(int argc, char** argv) { srand(time(0)); - std::cout + std::clog << "This program computes persistent homology, by using bitmap_cubical_complex class, of cubical " << "complexes. 
The first parameter of the program is the dimension D of the bitmap. The next D parameters are " << "number of top dimensional cubes in each dimension of the bitmap. The program will create random cubical " diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h index 2f95dff3..aa255ec2 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h @@ -69,7 +69,7 @@ class Bitmap_cubical_complex : public T { Bitmap_cubical_complex(const char* perseus_style_file) : T(perseus_style_file), key_associated_to_simplex(this->total_number_of_cells + 1) { if (globalDbg) { - std::cout << "Bitmap_cubical_complex( const char* perseus_style_file )\n"; + std::clog << "Bitmap_cubical_complex( const char* perseus_style_file )\n"; } for (std::size_t i = 0; i != this->total_number_of_cells; ++i) { this->key_associated_to_simplex[i] = i; @@ -137,7 +137,7 @@ class Bitmap_cubical_complex : public T { **/ static Simplex_handle null_simplex() { if (globalDbg) { - std::cout << "Simplex_handle null_simplex()\n"; + std::clog << "Simplex_handle null_simplex()\n"; } return std::numeric_limits::max(); } @@ -152,7 +152,7 @@ class Bitmap_cubical_complex : public T { **/ inline unsigned dimension(Simplex_handle sh) const { if (globalDbg) { - std::cout << "unsigned dimension(const Simplex_handle& sh)\n"; + std::clog << "unsigned dimension(const Simplex_handle& sh)\n"; } if (sh != null_simplex()) return this->get_dimension_of_a_cell(sh); return -1; @@ -163,7 +163,7 @@ class Bitmap_cubical_complex : public T { **/ Filtration_value filtration(Simplex_handle sh) { if (globalDbg) { - std::cout << "Filtration_value filtration(const Simplex_handle& sh)\n"; + std::clog << "Filtration_value filtration(const Simplex_handle& sh)\n"; } // Returns the filtration value of a simplex. 
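// A minimal sketch (simplified names) of the trace idiom every accessor in this
// class follows — the real guard is the `globalDbg` flag used above — showing
// why the migration targets std::clog: enabling the traces must never corrupt
// stdout, which callers may be parsing for results.
//
//   static const bool globalDbg = false;               // flip on to trace calls
//   Filtration_value filtration(Simplex_handle sh) {
//     if (globalDbg) std::clog << "filtration(sh)\n";  // diagnostic only
//     if (sh != null_simplex()) return this->data[sh];
//     return std::numeric_limits<Filtration_value>::infinity();  // assumed fallback
//   }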
if (sh != null_simplex()) return this->data[sh]; @@ -175,7 +175,7 @@ class Bitmap_cubical_complex : public T { **/ static Simplex_key null_key() { if (globalDbg) { - std::cout << "Simplex_key null_key()\n"; + std::clog << "Simplex_key null_key()\n"; } return std::numeric_limits::max(); } @@ -185,7 +185,7 @@ class Bitmap_cubical_complex : public T { **/ Simplex_key key(Simplex_handle sh) const { if (globalDbg) { - std::cout << "Simplex_key key(const Simplex_handle& sh)\n"; + std::clog << "Simplex_key key(const Simplex_handle& sh)\n"; } if (sh != null_simplex()) { return this->key_associated_to_simplex[sh]; @@ -198,7 +198,7 @@ class Bitmap_cubical_complex : public T { **/ Simplex_handle simplex(Simplex_key key) { if (globalDbg) { - std::cout << "Simplex_handle simplex(Simplex_key key)\n"; + std::clog << "Simplex_handle simplex(Simplex_key key)\n"; } if (key != null_key()) { return this->simplex_associated_to_key[key]; @@ -211,7 +211,7 @@ class Bitmap_cubical_complex : public T { **/ void assign_key(Simplex_handle sh, Simplex_key key) { if (globalDbg) { - std::cout << "void assign_key(Simplex_handle& sh, Simplex_key key)\n"; + std::clog << "void assign_key(Simplex_handle& sh, Simplex_key key)\n"; } if (key == null_key()) return; this->key_associated_to_simplex[sh] = key; @@ -251,7 +251,7 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator operator++() { if (globalDbg) { - std::cout << "Filtration_simplex_iterator operator++\n"; + std::clog << "Filtration_simplex_iterator operator++\n"; } ++this->position; return (*this); @@ -265,7 +265,7 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator& operator=(const Filtration_simplex_iterator& rhs) { if (globalDbg) { - std::cout << "Filtration_simplex_iterator operator =\n"; + std::clog << "Filtration_simplex_iterator operator =\n"; } this->b = rhs.b; this->position = rhs.position; @@ -274,21 +274,21 @@ class Bitmap_cubical_complex : public T { bool operator==(const Filtration_simplex_iterator& rhs) const { if (globalDbg) { - std::cout << "bool operator == ( const Filtration_simplex_iterator& rhs )\n"; + std::clog << "bool operator == ( const Filtration_simplex_iterator& rhs )\n"; } return (this->position == rhs.position); } bool operator!=(const Filtration_simplex_iterator& rhs) const { if (globalDbg) { - std::cout << "bool operator != ( const Filtration_simplex_iterator& rhs )\n"; + std::clog << "bool operator != ( const Filtration_simplex_iterator& rhs )\n"; } return !(*this == rhs); } Simplex_handle operator*() { if (globalDbg) { - std::cout << "Simplex_handle operator*()\n"; + std::clog << "Simplex_handle operator*()\n"; } return this->b->simplex_associated_to_key[this->position]; } @@ -314,14 +314,14 @@ class Bitmap_cubical_complex : public T { Filtration_simplex_iterator begin() { if (globalDbg) { - std::cout << "Filtration_simplex_iterator begin() \n"; + std::clog << "Filtration_simplex_iterator begin() \n"; } return Filtration_simplex_iterator(this->b); } Filtration_simplex_iterator end() { if (globalDbg) { - std::cout << "Filtration_simplex_iterator end()\n"; + std::clog << "Filtration_simplex_iterator end()\n"; } Filtration_simplex_iterator it(this->b); it.position = this->b->simplex_associated_to_key.size(); @@ -347,7 +347,7 @@ class Bitmap_cubical_complex : public T { **/ Filtration_simplex_range filtration_simplex_range() { if (globalDbg) { - std::cout << "Filtration_simplex_range filtration_simplex_range()\n"; + std::clog << "Filtration_simplex_range filtration_simplex_range()\n"; } // Returns a 
range over the simplices of the complex in the order of the filtration return Filtration_simplex_range(this); @@ -370,8 +370,8 @@ class Bitmap_cubical_complex : public T { std::pair endpoints(Simplex_handle sh) { std::vector bdry = this->get_boundary_of_a_cell(sh); if (globalDbg) { - std::cout << "std::pair endpoints( Simplex_handle sh )\n"; - std::cout << "bdry.size() : " << bdry.size() << "\n"; + std::clog << "std::pair endpoints( Simplex_handle sh )\n"; + std::clog << "bdry.size() : " << bdry.size() << "\n"; } // this method returns two first elements from the boundary of sh. if (bdry.size() < 2) @@ -392,7 +392,7 @@ class Bitmap_cubical_complex : public T { public: Skeleton_simplex_iterator(Bitmap_cubical_complex* b, std::size_t d) : b(b), dimension(d) { if (globalDbg) { - std::cout << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n"; + std::clog << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n"; } // find the position of the first simplex of a dimension d this->position = 0; @@ -406,7 +406,7 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator operator++() { if (globalDbg) { - std::cout << "Skeleton_simplex_iterator operator++()\n"; + std::clog << "Skeleton_simplex_iterator operator++()\n"; } // increment the position as long as you did not get to the next element of the dimension dimension. ++this->position; @@ -425,7 +425,7 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator& operator=(const Skeleton_simplex_iterator& rhs) { if (globalDbg) { - std::cout << "Skeleton_simplex_iterator operator =\n"; + std::clog << "Skeleton_simplex_iterator operator =\n"; } this->b = rhs.b; this->position = rhs.position; @@ -435,21 +435,21 @@ class Bitmap_cubical_complex : public T { bool operator==(const Skeleton_simplex_iterator& rhs) const { if (globalDbg) { - std::cout << "bool operator ==\n"; + std::clog << "bool operator ==\n"; } return (this->position == rhs.position); } bool operator!=(const Skeleton_simplex_iterator& rhs) const { if (globalDbg) { - std::cout << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n"; + std::clog << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n"; } return !(*this == rhs); } Simplex_handle operator*() { if (globalDbg) { - std::cout << "Simplex_handle operator*() \n"; + std::clog << "Simplex_handle operator*() \n"; } return this->position; } @@ -476,14 +476,14 @@ class Bitmap_cubical_complex : public T { Skeleton_simplex_iterator begin() { if (globalDbg) { - std::cout << "Skeleton_simplex_iterator begin()\n"; + std::clog << "Skeleton_simplex_iterator begin()\n"; } return Skeleton_simplex_iterator(this->b, this->dimension); } Skeleton_simplex_iterator end() { if (globalDbg) { - std::cout << "Skeleton_simplex_iterator end()\n"; + std::clog << "Skeleton_simplex_iterator end()\n"; } Skeleton_simplex_iterator it(this->b, this->dimension); it.position = this->b->data.size(); @@ -500,7 +500,7 @@ class Bitmap_cubical_complex : public T { **/ Skeleton_simplex_range skeleton_simplex_range(unsigned dimension) { if (globalDbg) { - std::cout << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n"; + std::clog << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n"; } return Skeleton_simplex_range(this, dimension); } @@ -515,7 +515,7 @@ class Bitmap_cubical_complex : public T { template void Bitmap_cubical_complex::initialize_simplex_associated_to_key() { if (globalDbg) { - std::cout << "void 
Bitmap_cubical_complex::initialize_elements_ordered_according_to_filtration() \n"; + std::clog << "void Bitmap_cubical_complex::initialize_elements_ordered_according_to_filtration() \n"; } this->simplex_associated_to_key = std::vector(this->data.size()); std::iota(std::begin(simplex_associated_to_key), std::end(simplex_associated_to_key), 0); diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 96036fd4..1eb77c9c 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -408,7 +408,7 @@ class Bitmap_cubical_complex_base { void print_counter() const { for (std::size_t i = 0; i != this->counter.size(); ++i) { - std::cout << this->counter[i] << " "; + std::clog << this->counter[i] << " "; } } friend class Bitmap_cubical_complex_base; @@ -521,11 +521,11 @@ void Bitmap_cubical_complex_base::put_data_to_bins(std::size_t number_of_bins // now put the data into the appropriate bins: for (std::size_t i = 0; i != this->data.size(); ++i) { if (dbg) { - std::cout << "Before binning : " << this->data[i] << std::endl; + std::clog << "Before binning : " << this->data[i] << std::endl; } this->data[i] = min_max.first + dx * (this->data[i] - min_max.first) / number_of_bins; if (dbg) { - std::cout << "After binning : " << this->data[i] << std::endl; + std::clog << "After binning : " << this->data[i] << std::endl; } } } @@ -539,11 +539,11 @@ void Bitmap_cubical_complex_base::put_data_to_bins(T diameter_of_bin) { // now put the data into the appropriate bins: for (std::size_t i = 0; i != this->data.size(); ++i) { if (dbg) { - std::cout << "Before binning : " << this->data[i] << std::endl; + std::clog << "Before binning : " << this->data[i] << std::endl; } this->data[i] = min_max.first + diameter_of_bin * (this->data[i] - min_max.first) / number_of_bins; if (dbg) { - std::cout << "After binning : " << this->data[i] << std::endl; + std::clog << "After binning : " << this->data[i] << std::endl; } } } @@ -617,7 +617,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus inFiltration >> dimensionOfData; if (dbg) { - std::cout << "dimensionOfData : " << dimensionOfData << std::endl; + std::clog << "dimensionOfData : " << dimensionOfData << std::endl; } std::vector sizes; @@ -630,7 +630,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus sizes.push_back(size_in_this_dimension); dimensions *= size_in_this_dimension; if (dbg) { - std::cout << "size_in_this_dimension : " << size_in_this_dimension << std::endl; + std::clog << "size_in_this_dimension : " << size_in_this_dimension << std::endl; } } this->set_up_containers(sizes); @@ -651,7 +651,7 @@ void Bitmap_cubical_complex_base::read_perseus_style_file(const char* perseus } if (dbg) { - std::cout << "Cell of an index : " << it.compute_index_in_bitmap() + std::clog << "Cell of an index : " << it.compute_index_in_bitmap() << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap()) << " get the value : " << filtrationLevel << std::endl; } @@ -754,20 +754,20 @@ std::vector Bitmap_cubical_complex_base::get_coboundary_of_a_cel template unsigned Bitmap_cubical_complex_base::get_dimension_of_a_cell(std::size_t cell) const { bool dbg = false; - if (dbg) std::cout << "\n\n\n Computing position o a cell of an index : " << cell << std::endl; + if (dbg) std::clog << "\n\n\n Computing 
position of a cell of an index : " << cell << std::endl; unsigned dimension = 0; for (std::size_t i = this->multipliers.size(); i != 0; --i) { unsigned position = cell / this->multipliers[i - 1]; if (dbg) { - std::cout << "i-1 :" << i - 1 << std::endl; - std::cout << "cell : " << cell << std::endl; - std::cout << "position : " << position << std::endl; - std::cout << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl; + std::clog << "i-1 :" << i - 1 << std::endl; + std::clog << "cell : " << cell << std::endl; + std::clog << "position : " << position << std::endl; + std::clog << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl; } if (position % 2 == 1) { - if (dbg) std::cout << "Nonzero length in this direction \n"; + if (dbg) std::clog << "Nonzero length in this direction \n"; dimension++; } cell = cell % this->multipliers[i - 1]; @@ -803,9 +803,9 @@ void Bitmap_cubical_complex_base::impose_lower_star_filtration() { while (indices_to_consider.size()) { if (dbg) { - std::cout << "indices_to_consider in this iteration \n"; + std::clog << "indices_to_consider in this iteration \n"; for (std::size_t i = 0; i != indices_to_consider.size(); ++i) { - std::cout << indices_to_consider[i] << " "; + std::clog << indices_to_consider[i] << " "; } } std::vector new_indices_to_consider; @@ -813,14 +813,14 @@ void Bitmap_cubical_complex_base::impose_lower_star_filtration() { std::vector bd = this->get_boundary_of_a_cell(indices_to_consider[i]); for (std::size_t boundaryIt = 0; boundaryIt != bd.size(); ++boundaryIt) { if (dbg) { - std::cout << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]] + std::clog << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]] << " while of a cell: " << indices_to_consider[i] << " is: " << this->data[indices_to_consider[i]] << std::endl; } if (this->data[bd[boundaryIt]] > this->data[indices_to_consider[i]]) { this->data[bd[boundaryIt]] = this->data[indices_to_consider[i]]; if (dbg) { - std::cout << "Setting the value of a cell : " << bd[boundaryIt] + std::clog << "Setting the value of a cell : " << bd[boundaryIt] << " to : " << this->data[indices_to_consider[i]] << std::endl; } } diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h index 3942dc34..18901469 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h @@ -237,7 +237,7 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base::Bitmap_cubical_comp if (inFiltration.eof()) break; if (dbg) { - std::cout << "Cell of an index : " << it.compute_index_in_bitmap() + std::clog << "Cell of an index : " << it.compute_index_in_bitmap() << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap()) << " get the value : " << filtrationLevel << std::endl; } @@ -278,7 +278,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas std::size_t cell) const { bool dbg = false; if (dbg) { - std::cout << "Computations of boundary of a cell : " << cell << std::endl; + std::clog << "Computation of the boundary of a cell : " << cell << std::endl; } std::vector boundary_elements; @@ -300,7 +300,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas
boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cout << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; + std::clog << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; } } else { // in this direction we have to do boundary conditions. Therefore, we need to check if we are not at the end. @@ -313,7 +313,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cout << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; + std::clog << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " "; } } else { if (sum_of_dimensions % 2) { @@ -324,7 +324,7 @@ std::vector Bitmap_cubical_complex_periodic_boundary_conditions_bas boundary_elements.push_back(cell - this->multipliers[i - 1]); } if (dbg) { - std::cout << cell - this->multipliers[i - 1] << " " + std::clog << cell - this->multipliers[i - 1] << " " << cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1] << " "; } } diff --git a/src/Bitmap_cubical_complex/test/Bitmap_test.cpp b/src/Bitmap_cubical_complex/test/Bitmap_test.cpp index f18adb36..6f35b6da 100644 --- a/src/Bitmap_cubical_complex/test/Bitmap_test.cpp +++ b/src/Bitmap_cubical_complex/test/Bitmap_test.cpp @@ -1402,12 +1402,12 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_2d) { it != ba.all_cells_iterator_end(); ++it) { int i = 1; - // std::cout << "Element : " << *it << std::endl; + // std::clog << "Element : " << *it << std::endl; Bitmap_cubical_complex_periodic_boundary_conditions_base::Boundary_range bdrange = ba.boundary_range(*it); for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin(); bd != bdrange.end(); ++bd) { - // std::cout << *bd << " "; + // std::clog << *bd << " "; Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd); int j = 1; for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin(); @@ -1441,7 +1441,7 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_3d) { std::vector elems_in_boundary(number_of_all_elements, 0); for (Bitmap_cubical_complex_periodic_boundary_conditions::All_cells_iterator it = ba.all_cells_iterator_begin(); it != ba.all_cells_iterator_end(); ++it) { - // std::cout << "Element : " << *it << std::endl; + // std::clog << "Element : " << *it << std::endl; int i = 1; @@ -1449,7 +1449,7 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_3d) { for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin(); bd != bdrange.end(); ++bd) { Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd); - // std::cout << *bd << " "; + // std::clog << *bd << " "; int j = 1; for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin(); bd2 != second_bdrange.end(); ++bd2) { @@ -1551,7 +1551,7 @@ BOOST_AUTO_TEST_CASE(compute_incidence_between_cells_test_periodic_boundary_cond Bitmap_cubical_complex_periodic_boundary_conditions_base::Boundary_range bdrange = ba.boundary_range(*it); for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin(); bd != bdrange.end(); ++bd) { - // std::cout << *bd << " "; + // std::clog << *bd << " "; 
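// In miniature, the boundary-of-boundary tests above assert the chain-complex
// identity d(d(c)) = 0: each codimension-2 face of a cube is reached through
// exactly two facets carrying opposite incidence signs, so everything cancels.
// A sketch against the same objects (`ba`, `boundary_range`), assuming an
// incidence method matching the test's name:
//
//   std::map<std::size_t, int> coefficient;
//   for (auto face : ba.boundary_range(cell))
//     for (auto face2 : ba.boundary_range(face))
//       coefficient[face2] += ba.compute_incidence_between_cells(cell, face)
//                           * ba.compute_incidence_between_cells(face, face2);
//   for (const auto& entry : coefficient)
//     BOOST_CHECK(entry.second == 0);  // d(d(cell)) == 0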
Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd); for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin(); bd2 != second_bdrange.end(); ++bd2) { @@ -1571,11 +1571,11 @@ BOOST_AUTO_TEST_CASE(perseus_file_read) { auto it = increasing.top_dimensional_cells_iterator_begin(); double value = increasing.get_cell_data(*it); - std::cout << "First value of sinusoid.txt is " << value << std::endl; + std::clog << "First value of sinusoid.txt is " << value << std::endl; BOOST_CHECK(value == 10.); // Next value ++it; value = increasing.get_cell_data(*it); - std::cout << "Second value of sinusoid.txt is " << value << std::endl; + std::clog << "Second value of sinusoid.txt is " << value << std::endl; BOOST_CHECK(value == std::numeric_limits::infinity()); } diff --git a/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp b/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp index a9792c2d..510861cd 100644 --- a/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp +++ b/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp @@ -19,7 +19,7 @@ #include int main(int argc, char** argv) { - std::cout + std::clog << "This program computes persistent homology, by using bitmap_cubical_complex class, of cubical " << "complexes provided in text files in Perseus style (the only numbered in the first line is a dimension D of a" << "bitmap. In the lines I between 2 and D+1 there are numbers of top dimensional cells in the direction I. Let " @@ -62,7 +62,7 @@ int main(int argc, char** argv) { pcoh.output_diagram(out); out.close(); - std::cout << "Result in file: " << output_file_name << "\n"; + std::clog << "Result in file: " << output_file_name << "\n"; return 0; } diff --git a/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp b/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp index fa97bac0..86816417 100644 --- a/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp +++ b/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp @@ -20,7 +20,7 @@ #include int main(int argc, char** argv) { - std::cout + std::clog << "This program computes persistent homology, by using " << "Bitmap_cubical_complex_periodic_boundary_conditions class, of cubical complexes provided in text files in " << "Perseus style (the only numbered in the first line is a dimension D of a bitmap. 
In the lines I between 2 " @@ -64,7 +64,7 @@ int main(int argc, char** argv) { pcoh.output_diagram(out); out.close(); - std::cout << "Result in file: " << output_file_name << "\n"; + std::clog << "Result in file: " << output_file_name << "\n"; return 0; } diff --git a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h index bbc952e1..2a988b4b 100644 --- a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h +++ b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h @@ -52,7 +52,7 @@ int main() { diag2.emplace_back(0., 13.); double b = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2); - std::cout << "Bottleneck distance = " << b << std::endl; + std::clog << "Bottleneck distance = " << b << std::endl; } * \endcode * diff --git a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp index 6c0dc9bf..4769eca3 100644 --- a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp +++ b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp @@ -68,7 +68,7 @@ int main(int argc, char * argv[]) { Simplex_tree rips_stree; rips_complex.create_complex(rips_stree, dim_max); - std::cout << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension " + std::clog << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension " << rips_stree.dimension() << " \n"; // Sort the simplices in the order of the filtration @@ -89,7 +89,7 @@ int main(int argc, char * argv[]) { Simplex_tree alpha_stree; alpha_complex.create_complex(alpha_stree, threshold * threshold); - std::cout << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension " + std::clog << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension " << alpha_stree.dimension() << " \n"; // Sort the simplices in the order of the filtration @@ -115,12 +115,12 @@ int main(int argc, char * argv[]) { std::transform(alpha_intervals.begin(), alpha_intervals.end(), alpha_intervals.begin(), compute_root_square); double bottleneck_distance = Gudhi::persistence_diagram::bottleneck_distance(rips_intervals, alpha_intervals); - std::cout << "In dimension " << dim << ", bottleneck distance = " << bottleneck_distance << std::endl; + std::clog << "In dimension " << dim << ", bottleneck distance = " << bottleneck_distance << std::endl; if (bottleneck_distance > max_b_distance) max_b_distance = bottleneck_distance; } - std::cout << "================================================================================" << std::endl; - std::cout << "Bottleneck distance is " << max_b_distance << std::endl; + std::clog << "================================================================================" << std::endl; + std::clog << "Bottleneck distance is " << max_b_distance << std::endl; return 0; } @@ -162,17 +162,17 @@ void program_options(int argc, char * argv[] po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - 
std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Bottleneck_distance/example/bottleneck_basic_example.cpp b/src/Bottleneck_distance/example/bottleneck_basic_example.cpp index 61778a55..e8632a4f 100644 --- a/src/Bottleneck_distance/example/bottleneck_basic_example.cpp +++ b/src/Bottleneck_distance/example/bottleneck_basic_example.cpp @@ -20,9 +20,9 @@ int main() { double b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2); - std::cout << "Bottleneck distance = " << b << std::endl; + std::clog << "Bottleneck distance = " << b << std::endl; b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.1); - std::cout << "Approx bottleneck distance = " << b << std::endl; + std::clog << "Approx bottleneck distance = " << b << std::endl; } diff --git a/src/Bottleneck_distance/utilities/bottleneck_distance.cpp b/src/Bottleneck_distance/utilities/bottleneck_distance.cpp index d88a8a0b..01813ba1 100644 --- a/src/Bottleneck_distance/utilities/bottleneck_distance.cpp +++ b/src/Bottleneck_distance/utilities/bottleneck_distance.cpp @@ -18,7 +18,7 @@ int main(int argc, char** argv) { if (argc < 3) { - std::cout << "To run this program please provide as an input two files with persistence diagrams. Each file" << + std::clog << "To run this program please provide as an input two files with persistence diagrams. Each file" << " should contain a birth-death pair per line. Third, optional parameter is an error bound on the bottleneck" << " distance (set by default to the smallest positive double value). If you set the error bound to 0, be" << " aware this version is exact but expensive. The program will now terminate \n"; @@ -32,7 +32,7 @@ int main(int argc, char** argv) { tolerance = atof(argv[3]); } double b = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, tolerance); - std::cout << "The distance between the diagrams is : " << b << ". The tolerance is : " << tolerance << std::endl; + std::clog << "The distance between the diagrams is : " << b << ". 
The tolerance is : " << tolerance << std::endl; return 0; } diff --git a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp index d2d71dbf..e489e8a4 100644 --- a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp +++ b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp @@ -68,24 +68,24 @@ int main(int argc, char* argv[]) { Proximity_graph euclidean_prox_graph = Gudhi::compute_proximity_graph( off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance()); - std::cout << euclidean_clock << std::endl; + std::clog << euclidean_clock << std::endl; Gudhi::Clock miniball_clock("Minimal_enclosing_ball_radius"); // Compute the proximity graph of the points Proximity_graph miniball_prox_graph = Gudhi::compute_proximity_graph( off_reader.get_point_cloud(), threshold, Minimal_enclosing_ball_radius()); - std::cout << miniball_clock << std::endl; + std::clog << miniball_clock << std::endl; Gudhi::Clock common_miniball_clock("Gudhi::Minimal_enclosing_ball_radius()"); // Compute the proximity graph of the points Proximity_graph common_miniball_prox_graph = Gudhi::compute_proximity_graph( off_reader.get_point_cloud(), threshold, Gudhi::Minimal_enclosing_ball_radius()); - std::cout << common_miniball_clock << std::endl; + std::clog << common_miniball_clock << std::endl; boost::filesystem::path full_path(boost::filesystem::current_path()); - std::cout << "Current path is : " << full_path << std::endl; + std::clog << "Current path is : " << full_path << std::endl; - std::cout << "File name;Radius;Rips time;Cech time; Ratio Rips/Cech time;Rips nb simplices;Cech nb simplices;" + std::clog << "File name;Radius;Rips time;Cech time; Ratio Rips/Cech time;Rips nb simplices;Cech nb simplices;" << std::endl; boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end for (boost::filesystem::directory_iterator itr(boost::filesystem::current_path()); itr != end_itr; ++itr) { @@ -96,8 +96,8 @@ int main(int argc, char* argv[]) { Point p0 = off_reader.get_point_cloud()[0]; for (Filtration_value radius = 0.1; radius < 0.4; radius += 0.1) { - std::cout << itr->path().stem() << ";"; - std::cout << radius << ";"; + std::clog << itr->path().stem() << ";"; + std::clog << radius << ";"; Gudhi::Clock rips_clock("Rips computation"); Rips_complex rips_complex_from_points(off_reader.get_point_cloud(), radius, Gudhi::Minimal_enclosing_ball_radius()); @@ -107,7 +107,7 @@ int main(int argc, char* argv[]) { // Display information about the Rips complex // ------------------------------------------ double rips_sec = rips_clock.num_seconds(); - std::cout << rips_sec << ";"; + std::clog << rips_sec << ";"; Gudhi::Clock cech_clock("Cech computation"); Cech_complex cech_complex_from_points(off_reader.get_point_cloud(), radius); @@ -117,12 +117,12 @@ int main(int argc, char* argv[]) { // Display information about the Cech complex // ------------------------------------------ double cech_sec = cech_clock.num_seconds(); - std::cout << cech_sec << ";"; - std::cout << cech_sec / rips_sec << ";"; + std::clog << cech_sec << ";"; + std::clog << cech_sec / rips_sec << ";"; assert(rips_stree.num_simplices() >= cech_stree.num_simplices()); - std::cout << rips_stree.num_simplices() << ";"; - std::cout << cech_stree.num_simplices() << ";" << std::endl; + std::clog << rips_stree.num_simplices() << ";"; + std::clog << cech_stree.num_simplices() << ";" << std::endl; } } } diff --git a/src/Cech_complex/example/cech_complex_example_from_points.cpp 
b/src/Cech_complex/example/cech_complex_example_from_points.cpp index 3cc5a4df..1a1f708c 100644 --- a/src/Cech_complex/example/cech_complex_example_from_points.cpp +++ b/src/Cech_complex/example/cech_complex_example_from_points.cpp @@ -37,18 +37,18 @@ int main() { // ---------------------------------------------------------------------------- // Display information about the one skeleton Cech complex // ---------------------------------------------------------------------------- - std::cout << "Cech complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " + std::clog << "Cech complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." << std::endl; - std::cout << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " + std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << std::endl; } return 0; } diff --git a/src/Cech_complex/example/cech_complex_step_by_step.cpp b/src/Cech_complex/example/cech_complex_step_by_step.cpp index b3d05697..f59f0293 100644 --- a/src/Cech_complex/example/cech_complex_step_by_step.cpp +++ b/src/Cech_complex/example/cech_complex_step_by_step.cpp @@ -51,12 +51,12 @@ class Cech_blocker { for (auto vertex : simplex_tree_.simplex_vertex_range(sh)) { points.push_back(point_cloud_[vertex]); #ifdef DEBUG_TRACES - std::cout << "#(" << vertex << ")#"; + std::clog << "#(" << vertex << ")#"; #endif // DEBUG_TRACES } Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points); #ifdef DEBUG_TRACES - std::cout << "radius = " << radius << " - " << (radius > max_radius_) << std::endl; + std::clog << "radius = " << radius << " - " << (radius > max_radius_) << std::endl; #endif // DEBUG_TRACES simplex_tree_.assign_filtration(sh, radius); return (radius > max_radius_); @@ -96,23 +96,23 @@ int main(int argc, char* argv[]) { // expand the graph until dimension dim_max st.expansion_with_blockers(dim_max, Cech_blocker(st, max_radius, off_reader.get_point_cloud())); - std::cout << "The complex contains " << st.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << st.dimension() << " \n"; + std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << st.dimension() << " \n"; // Sort the simplices in the order of the filtration st.initialize_filtration(); #if DEBUG_TRACES - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " 
" + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << static_cast(vertex) << " "; + std::clog << static_cast(vertex) << " "; } - std::cout << std::endl; + std::clog << std::endl; } #endif // DEBUG_TRACES @@ -144,11 +144,11 @@ void program_options(int argc, char* argv[], std::string& off_file_points, Filtr po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Construct a Cech complex defined on a set of input points.\n \n"; + std::clog << std::endl; + std::clog << "Construct a Cech complex defined on a set of input points.\n \n"; - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h index 068cdde3..31b9aab5 100644 --- a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h +++ b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h @@ -53,12 +53,12 @@ class Cech_blocker { for (auto vertex : sc_ptr_->simplex_vertex_range(sh)) { points.push_back(cc_ptr_->get_point(vertex)); #ifdef DEBUG_TRACES - std::cout << "#(" << vertex << ")#"; + std::clog << "#(" << vertex << ")#"; #endif // DEBUG_TRACES } Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points); #ifdef DEBUG_TRACES - if (radius > cc_ptr_->max_radius()) std::cout << "radius > max_radius => expansion is blocked\n"; + if (radius > cc_ptr_->max_radius()) std::clog << "radius > max_radius => expansion is blocked\n"; #endif // DEBUG_TRACES sc_ptr_->assign_filtration(sh, radius); return (radius > cc_ptr_->max_radius()); diff --git a/src/Cech_complex/test/test_cech_complex.cpp b/src/Cech_complex/test/test_cech_complex.cpp index c6b15d7f..6e00d7b5 100644 --- a/src/Cech_complex/test/test_cech_complex.cpp +++ b/src/Cech_complex/test/test_cech_complex.cpp @@ -58,7 +58,7 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { points.push_back({-0.5, 2.}); // 10 Filtration_value max_radius = 1.0; - std::cout << "========== NUMBER OF POINTS = " << points.size() << " - Cech max_radius = " << max_radius + std::clog << "========== NUMBER OF POINTS = " << points.size() << " - Cech max_radius = " << max_radius << "==========" << std::endl; Cech_complex cech_complex_for_doc(points, max_radius); @@ -72,14 +72,14 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { const int DIMENSION_1 = 1; Simplex_tree st; cech_complex_for_doc.create_complex(st, DIMENSION_1); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; BOOST_CHECK(st.dimension() == DIMENSION_1); const int NUMBER_OF_VERTICES = 11; - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 27); // Check filtration values of vertices is 0.0 @@ -91,12 +91,12 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) { if (DIMENSION_1 == st.dimension(f_simplex)) { 
std::vector<Point> vp; - std::cout << "vertex = ("; + std::clog << "vertex = ("; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << ","; + std::clog << vertex << ","; vp.push_back(points.at(vertex)); } - std::cout << ") - distance =" << Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1)) + std::clog << ") - distance =" << Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1)) << " - filtration =" << st.filtration(f_simplex) << std::endl; BOOST_CHECK(vp.size() == 2); GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), @@ -112,13 +112,13 @@ Simplex_tree st2; cech_complex_for_doc.create_complex(st2, DIMENSION_2); - std::cout << "st2.dimension()=" << st2.dimension() << std::endl; + std::clog << "st2.dimension()=" << st2.dimension() << std::endl; BOOST_CHECK(st2.dimension() == DIMENSION_2); - std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl; + std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl; BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl; + std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl; BOOST_CHECK(st2.num_simplices() == 30); Point_cloud points012; @@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { Min_sphere ms012(dimension, points012.begin(), points012.end()); Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2})); - std::cout << "f012= " << f012 << " | ms012_radius= " << std::sqrt(ms012.squared_radius()) << std::endl; + std::clog << "f012= " << f012 << " | ms012_radius= " << std::sqrt(ms012.squared_radius()) << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::sqrt(ms012.squared_radius())); @@ -140,7 +140,7 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { Min_sphere ms1410(dimension, points1410.begin(), points1410.end()); Simplex_tree::Filtration_value f1410 = st2.filtration(st2.find({1, 4, 10})); - std::cout << "f1410= " << f1410 << " | ms1410_radius= " << std::sqrt(ms1410.squared_radius()) << std::endl; + std::clog << "f1410= " << f1410 << " | ms1410_radius= " << std::sqrt(ms1410.squared_radius()) << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f1410, std::sqrt(ms1410.squared_radius())); @@ -151,7 +151,7 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) { Min_sphere ms469(dimension, points469.begin(), points469.end()); Simplex_tree::Filtration_value f469 = st2.filtration(st2.find({4, 6, 9})); - std::cout << "f469= " << f469 << " | ms469_radius= " << std::sqrt(ms469.squared_radius()) << std::endl; + std::clog << "f469= " << f469 << " | ms469_radius= " << std::sqrt(ms469.squared_radius()) << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f469, std::sqrt(ms469.squared_radius())); @@ -178,35 +178,35 @@ BOOST_AUTO_TEST_CASE(Cech_complex_from_points) { // ---------------------------------------------------------------------------- Cech_complex cech_complex_from_points(points, 2.0); - std::cout << "========== cech_complex_from_points ==========" << std::endl; + std::clog << "========== cech_complex_from_points ==========" << std::endl; Simplex_tree st; const int DIMENSION = 3; cech_complex_from_points.create_complex(st, DIMENSION); // Another way to check num_simplices - std::cout << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl; 
int num_simplices = 0; for (auto f_simplex : st.filtration_simplex_range()) { num_simplices++; - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " + std::clog << ") -> " << "[" << st.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << std::endl; } BOOST_CHECK(num_simplices == 15); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 15); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; BOOST_CHECK(st.dimension() == DIMENSION); - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == 4); for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; + std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; switch (st.dimension(f_simplex)) { case 0: GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0); @@ -236,7 +236,7 @@ BOOST_AUTO_TEST_CASE(Cech_create_complex_throw) { // ---------------------------------------------------------------------------- std::string off_file_name("alphacomplexdoc.off"); double max_radius = 12.0; - std::cout << "========== OFF FILE NAME = " << off_file_name << " - Cech max_radius=" << max_radius + std::clog << "========== OFF FILE NAME = " << off_file_name << " - Cech max_radius=" << max_radius << "==========" << std::endl; Gudhi::Points_off_reader<Point> off_reader(off_file_name); @@ -245,7 +245,7 @@ BOOST_AUTO_TEST_CASE(Cech_create_complex_throw) { Simplex_tree stree; std::vector<int> simplex = {0, 1, 2}; stree.insert_simplex_and_subfaces(simplex); - std::cout << "Check exception throw in debug mode" << std::endl; + std::clog << "Check exception throw in debug mode" << std::endl; // throw exception because stree is not empty BOOST_CHECK_THROW(cech_complex_from_file.create_complex(stree, 1), std::invalid_argument); } diff --git a/src/Cech_complex/utilities/cech_persistence.cpp b/src/Cech_complex/utilities/cech_persistence.cpp index 8cfe018b..daea08e2 100644 --- a/src/Cech_complex/utilities/cech_persistence.cpp +++ b/src/Cech_complex/utilities/cech_persistence.cpp @@ -50,8 +50,8 @@ int main(int argc, char* argv[]) { Simplex_tree simplex_tree; cech_complex_from_file.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -85,7 +85,7 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::options_description visible("Allowed options", 100); visible.add_options()("help,h", "produce help message")( "output-file,o", po::value<std::string>(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-radius,r", po::value(&max_radius)->default_value(std::numeric_limits::infinity()), "Maximal length of an edge for the Cech complex construction.")( @@ -108,17 +108,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Cech complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Cech complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Contraction/example/Garland_heckbert.cpp b/src/Contraction/example/Garland_heckbert.cpp index 9c0b5205..489ef5d0 100644 --- a/src/Contraction/example/Garland_heckbert.cpp +++ b/src/Contraction/example/Garland_heckbert.cpp @@ -147,7 +147,7 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } - std::cout << "Load complex with " << complex.num_vertices() << " vertices" << std::endl; + std::clog << "Load complex with " << complex.num_vertices() << " vertices" << std::endl; int num_contractions = atoi(argv[3]); @@ -158,10 +158,10 @@ int main(int argc, char *argv[]) { Gudhi::contraction::make_link_valid_contraction(), new GH_visitor(complex)); - std::cout << "Contract " << num_contractions << " edges" << std::endl; + std::clog << "Contract " << num_contractions << " edges" << std::endl; contractor.contract_edges(num_contractions); - std::cout << "Final complex has " << + std::clog << "Final complex has " << complex.num_vertices() << " vertices, " << complex.num_edges() << " edges and " << complex.num_triangles() << " triangles." 
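// Context for the std::cout -> std::clog switch in these utilities, as a
// minimal stand-alone sketch (not GUDHI code; names and values illustrative):
// std::clog and std::cerr both write to stderr, but std::clog is buffered, so
// verbose traces stay cheap while stdout remains clean for the actual output
// (e.g. a persistence diagram).
#include <iostream>

int main() {
  std::clog << "verbose progress message\n";  // diagnostics: stderr, buffered
  std::cout << "0 1.0 2.5\n";                 // result data: stdout
  return 0;
}
// Usage: ./a.out > diagram.txt 2> trace.log keeps results and traces apart.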
<< std::endl; diff --git a/src/Contraction/example/Rips_contraction.cpp b/src/Contraction/example/Rips_contraction.cpp index b5ce06c1..42dd0910 100644 --- a/src/Contraction/example/Rips_contraction.cpp +++ b/src/Contraction/example/Rips_contraction.cpp @@ -52,13 +52,13 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } - std::cout << "Build the Rips complex with " << complex.num_vertices() << " vertices" << std::endl; + std::clog << "Build the Rips complex with " << complex.num_vertices() << " vertices" << std::endl; build_rips(complex, atof(argv[2])); Gudhi::Clock contraction_chrono("Time to simplify and enumerate simplices"); - std::cout << "Initial complex has " << + std::clog << "Initial complex has " << complex.num_vertices() << " vertices and " << complex.num_edges() << " edges" << std::endl; @@ -69,16 +69,16 @@ int main(int argc, char *argv[]) { Gudhi::contraction::make_remove_popable_blockers_visitor()); contractor.contract_edges(); - std::cout << "Counting final number of simplices \n"; + std::clog << "Counting final number of simplices \n"; unsigned num_simplices = std::distance(complex.complex_simplex_range().begin(), complex.complex_simplex_range().end()); - std::cout << "Final complex has " << + std::clog << "Final complex has " << complex.num_vertices() << " vertices, " << complex.num_edges() << " edges, " << complex.num_blockers() << " blockers and " << num_simplices << " simplices" << std::endl; - std::cout << contraction_chrono; + std::clog << contraction_chrono; return EXIT_SUCCESS; } diff --git a/src/Contraction/include/gudhi/Edge_contraction.h b/src/Contraction/include/gudhi/Edge_contraction.h index 6058d64b..6c0f4c78 100644 --- a/src/Contraction/include/gudhi/Edge_contraction.h +++ b/src/Contraction/include/gudhi/Edge_contraction.h @@ -164,13 +164,13 @@ int main (int argc, char *argv[]) std::cerr << "Unable to read file:"<()); contractor.contract_edges(); - std::cout << "Counting final number of simplices \n"; + std::clog << "Counting final number of simplices \n"; unsigned num_simplices = std::distance(complex.star_simplex_range().begin(),complex.star_simplex_range().end()); - std::cout << "Final complex has "<< + std::clog << "Final complex has "<< complex.num_vertices()<<" vertices, "<< complex.num_edges()<<" edges, "<< complex.num_blockers()<<" blockers and "<< num_simplices<<" simplices"< vertices) { if (!load_only_points_) { - // std::cout << "size:" << vertices.size() << std::endl; + // std::clog << "size:" << vertices.size() << std::endl; for (std::size_t i = 0; i < vertices.size(); ++i) for (std::size_t j = i + 1; j < vertices.size(); ++j) complex_.add_edge_without_blockers(Vertex_handle(vertices[i]), Vertex_handle(vertices[j])); @@ -178,7 +178,7 @@ class Model { void contract_edges(unsigned num_contractions) { Gudhi::Clock c; Edge_contractor contractor(complex_, num_contractions); - std::cout << "Time to simplify: " << c.num_seconds() << "s" << std::endl; + std::clog << "Time to simplify: " << c.num_seconds() << "s" << std::endl; } void collapse_vertices(unsigned num_collapses) { @@ -192,14 +192,14 @@ class Model { } void show_graph_stats() { - std::cout << "++++++ Graph stats +++++++" << std::endl; - std::cout << "Num vertices : " << complex_.num_vertices() << std::endl; - std::cout << "Num edges : " << complex_.num_edges() << std::endl; - std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl; - std::cout << "Min/avg/max degree : " << min_degree() << "/" << avg_degree() << "/" << max_degree() << std::endl; - 
std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl; - std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl; - std::cout << "+++++++++++++++++++++++++" << std::endl; + std::clog << "++++++ Graph stats +++++++" << std::endl; + std::clog << "Num vertices : " << complex_.num_vertices() << std::endl; + std::clog << "Num edges : " << complex_.num_edges() << std::endl; + std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl; + std::clog << "Min/avg/max degree : " << min_degree() << "/" << avg_degree() << "/" << max_degree() << std::endl; + std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl; + std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl; + std::clog << "+++++++++++++++++++++++++" << std::endl; } private: @@ -226,11 +226,11 @@ class Model { public: void show_complex_stats() { - std::cout << "++++++ Mesh stats +++++++" << std::endl; - std::cout << "Num vertices : " << complex_.num_vertices() << std::endl; - std::cout << "Num edges : " << complex_.num_edges() << std::endl; - std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl; - std::cout << "+++++++++++++++++++++++++" << std::endl; + std::clog << "++++++ Mesh stats +++++++" << std::endl; + std::clog << "Num vertices : " << complex_.num_vertices() << std::endl; + std::clog << "Num edges : " << complex_.num_edges() << std::endl; + std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl; + std::clog << "+++++++++++++++++++++++++" << std::endl; } void show_complex_dimension() { @@ -247,18 +247,18 @@ class Model { euler -= 1; } clock.end(); - std::cout << "++++++ Mesh dimension +++++++" << std::endl; - std::cout << "Dimension : " << dimension << std::endl; - std::cout << "Euler characteristic : " << euler << std::endl; - std::cout << "Num simplices : " << num_simplices << std::endl; - std::cout << "Total time: " << clock << std::endl; - std::cout << "Time per simplex: " << clock.num_seconds() / num_simplices << " s" << std::endl; - std::cout << "+++++++++++++++++++++++++" << std::endl; + std::clog << "++++++ Mesh dimension +++++++" << std::endl; + std::clog << "Dimension : " << dimension << std::endl; + std::clog << "Euler characteristic : " << euler << std::endl; + std::clog << "Num simplices : " << num_simplices << std::endl; + std::clog << "Total time: " << clock << std::endl; + std::clog << "Time per simplex: " << clock.num_seconds() / num_simplices << " s" << std::endl; + std::clog << "+++++++++++++++++++++++++" << std::endl; } void show_homology_group() { #ifdef _WIN32 - std::cout << "Works only on linux x64 for the moment\n"; + std::clog << "Works only on linux x64 for the moment\n"; #else Gudhi::Clock clock; run_chomp(); @@ -278,16 +278,16 @@ class Model { else euler -= 1; } - std::cout << "Saw " << num_simplices << " simplices with maximum dimension " << dimension << std::endl; - std::cout << "The euler characteristic is : " << euler << std::endl; + std::clog << "Saw " << num_simplices << " simplices with maximum dimension " << dimension << std::endl; + std::clog << "The euler characteristic is : " << euler << std::endl; } void show_persistence(int p, double threshold, int max_dim, double min_pers) { - Persistence_compute persistence(complex_, std::cout, Persistence_params(p, threshold, max_dim, min_pers)); + Persistence_compute 
persistence(complex_, std::clog, Persistence_params(p, threshold, max_dim, min_pers)); } void show_critical_points(double max_distance) { - Critical_points critical_points(complex_, std::cout, max_distance); + Critical_points critical_points(complex_, std::clog, max_distance); } void show_is_manifold() { @@ -296,12 +296,12 @@ class Model { Is_manifold test_manifold(complex_, dim, is_manifold); if (is_manifold) { - std::cout << "The complex is a " << dim << "-manifold\n"; + std::clog << "The complex is a " << dim << "-manifold\n"; } else { if (dim < 4) { - std::cout << "The complex has dimension greater than " << dim << " and is not a manifold\n"; + std::clog << "The complex has dimension greater than " << dim << " and is not a manifold\n"; } else { - std::cout << "The complex has dimension>=4 and may or may not be a manifold\n"; + std::clog << "The complex has dimension>=4 and may or may not be a manifold\n"; } } } @@ -309,7 +309,7 @@ class Model { private: void run_chomp() { save_complex_in_file_for_chomp(); - std::cout << "Call CHOMP library\n"; + std::clog << "Call CHOMP library\n"; int returnValue = system("homsimpl chomp.sim"); if (returnValue != 0) { std::cerr << "homsimpl (from CHOMP) failed. Please check it is installed or available in the PATH." diff --git a/src/GudhUI/utils/Bar_code_persistence.h b/src/GudhUI/utils/Bar_code_persistence.h index cd9b009f..b526017a 100644 --- a/src/GudhUI/utils/Bar_code_persistence.h +++ b/src/GudhUI/utils/Bar_code_persistence.h @@ -58,13 +58,13 @@ class Bar_code_persistence { QGraphicsScene * scene = new QGraphicsScene(); view->setScene(scene); double ratio = 600.0 / (max_death - min_birth); - // std::cout << "min_birth=" << min_birth << " - max_death=" << max_death << " - ratio=" << ratio << std::endl; + // std::clog << "min_birth=" << min_birth << " - max_death=" << max_death << " - ratio=" << ratio << std::endl; double height = 0.0, birth = 0.0, death = 0.0; int pers_num = 1; for (auto& persistence : persistence_vector) { height = 5.0 * pers_num; - // std::cout << "[" << pers_num << "] birth=" << persistence.first << " - death=" << persistence.second << std::endl; + // std::clog << "[" << pers_num << "] birth=" << persistence.first << " - death=" << persistence.second << std::endl; if (std::isfinite(persistence.first)) birth = ((persistence.first - min_birth) * ratio) + 50.0; else diff --git a/src/GudhUI/utils/Critical_points.h b/src/GudhUI/utils/Critical_points.h index 32fcf32e..97e58737 100644 --- a/src/GudhUI/utils/Critical_points.h +++ b/src/GudhUI/utils/Critical_points.h @@ -65,7 +65,7 @@ template class Critical_points { void anti_collapse_edges(const std::deque& edges) { unsigned pos = 0; for (Edge e : edges) { - std::cout << "edge " << pos++ << "/" << edges.size() << "\n"; + std::clog << "edge " << pos++ << "/" << edges.size() << "\n"; auto eh = filled_complex_.add_edge_without_blockers(e.first, e.second); int is_contractible(is_link_reducible(eh)); diff --git a/src/GudhUI/utils/Rips_builder.h b/src/GudhUI/utils/Rips_builder.h index aba1a8e4..0300190c 100644 --- a/src/GudhUI/utils/Rips_builder.h +++ b/src/GudhUI/utils/Rips_builder.h @@ -43,13 +43,13 @@ template class Rips_builder { void compute_edges(double alpha) { auto vertices = complex_.vertex_range(); for (auto p = vertices.begin(); p != vertices.end(); ++p) { - std::cout << *p << " "; - std::cout.flush(); + std::clog << *p << " "; + std::clog.flush(); for (auto q = p; ++q != vertices.end(); /**/) if (squared_eucl_distance(complex_.point(*p), complex_.point(*q)) < 4 * alpha * alpha) 
complex_.add_edge_without_blockers(*p, *q); } - std::cout << std::endl; + std::clog << std::endl; } }; diff --git a/src/GudhUI/view/View_parameter.h b/src/GudhUI/view/View_parameter.h index dfd3aa41..3671f4fb 100644 --- a/src/GudhUI/view/View_parameter.h +++ b/src/GudhUI/view/View_parameter.h @@ -52,13 +52,13 @@ class View_parameter { void change_vertex_mode() { int current_value = vertex_mode; vertex_mode = static_cast (++current_value % V_COUNT); - std::cout << "Vertex mode : "; + std::clog << "Vertex mode : "; switch (vertex_mode) { case V_NONE: - std::cout << "empty\n"; + std::clog << "empty\n"; break; case V_SIMPLE: - std::cout << "simple\n"; + std::clog << "simple\n"; break; default: break; diff --git a/src/Hasse_complex/include/gudhi/Hasse_complex.h b/src/Hasse_complex/include/gudhi/Hasse_complex.h index 209fd0b9..8ce8c36f 100644 --- a/src/Hasse_complex/include/gudhi/Hasse_complex.h +++ b/src/Hasse_complex/include/gudhi/Hasse_complex.h @@ -173,9 +173,9 @@ class Hasse_complex { } void display_simplex(Simplex_handle sh) { - std::cout << dimension(sh) << " "; - for (auto sh_b : boundary_simplex_range(sh)) std::cout << sh_b << " "; - std::cout << " " << filtration(sh) << " key=" << key(sh); + std::clog << dimension(sh) << " "; + for (auto sh_b : boundary_simplex_range(sh)) std::clog << sh_b << " "; + std::clog << " " << filtration(sh) << " key=" << key(sh); } void initialize_filtration() { diff --git a/src/Nerve_GIC/example/CoordGIC.cpp b/src/Nerve_GIC/example/CoordGIC.cpp index fd9c224a..f0afdca5 100644 --- a/src/Nerve_GIC/example/CoordGIC.cpp +++ b/src/Nerve_GIC/example/CoordGIC.cpp @@ -40,7 +40,7 @@ int main(int argc, char **argv) { bool check = GIC.read_point_cloud(off_file_name); if (!check) { - std::cout << "Incorrect OFF file." << std::endl; + std::clog << "Incorrect OFF file." << std::endl; } else { GIC.set_type("GIC"); @@ -67,15 +67,15 @@ int main(int argc, char **argv) { // -------------------------------------------- if (verb) { - std::cout << "Coordinate GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices() + std::clog << "Coordinate GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." << std::endl; - std::cout << "Iterator on coordinate GIC simplices" << std::endl; + std::clog << "Iterator on coordinate GIC simplices" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } } diff --git a/src/Nerve_GIC/example/FuncGIC.cpp b/src/Nerve_GIC/example/FuncGIC.cpp index 5a323795..518e1826 100644 --- a/src/Nerve_GIC/example/FuncGIC.cpp +++ b/src/Nerve_GIC/example/FuncGIC.cpp @@ -41,7 +41,7 @@ int main(int argc, char **argv) { bool check = GIC.read_point_cloud(off_file_name); if (!check) { - std::cout << "Incorrect OFF file." << std::endl; + std::clog << "Incorrect OFF file." << std::endl; } else { GIC.set_type("GIC"); @@ -65,15 +65,15 @@ int main(int argc, char **argv) { // -------------------------------------------- if (verb) { - std::cout << "Functional GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices() + std::clog << "Functional GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." 
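// Aside: a stand-alone sketch (not from this patch) of the progress-printing
// idiom used by Rips_builder above and by GIC.h below. Because std::clog is
// buffered, an explicit flush is needed for "\r"-overwritten progress lines
// to appear immediately.
#include <iostream>

int main() {
  const int n = 100;
  for (int i = 0; i <= n; ++i) {
    if (i % 10 == 0) std::clog << "\r" << i << "%" << std::flush;
    // ... process item i here ...
  }
  std::clog << std::endl;  // std::endl writes '\n' and also flushes
  return 0;
}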
<< std::endl; - std::cout << "Iterator on functional GIC simplices" << std::endl; + std::clog << "Iterator on functional GIC simplices" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } } diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h index ba5ddafd..ab099c04 100644 --- a/src/Nerve_GIC/include/gudhi/GIC.h +++ b/src/Nerve_GIC/include/gudhi/GIC.h @@ -407,7 +407,7 @@ class Cover_complex { std::ifstream input(distance, std::ios::out | std::ios::binary); if (input.good()) { - if (verbose) std::cout << "Reading distances..." << std::endl; + if (verbose) std::clog << "Reading distances..." << std::endl; for (int i = 0; i < n; i++) { for (int j = i; j < n; j++) { input.read((char*)&d, 8); @@ -417,12 +417,12 @@ class Cover_complex { } input.close(); } else { - if (verbose) std::cout << "Computing distances..." << std::endl; + if (verbose) std::clog << "Computing distances..." << std::endl; input.close(); std::ofstream output(distance, std::ios::out | std::ios::binary); for (int i = 0; i < n; i++) { int state = (int)floor(100 * (i * 1.0 + 1) / n) % 10; - if (state == 0 && verbose) std::cout << "\r" << state << "%" << std::flush; + if (state == 0 && verbose) std::clog << "\r" << state << "%" << std::flush; for (int j = i; j < n; j++) { double dis = ref_distance(point_cloud[i], point_cloud[j]); distances[i][j] = dis; @@ -431,7 +431,7 @@ class Cover_complex { } } output.close(); - if (verbose) std::cout << std::endl; + if (verbose) std::clog << std::endl; } } @@ -451,8 +451,8 @@ class Cover_complex { m = (std::min)(m, n - 1); double delta = 0; - if (verbose) std::cout << n << " points in R^" << data_dimension << std::endl; - if (verbose) std::cout << "Subsampling " << m << " points" << std::endl; + if (verbose) std::clog << n << " points in R^" << data_dimension << std::endl; + if (verbose) std::clog << "Subsampling " << m << " points" << std::endl; if (distances.size() == 0) compute_pairwise_distances(distance); @@ -487,7 +487,7 @@ class Cover_complex { } #endif - if (verbose) std::cout << "delta = " << delta << std::endl; + if (verbose) std::clog << "delta = " << delta << std::endl; set_graph_from_rips(delta, distance); return delta; } @@ -579,7 +579,7 @@ class Cover_complex { for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei) reso = (std::max)(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] - func[index[boost::target(*ei, one_skeleton)]])); - if (verbose) std::cout << "resolution = " << reso << std::endl; + if (verbose) std::clog << "resolution = " << reso << std::endl; resolution_double = reso; } @@ -589,7 +589,7 @@ class Cover_complex { reso = (std::max)(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] - func[index[boost::target(*ei, one_skeleton)]]) / gain); - if (verbose) std::cout << "resolution = " << reso << std::endl; + if (verbose) std::clog << "resolution = " << reso << std::endl; resolution_double = reso; } @@ -637,7 +637,7 @@ class Cover_complex { minf = (std::min)(minf, func[i]); maxf = (std::max)(maxf, func[i]); } - if (verbose) std::cout << "Min function value = " << minf << " and Max function value = " << maxf << std::endl; + if (verbose) std::clog << "Min function value = " << minf << " and Max function value = " << maxf << std::endl; // Compute cover of im(f) std::vector > intervals; @@ -663,7 
+663,7 @@ class Cover_complex { res = intervals.size(); if (verbose) { for (int i = 0; i < res; i++) - std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" + std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" << std::endl; } } else { @@ -681,7 +681,7 @@ class Cover_complex { res = intervals.size(); if (verbose) { for (int i = 0; i < res; i++) - std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" + std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" << std::endl; } } else { // Case we use an integer and a double for the length of the intervals. @@ -698,7 +698,7 @@ class Cover_complex { res = intervals.size(); if (verbose) { for (int i = 0; i < res; i++) - std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" + std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]" << std::endl; } } @@ -715,7 +715,7 @@ class Cover_complex { std::map > preimages; std::map funcstd; - if (verbose) std::cout << "Computing preimages..." << std::endl; + if (verbose) std::clog << "Computing preimages..." << std::endl; for (int i = 0; i < res; i++) { // Find points in the preimage std::pair inter1 = intervals[i]; @@ -764,7 +764,7 @@ class Cover_complex { } #ifdef GUDHI_USE_TBB - if (verbose) std::cout << "Computing connected components (parallelized)..." << std::endl; + if (verbose) std::clog << "Computing connected components (parallelized)..." << std::endl; tbb::mutex covermutex, idmutex; tbb::parallel_for(0, res, [&](int i){ // Compute connected components @@ -800,7 +800,7 @@ class Cover_complex { idmutex.unlock(); }); #else - if (verbose) std::cout << "Computing connected components..." << std::endl; + if (verbose) std::clog << "Computing connected components..." << std::endl; for (int i = 0; i < res; i++) { // Compute connected components Graph G = one_skeleton.create_subgraph(); @@ -894,7 +894,7 @@ class Cover_complex { // Compute the geodesic distances to subsamples with Dijkstra #ifdef GUDHI_USE_TBB - if (verbose) std::cout << "Computing geodesic distances (parallelized)..." << std::endl; + if (verbose) std::clog << "Computing geodesic distances (parallelized)..." << std::endl; tbb::mutex coverMutex; tbb::mutex mindistMutex; tbb::parallel_for(0, m, [&](int i){ int seed = voronoi_subsamples[i]; @@ -916,7 +916,7 @@ class Cover_complex { }); #else for (int i = 0; i < m; i++) { - if (verbose) std::cout << "Computing geodesic distances to seed " << i << "..." << std::endl; + if (verbose) std::clog << "Computing geodesic distances to seed " << i << "..." << std::endl; int seed = voronoi_subsamples[i]; std::vector dmap(n); boost::dijkstra_shortest_paths( @@ -1054,7 +1054,7 @@ class Cover_complex { } graphic << "}"; graphic.close(); - std::cout << mapp << " file generated. It can be visualized with e.g. neato." << std::endl; + std::clog << mapp << " file generated. It can be visualized with e.g. neato." << std::endl; } public: // Create a .txt file that can be compiled with KeplerMapper. @@ -1090,7 +1090,7 @@ class Cover_complex { if (cover_color[simplices[i][0]].first > mask && cover_color[simplices[i][1]].first > mask) graphic << name2id[simplices[i][0]] << " " << name2id[simplices[i][1]] << std::endl; graphic.close(); - std::cout << mapp + std::clog << mapp << " generated. It can be visualized with e.g. 
python KeplerMapperVisuFromTxtFile.py and firefox." << std::endl; } @@ -1137,7 +1137,7 @@ class Cover_complex { for (int i = 0; i < numfaces; i++) graphic << 3 << " " << faces[i][0] << " " << faces[i][1] << " " << faces[i][2] << std::endl; graphic.close(); - std::cout << mapp << " generated. It can be visualized with e.g. geomview." << std::endl; + std::clog << mapp << " generated. It can be visualized with e.g. geomview." << std::endl; } // ******************************************************************************************************************* @@ -1185,7 +1185,7 @@ class Cover_complex { for (int i = 0; i < max_dim; i++) { std::vector > bars = pcoh.intervals_in_dimension(i); int num_bars = bars.size(); if(i == 0) num_bars -= 1; - if(verbose) std::cout << num_bars << " interval(s) in dimension " << i << ":" << std::endl; + if(verbose) std::clog << num_bars << " interval(s) in dimension " << i << ":" << std::endl; for (int j = 0; j < num_bars; j++) { double birth = bars[j].first; double death = bars[j].second; @@ -1199,7 +1199,7 @@ class Cover_complex { else death = minf + (2 - death) * (maxf - minf); PD.push_back(std::pair(birth, death)); - if (verbose) std::cout << " [" << birth << ", " << death << "]" << std::endl; + if (verbose) std::clog << " [" << birth << ", " << death << "]" << std::endl; } } return PD; @@ -1215,7 +1215,7 @@ class Cover_complex { unsigned int sz = distribution.size(); if (sz < N) { for (unsigned int i = 0; i < N - sz; i++) { - if (verbose) std::cout << "Computing " << i << "th bootstrap, bottleneck distance = "; + if (verbose) std::clog << "Computing " << i << "th bootstrap, bottleneck distance = "; Cover_complex Cboot; Cboot.n = this->n; Cboot.data_dimension = this->data_dimension; Cboot.type = this->type; Cboot.functional_cover = true; @@ -1241,7 +1241,7 @@ class Cover_complex { Cboot.find_simplices(); Cboot.compute_PD(); double db = Gudhi::persistence_diagram::bottleneck_distance(this->PD, Cboot.PD); - if (verbose) std::cout << db << std::endl; + if (verbose) std::clog << db << std::endl; distribution.push_back(db); } @@ -1258,7 +1258,7 @@ class Cover_complex { double compute_distance_from_confidence_level(double alpha) { unsigned int N = distribution.size(); double d = distribution[std::floor(alpha * N)]; - if (verbose) std::cout << "Distance corresponding to confidence " << alpha << " is " << d << std::endl; + if (verbose) std::clog << "Distance corresponding to confidence " << alpha << " is " << d << std::endl; return d; } @@ -1273,7 +1273,7 @@ class Cover_complex { double level = 1; for (unsigned int i = 0; i < N; i++) if (distribution[i] >= d){ level = i * 1.0 / N; break; } - if (verbose) std::cout << "Confidence level of distance " << d << " is " << level << std::endl; + if (verbose) std::clog << "Confidence level of distance " << d << " is " << level << std::endl; return level; } @@ -1286,7 +1286,7 @@ class Cover_complex { double distancemin = (std::numeric_limits::max)(); int N = PD.size(); for (int i = 0; i < N; i++) distancemin = (std::min)(distancemin, 0.5 * std::abs(PD[i].second - PD[i].first)); double p_value = 1 - compute_confidence_level_from_distance(distancemin); - if (verbose) std::cout << "p value = " << p_value << std::endl; + if (verbose) std::clog << "p value = " << p_value << std::endl; return p_value; } diff --git a/src/Nerve_GIC/utilities/Nerve.cpp b/src/Nerve_GIC/utilities/Nerve.cpp index d34e922c..7b09f89d 100644 --- a/src/Nerve_GIC/utilities/Nerve.cpp +++ b/src/Nerve_GIC/utilities/Nerve.cpp @@ -42,7 +42,7 @@ int main(int 
argc, char **argv) { bool check = SC.read_point_cloud(off_file_name); if (!check) { - std::cout << "Incorrect OFF file." << std::endl; + std::clog << "Incorrect OFF file." << std::endl; } else { SC.set_type("Nerve"); @@ -67,15 +67,15 @@ int main(int argc, char **argv) { // ---------------------------------------------------------------------------- if (verb) { - std::cout << "Nerve is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " + std::clog << "Nerve is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." << std::endl; - std::cout << "Iterator on Nerve simplices" << std::endl; + std::clog << "Iterator on Nerve simplices" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } } diff --git a/src/Nerve_GIC/utilities/VoronoiGIC.cpp b/src/Nerve_GIC/utilities/VoronoiGIC.cpp index 0182c948..117c89fb 100644 --- a/src/Nerve_GIC/utilities/VoronoiGIC.cpp +++ b/src/Nerve_GIC/utilities/VoronoiGIC.cpp @@ -40,7 +40,7 @@ int main(int argc, char **argv) { bool check = GIC.read_point_cloud(off_file_name); if (!check) { - std::cout << "Incorrect OFF file." << std::endl; + std::clog << "Incorrect OFF file." << std::endl; } else { GIC.set_type("GIC"); @@ -61,15 +61,15 @@ int main(int argc, char **argv) { // ---------------------------------------------------------------------------- if (verb) { - std::cout << "Graph induced complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() + std::clog << "Graph induced complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." 
<< std::endl; - std::cout << "Iterator on graph induced complex simplices" << std::endl; + std::clog << "Iterator on graph induced complex simplices" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } } diff --git a/src/Persistence_representations/example/persistence_heat_maps.cpp b/src/Persistence_representations/example/persistence_heat_maps.cpp index 1bf3a637..9fd6779a 100644 --- a/src/Persistence_representations/example/persistence_heat_maps.cpp +++ b/src/Persistence_representations/example/persistence_heat_maps.cpp @@ -65,7 +65,7 @@ int main(int argc, char** argv) { median.compute_median(vector_of_maps); // to compute L^1 distance between hm1 and hm2: - std::cout << "The L^1 distance is : " << hm1.distance(hm2, 1) << std::endl; + std::clog << "The L^1 distance is : " << hm1.distance(hm2, 1) << std::endl; // to average of hm1 and hm2: std::vector to_average; @@ -75,15 +75,15 @@ int main(int argc, char** argv) { av.compute_average(to_average); // to compute scalar product of hm1 and hm2: - std::cout << "Scalar product is : " << hm1.compute_scalar_product(hm2) << std::endl; + std::clog << "Scalar product is : " << hm1.compute_scalar_product(hm2) << std::endl; Persistence_heat_maps hm1k(persistence1, Gaussian_function(1.0)); Persistence_heat_maps hm2k(persistence2, Gaussian_function(1.0)); Persistence_heat_maps hm1i(persistence1, Gaussian_function(1.0), 20, 20, 0, 11, 0, 11); Persistence_heat_maps hm2i(persistence2, Gaussian_function(1.0), 20, 20, 0, 11, 0, 11); - std::cout << "Scalar product computed with exact 2D kernel on grid is : " << hm1i.compute_scalar_product(hm2i) + std::clog << "Scalar product computed with exact 2D kernel on grid is : " << hm1i.compute_scalar_product(hm2i) << std::endl; - std::cout << "Scalar product computed with exact 2D kernel is : " << hm1k.compute_scalar_product(hm2k) << std::endl; + std::clog << "Scalar product computed with exact 2D kernel is : " << hm1k.compute_scalar_product(hm2k) << std::endl; return 0; } diff --git a/src/Persistence_representations/example/persistence_intervals.cpp b/src/Persistence_representations/example/persistence_intervals.cpp index c908581c..748b9ae4 100644 --- a/src/Persistence_representations/example/persistence_intervals.cpp +++ b/src/Persistence_representations/example/persistence_intervals.cpp @@ -18,59 +18,59 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in int main(int argc, char** argv) { if (argc != 2) { - std::cout << "To run this program, please provide the name of a file with persistence diagram \n"; + std::clog << "To run this program, please provide the name of a file with persistence diagram \n"; return 1; } Persistence_intervals p(argv[1]); std::pair min_max_ = p.get_x_range(); - std::cout << "Birth-death range : " << min_max_.first << " " << min_max_.second << std::endl; + std::clog << "Birth-death range : " << min_max_.first << " " << min_max_.second << std::endl; std::vector dominant_ten_intervals_length = p.length_of_dominant_intervals(10); - std::cout << "Length of ten dominant intervals : " << std::endl; + std::clog << "Length of ten dominant intervals : " << std::endl; for (size_t i = 0; i != dominant_ten_intervals_length.size(); ++i) { - std::cout << dominant_ten_intervals_length[i] << std::endl; + std::clog << dominant_ten_intervals_length[i] << std::endl; } std::vector > 
ten_dominant_intervals = p.dominant_intervals(10); - std::cout << "Here are the dominant intervals : " << std::endl; + std::clog << "Here are the dominant intervals : " << std::endl; for (size_t i = 0; i != ten_dominant_intervals.size(); ++i) { - std::cout << "( " << ten_dominant_intervals[i].first << "," << ten_dominant_intervals[i].second << std::endl; + std::clog << "( " << ten_dominant_intervals[i].first << "," << ten_dominant_intervals[i].second << std::endl; } std::vector histogram = p.histogram_of_lengths(10); - std::cout << "Here is the histogram of barcode's length : " << std::endl; + std::clog << "Here is the histogram of barcode's length : " << std::endl; for (size_t i = 0; i != histogram.size(); ++i) { - std::cout << histogram[i] << " "; + std::clog << histogram[i] << " "; } - std::cout << std::endl; + std::clog << std::endl; std::vector cumulative_histogram = p.cumulative_histogram_of_lengths(10); - std::cout << "Cumulative histogram : " << std::endl; + std::clog << "Cumulative histogram : " << std::endl; for (size_t i = 0; i != cumulative_histogram.size(); ++i) { - std::cout << cumulative_histogram[i] << " "; + std::clog << cumulative_histogram[i] << " "; } - std::cout << std::endl; + std::clog << std::endl; std::vector char_funct_diag = p.characteristic_function_of_diagram(min_max_.first, min_max_.second); - std::cout << "Characteristic function of diagram : " << std::endl; + std::clog << "Characteristic function of diagram : " << std::endl; for (size_t i = 0; i != char_funct_diag.size(); ++i) { - std::cout << char_funct_diag[i] << " "; + std::clog << char_funct_diag[i] << " "; } - std::cout << std::endl; + std::clog << std::endl; std::vector cumul_char_funct_diag = p.cumulative_characteristic_function_of_diagram(min_max_.first, min_max_.second); - std::cout << "Cumulative characteristic function of diagram : " << std::endl; + std::clog << "Cumulative characteristic function of diagram : " << std::endl; for (size_t i = 0; i != cumul_char_funct_diag.size(); ++i) { - std::cout << cumul_char_funct_diag[i] << " "; + std::clog << cumul_char_funct_diag[i] << " "; } - std::cout << std::endl; + std::clog << std::endl; - std::cout << "Persistence Betti numbers \n"; + std::clog << "Persistence Betti numbers \n"; std::vector > pbns = p.compute_persistent_betti_numbers(); for (size_t i = 0; i != pbns.size(); ++i) { - std::cout << pbns[i].first << " " << pbns[i].second << std::endl; + std::clog << pbns[i].first << " " << pbns[i].second << std::endl; } return 0; diff --git a/src/Persistence_representations/example/persistence_landscape.cpp b/src/Persistence_representations/example/persistence_landscape.cpp index ff18d105..d39ae0b8 100644 --- a/src/Persistence_representations/example/persistence_landscape.cpp +++ b/src/Persistence_representations/example/persistence_landscape.cpp @@ -37,35 +37,35 @@ int main(int argc, char** argv) { Persistence_landscape l2(persistence2); // This is how to compute integral of landscapes: - std::cout << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl; - std::cout << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl; + std::clog << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl; + std::clog << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl; // And here how to write landscapes to stream: - std::cout << "l1 : " << l1 << std::endl; - std::cout << "l2 : " << l2 << std::endl; + std::clog << "l1 : " << l1 
<< std::endl; + std::clog << "l2 : " << l2 << std::endl; // Arithmetic operations on landscapes: Persistence_landscape sum = l1 + l2; - std::cout << "sum : " << sum << std::endl; + std::clog << "sum : " << sum << std::endl; // here are the maxima of the functions: - std::cout << "Maximum of l1 : " << l1.compute_maximum() << std::endl; - std::cout << "Maximum of l2 : " << l2.compute_maximum() << std::endl; + std::clog << "Maximum of l1 : " << l1.compute_maximum() << std::endl; + std::clog << "Maximum of l2 : " << l2.compute_maximum() << std::endl; // here are the norms of landscapes: - std::cout << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl; - std::cout << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl; + std::clog << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl; + std::clog << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl; // here is the average of landscapes: Persistence_landscape average; average.compute_average({&l1, &l2}); - std::cout << "average : " << average << std::endl; + std::clog << "average : " << average << std::endl; // here is the distance of landscapes: - std::cout << "Distance : " << l1.distance(l2) << std::endl; + std::clog << "Distance : " << l1.distance(l2) << std::endl; // here is the scalar product of landscapes: - std::cout << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl; + std::clog << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl; // here is how to create a file which is suitable for visualization via gnuplot: average.plot("average_landscape"); diff --git a/src/Persistence_representations/example/persistence_landscape_on_grid.cpp b/src/Persistence_representations/example/persistence_landscape_on_grid.cpp index 16a58e1d..6d58e167 100644 --- a/src/Persistence_representations/example/persistence_landscape_on_grid.cpp +++ b/src/Persistence_representations/example/persistence_landscape_on_grid.cpp @@ -37,31 +37,31 @@ int main(int argc, char** argv) { Persistence_landscape_on_grid l2(persistence2, 0, 11, 20); // This is how to compute integral of landscapes: - std::cout << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl; - std::cout << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl; + std::clog << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl; + std::clog << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl; // And here how to write landscapes to stream: - std::cout << "l1 : " << l1 << std::endl; - std::cout << "l2 : " << l2 << std::endl; + std::clog << "l1 : " << l1 << std::endl; + std::clog << "l2 : " << l2 << std::endl; // here are the maxima of the functions: - std::cout << "Maximum of l1 : " << l1.compute_maximum() << std::endl; - std::cout << "Maximum of l2 : " << l2.compute_maximum() << std::endl; + std::clog << "Maximum of l1 : " << l1.compute_maximum() << std::endl; + std::clog << "Maximum of l2 : " << l2.compute_maximum() << std::endl; // here are the norms of landscapes: - std::cout << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl; - std::cout << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl; + std::clog << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl; + std::clog << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) 
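// Aside: a sketch in plain standard C++ (not from this patch) of one payoff
// of routing all diagnostics through std::clog: a test or benchmark can
// silence or capture them by swapping the stream buffer via rdbuf().
#include <iostream>
#include <sstream>

int main() {
  std::ostringstream captured;
  std::streambuf* previous = std::clog.rdbuf(captured.rdbuf());  // redirect
  std::clog << "Distance : " << 0.25 << '\n';  // lands in 'captured'
  std::clog.rdbuf(previous);                   // restore the original target
  std::clog << "captured " << captured.str().size() << " bytes" << std::endl;
  return 0;
}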
<< std::endl; // here is the average of landscapes: Persistence_landscape_on_grid average; average.compute_average({&l1, &l2}); - std::cout << "average : " << average << std::endl; + std::clog << "average : " << average << std::endl; // here is the distance of landscapes: - std::cout << "Distance : " << l1.distance(l2) << std::endl; + std::clog << "Distance : " << l1.distance(l2) << std::endl; // here is the scalar product of landscapes: - std::cout << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl; + std::clog << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl; // here is how to create a file which is suitable for visualization via gnuplot: average.plot("average_landscape"); diff --git a/src/Persistence_representations/example/persistence_vectors.cpp b/src/Persistence_representations/example/persistence_vectors.cpp index b27e52d2..89e2fb83 100644 --- a/src/Persistence_representations/example/persistence_vectors.cpp +++ b/src/Persistence_representations/example/persistence_vectors.cpp @@ -41,19 +41,19 @@ int main(int argc, char** argv) { Vector_distances_in_diagram v2(persistence2, std::numeric_limits::max()); // writing to a stream: - std::cout << "v1 : " << v1 << std::endl; - std::cout << "v2 : " << v2 << std::endl; + std::clog << "v1 : " << v1 << std::endl; + std::clog << "v2 : " << v2 << std::endl; // averages: Vector_distances_in_diagram average; average.compute_average({&v1, &v2}); - std::cout << "Average : " << average << std::endl; + std::clog << "Average : " << average << std::endl; // computations of distances: - std::cout << "l^1 distance : " << v1.distance(v2) << std::endl; + std::clog << "l^1 distance : " << v1.distance(v2) << std::endl; // computations of scalar product: - std::cout << "Scalar product of l1 and l2 : " << v1.compute_scalar_product(v2) << std::endl; + std::clog << "Scalar product of l1 and l2 : " << v1.compute_scalar_product(v2) << std::endl; // create a file with a gnuplot script: v1.plot("plot_of_vector_representation"); diff --git a/src/Persistence_representations/example/sliced_wasserstein.cpp b/src/Persistence_representations/example/sliced_wasserstein.cpp index d5414d00..d4e31ebf 100644 --- a/src/Persistence_representations/example/sliced_wasserstein.cpp +++ b/src/Persistence_representations/example/sliced_wasserstein.cpp @@ -38,10 +38,10 @@ int main(int argc, char** argv) { SW swex1(persistence1, 1, -1); SW swex2(persistence2, 1, -1); - std::cout << "Approx SW kernel: " << sw1.compute_scalar_product(sw2) << std::endl; - std::cout << "Exact SW kernel: " << swex1.compute_scalar_product(swex2) << std::endl; - std::cout << "Distance induced by approx SW kernel: " << sw1.distance(sw2) << std::endl; - std::cout << "Distance induced by exact SW kernel: " << swex1.distance(swex2) << std::endl; + std::clog << "Approx SW kernel: " << sw1.compute_scalar_product(sw2) << std::endl; + std::clog << "Exact SW kernel: " << swex1.compute_scalar_product(swex2) << std::endl; + std::clog << "Distance induced by approx SW kernel: " << sw1.distance(sw2) << std::endl; + std::clog << "Distance induced by exact SW kernel: " << swex1.distance(swex2) << std::endl; return 0; } diff --git a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h index c0aee9d0..fab88489 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h +++ b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h @@ -55,9 +55,9 @@ std::vector > 
create_Gaussian_filter(size_t pixel_radius, do } if (dbg) { - std::cout << "Kernel initialize \n"; - std::cout << "pixel_radius : " << pixel_radius << std::endl; - std::cout << "kernel.size() : " << kernel.size() << std::endl; + std::clog << "Kernel initialize \n"; + std::clog << "pixel_radius : " << pixel_radius << std::endl; + std::clog << "kernel.size() : " << kernel.size() << std::endl; getchar(); } @@ -79,12 +79,12 @@ std::vector > create_Gaussian_filter(size_t pixel_radius, do } if (dbg) { - std::cout << "Here is the kernel : \n"; + std::clog << "Here is the kernel : \n"; for (size_t i = 0; i != kernel.size(); ++i) { for (size_t j = 0; j != kernel[i].size(); ++j) { - std::cout << kernel[i][j] << " "; + std::clog << kernel[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; } } return kernel; @@ -290,16 +290,16 @@ class Persistence_heat_maps { bool dbg = false; if (this->heat_map.size() != second.heat_map.size()) { if (dbg) - std::cout << "this->heat_map.size() : " << this->heat_map.size() + std::clog << "this->heat_map.size() : " << this->heat_map.size() << " \n second.heat_map.size() : " << second.heat_map.size() << std::endl; return false; } if (this->min_ != second.min_) { - if (dbg) std::cout << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl; + if (dbg) std::clog << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl; return false; } if (this->max_ != second.max_) { - if (dbg) std::cout << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl; + if (dbg) std::clog << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl; return false; } // in the other case we may assume that the persistence images are defined on the same domain. @@ -322,15 +322,15 @@ class Persistence_heat_maps { bool operator==(const Persistence_heat_maps& rhs) const { bool dbg = false; if (!this->check_if_the_same(rhs)) { - if (dbg) std::cout << "The domains are not the same \n"; + if (dbg) std::clog << "The domains are not the same \n"; return false; // in this case, the domains are not the same, so the maps cannot be the same. } for (size_t i = 0; i != this->heat_map.size(); ++i) { for (size_t j = 0; j != this->heat_map[i].size(); ++j) { if (!almost_equal(this->heat_map[i][j], rhs.heat_map[i][j])) { if (dbg) { - std::cout << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl; - std::cout << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl; + std::clog << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl; + std::clog << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl; } return false; } @@ -586,14 +586,14 @@ void Persistence_heat_maps::construct(const std::vectorf = f; - if (dbg) std::cout << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl; + if (dbg) std::clog << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl; if (min_ == max_) { - if (dbg) std::cout << "min and max parameters will be determined based on intervals \n"; + if (dbg) std::clog << "min and max parameters will be determined based on intervals \n"; // in this case, we want the program to set up the min_ and max_ values by itself. 
min_ = std::numeric_limits::max(); max_ = -std::numeric_limits::max(); @@ -611,9 +611,9 @@ void Persistence_heat_maps::construct(const std::vector::construct(const std::vectorheat_map = heat_map_; - if (dbg) std::cout << "Done creating of the heat map, now we will fill in the structure \n"; + if (dbg) std::clog << "Done creating of the heat map, now we will fill in the structure \n"; for (size_t pt_nr = 0; pt_nr != intervals_.size(); ++pt_nr) { // compute the value of intervals_[pt_nr] in the grid: @@ -638,9 +638,9 @@ void Persistence_heat_maps::construct(const std::vector((intervals_[pt_nr].second - this->min_) / (this->max_ - this->min_) * number_of_pixels); if (dbg) { - std::cout << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl; - std::cout << "x_grid : " << x_grid << std::endl; - std::cout << "y_grid : " << y_grid << std::endl; + std::clog << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl; + std::clog << "x_grid : " << x_grid << std::endl; + std::clog << "y_grid : " << y_grid << std::endl; } // x_grid and y_grid gives a center of the kernel. We want to have its lower left corner. To get this, we need to @@ -650,9 +650,9 @@ void Persistence_heat_maps::construct(const std::vectorf(intervals_[pt_nr]); @@ -663,11 +663,11 @@ void Persistence_heat_maps::construct(const std::vector= 0) && (x_grid + i < this->heat_map.size()) && ((y_grid + j) >= 0) && (y_grid + j < this->heat_map.size())) { if (dbg) { - std::cout << y_grid + j << " " << x_grid + i << std::endl; + std::clog << y_grid + j << " " << x_grid + i << std::endl; } this->heat_map[y_grid + j][x_grid + i] += scaling_value * filter[i][j]; if (dbg) { - std::cout << "Position : (" << x_grid + i << "," << y_grid + j + std::clog << "Position : (" << x_grid + i << "," << y_grid + j << ") got increased by the value : " << filter[i][j] << std::endl; } } @@ -805,7 +805,7 @@ void Persistence_heat_maps::plot(const char* filename) cons out << std::endl; } out.close(); - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; } @@ -842,7 +842,7 @@ void Persistence_heat_maps::load_from_file(const char* file in >> this->min_ >> this->max_; if (dbg) { - std::cout << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl; + std::clog << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl; } std::string temp; @@ -859,18 +859,18 @@ void Persistence_heat_maps::load_from_file(const char* file lineSS >> point; line_of_heat_map.push_back(point); if (dbg) { - std::cout << point << " "; + std::clog << point << " "; } } if (dbg) { - std::cout << std::endl; + std::clog << std::endl; getchar(); } if (in.good()) this->heat_map.push_back(line_of_heat_map); } in.close(); - if (dbg) std::cout << "Done \n"; + if (dbg) std::clog << "Done \n"; } // Concretizations of virtual methods: diff --git a/src/Persistence_representations/include/gudhi/Persistence_intervals.h b/src/Persistence_representations/include/gudhi/Persistence_intervals.h index f02e930e..a6c1d6f0 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_intervals.h +++ b/src/Persistence_representations/include/gudhi/Persistence_intervals.h @@ -185,7 +185,7 @@ class Persistence_intervals { out.close(); - std::cout << "To visualize, 
install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; } @@ -293,7 +293,7 @@ std::vector > Persistence_intervals::dominant_interval for (size_t i = 0; i != std::min(where_to_cut, position_length_vector.size()); ++i) { result.push_back(this->intervals[position_length_vector[i].first]); if (dbg) - std::cout << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second + std::clog << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second << std::endl; } @@ -303,7 +303,7 @@ std::vector > Persistence_intervals::dominant_interval std::vector Persistence_intervals::histogram_of_lengths(size_t number_of_bins) const { bool dbg = false; - if (dbg) std::cout << "this->intervals.size() : " << this->intervals.size() << std::endl; + if (dbg) std::clog << "this->intervals.size() : " << this->intervals.size() << std::endl; // first find the length of the longest interval: double lengthOfLongest = 0; for (size_t i = 0; i != this->intervals.size(); ++i) { @@ -313,7 +313,7 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of } if (dbg) { - std::cout << "lengthOfLongest : " << lengthOfLongest << std::endl; + std::clog << "lengthOfLongest : " << lengthOfLongest << std::endl; } // this is a container we will use to store the resulting histogram @@ -330,10 +330,10 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of ++result[position]; if (dbg) { - std::cout << "i : " << i << std::endl; - std::cout << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n"; - std::cout << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl; - std::cout << "position : " << position << std::endl; + std::clog << "i : " << i << std::endl; + std::clog << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n"; + std::clog << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl; + std::clog << "position : " << position << std::endl; getchar(); } } @@ -342,7 +342,7 @@ std::vector Persistence_intervals::histogram_of_lengths(size_t number_of result.resize(number_of_bins); if (dbg) { - for (size_t i = 0; i != result.size(); ++i) std::cout << result[i] << std::endl; + for (size_t i = 0; i != result.size(); ++i) std::clog << result[i] << std::endl; } return result; } @@ -368,7 +368,7 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do for (size_t i = 0; i != this->intervals.size(); ++i) { if (dbg) { - std::cout << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; + std::clog << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; } size_t beginIt = 0; @@ -390,8 +390,8 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do } if (dbg) { - std::cout << "beginIt : " << beginIt << std::endl; - std::cout << "endIt : " << endIt << std::endl; + std::clog << "beginIt : " << beginIt << std::endl; + std::clog << "endIt : " << endIt << std::endl; } for (size_t pos = beginIt; pos != endIt; ++pos) { @@ -399,11 +399,11 @@ std::vector Persistence_intervals::characteristic_function_of_diagram(do (this->intervals[i].second - this->intervals[i].first); } if (dbg) { - std::cout << "Result at this 
stage \n"; + std::clog << "Result at this stage \n"; for (size_t aa = 0; aa != result.size(); ++aa) { - std::cout << result[aa] << " "; + std::clog << result[aa] << " "; } - std::cout << std::endl; + std::clog << std::endl; } } return result; @@ -455,9 +455,9 @@ inline double compute_euclidean_distance(const std::pair& f, con std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) const { bool dbg = false; if (dbg) { - std::cout << "Here are the intervals : \n"; + std::clog << "Here are the intervals : \n"; for (size_t i = 0; i != this->intervals.size(); ++i) { - std::cout << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n"; + std::clog << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n"; } getchar(); } @@ -486,12 +486,12 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) distances_from_diagonal[i] = distanceToDiagonal; if (dbg) { - std::cout << "Here are the distances form the point : [" << this->intervals[i].first << " , " + std::clog << "Here are the distances form the point : [" << this->intervals[i].first << " , " << this->intervals[i].second << "] in the diagram \n"; for (size_t aa = 0; aa != distancesFromI.size(); ++aa) { - std::cout << "To : " << i + aa << " : " << distancesFromI[aa] << " "; + std::clog << "To : " << i + aa << " : " << distancesFromI[aa] << " "; } - std::cout << std::endl; + std::clog << std::endl; getchar(); } @@ -502,18 +502,18 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) } } if (dbg) { - std::cout << "Here is the distance matrix : \n"; + std::clog << "Here is the distance matrix : \n"; for (size_t i = 0; i != distances.size(); ++i) { for (size_t j = 0; j != distances.size(); ++j) { - std::cout << distances[i][j] << " "; + std::clog << distances[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; } - std::cout << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl; + std::clog << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl; for (size_t i = 0; i != distances_from_diagonal.size(); ++i) { - std::cout << distances_from_diagonal[i] << " "; + std::clog << distances_from_diagonal[i] << " "; } - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; getchar(); } @@ -526,13 +526,13 @@ std::vector Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) if (k > distancesFromI.size()) { if (dbg) { - std::cout << "There are not enough neighbors in your set. We set the result to plus infty \n"; + std::clog << "There are not enough neighbors in your set. We set the result to plus infty \n"; } result.push_back(std::numeric_limits::max()); } else { if (distances_from_diagonal[i] > distancesFromI[k]) { if (dbg) { - std::cout << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n"; + std::clog << "The k-th n.n. is on a diagonal. 
Therefore we set up a distance to diagonal \n"; } result.push_back(distances_from_diagonal[i]); } else { diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape.h b/src/Persistence_representations/include/gudhi/Persistence_landscape.h index dc93bb49..ce4065b8 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_landscape.h +++ b/src/Persistence_representations/include/gudhi/Persistence_landscape.h @@ -343,7 +343,7 @@ class Persistence_landscape { bool dbg = false; if (dbg) { - std::cout << "to_average.size() : " << to_average.size() << std::endl; + std::clog << "to_average.size() : " << to_average.size() << std::endl; } std::vector nextLevelMerge(to_average.size()); @@ -357,13 +357,13 @@ class Persistence_landscape { while (nextLevelMerge.size() != 1) { if (dbg) { - std::cout << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl; + std::clog << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl; } std::vector nextNextLevelMerge; nextNextLevelMerge.reserve(to_average.size()); for (size_t i = 0; i < nextLevelMerge.size(); i = i + 2) { if (dbg) { - std::cout << "i : " << i << std::endl; + std::clog << "i : " << i << std::endl; } Persistence_landscape* l = new Persistence_landscape; if (i + 1 != nextLevelMerge.size()) { @@ -374,7 +374,7 @@ class Persistence_landscape { nextNextLevelMerge.push_back(l); } if (dbg) { - std::cout << "After this iteration \n"; + std::clog << "After this iteration \n"; getchar(); } @@ -471,25 +471,25 @@ Persistence_landscape::Persistence_landscape(const char* filename, size_t dimens bool operatorEqualDbg = false; bool Persistence_landscape::operator==(const Persistence_landscape& rhs) const { if (this->land.size() != rhs.land.size()) { - if (operatorEqualDbg) std::cout << "1\n"; + if (operatorEqualDbg) std::clog << "1\n"; return false; } for (size_t level = 0; level != this->land.size(); ++level) { if (this->land[level].size() != rhs.land[level].size()) { - if (operatorEqualDbg) std::cout << "this->land[level].size() : " << this->land[level].size() << "\n"; - if (operatorEqualDbg) std::cout << "rhs.land[level].size() : " << rhs.land[level].size() << "\n"; - if (operatorEqualDbg) std::cout << "2\n"; + if (operatorEqualDbg) std::clog << "this->land[level].size() : " << this->land[level].size() << "\n"; + if (operatorEqualDbg) std::clog << "rhs.land[level].size() : " << rhs.land[level].size() << "\n"; + if (operatorEqualDbg) std::clog << "2\n"; return false; } for (size_t i = 0; i != this->land[level].size(); ++i) { if (!(almost_equal(this->land[level][i].first, rhs.land[level][i].first) && almost_equal(this->land[level][i].second, rhs.land[level][i].second))) { if (operatorEqualDbg) - std::cout << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second + std::clog << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second << "\n"; if (operatorEqualDbg) - std::cout << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n"; - if (operatorEqualDbg) std::cout << "3\n"; + std::clog << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n"; + if (operatorEqualDbg) std::clog << "3\n"; return false; } } @@ -507,7 +507,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( const std::vector >& p, size_t number_of_levels) { bool dbg = false; if (dbg) { - std::cout << "Persistence_landscape::Persistence_landscape( const 
std::vector< std::pair< double , double > >& p )" + std::clog << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )" << std::endl; } @@ -517,9 +517,9 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( std::sort(bars.begin(), bars.end(), compare_points_sorting); if (dbg) { - std::cout << "Bars : \n"; + std::clog << "Bars : \n"; for (size_t i = 0; i != bars.size(); ++i) { - std::cout << bars[i].first << " " << bars[i].second << "\n"; + std::clog << bars[i].first << " " << bars[i].second << "\n"; } getchar(); } @@ -534,7 +534,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( while (!characteristicPoints.empty()) { if (dbg) { for (size_t i = 0; i != characteristicPoints.size(); ++i) { - std::cout << "(" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; + std::clog << "(" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; } std::cin.ignore(); } @@ -545,7 +545,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( lambda_n.push_back(characteristicPoints[0]); if (dbg) { - std::cout << "1 Adding to lambda_n : (" << -std::numeric_limits::max() << " " << 0 << ") , (" + std::clog << "1 Adding to lambda_n : (" << -std::numeric_limits::max() << " " << 0 << ") , (" << minus_length(characteristicPoints[0]) << " " << 0 << ") , (" << characteristicPoints[0].first << " " << characteristicPoints[0].second << ") \n"; } @@ -562,13 +562,13 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(lambda_n[lambda_n.size() - 1]) - minus_length(characteristicPoints[i])) / 2); lambda_n.push_back(point); if (dbg) { - std::cout << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n"; + std::clog << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n"; } if (dbg) { - std::cout << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " + std::clog << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << "\n"; - std::cout << "point : " << point.first << " " << point.second << "\n"; + std::clog << "point : " << point.first << " " << point.second << "\n"; getchar(); } @@ -577,7 +577,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(point) <= birth_plus_deaths(characteristicPoints[i + p]))) { newCharacteristicPoints.push_back(characteristicPoints[i + p]); if (dbg) { - std::cout << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " + std::clog << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << ")\n"; getchar(); } @@ -586,7 +586,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( newCharacteristicPoints.push_back(point); if (dbg) { - std::cout << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n"; + std::clog << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n"; } while ((i + p < characteristicPoints.size()) && @@ -594,15 +594,15 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( (birth_plus_deaths(point) >= birth_plus_deaths(characteristicPoints[i + p]))) { newCharacteristicPoints.push_back(characteristicPoints[i + p]); if (dbg) { - std::cout << "characteristicPoints[i+p] : " << 
characteristicPoints[i + p].first << " " + std::clog << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << "\n"; - std::cout << "point : " << point.first << " " << point.second << "\n"; - std::cout << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p]) + std::clog << "point : " << point.first << " " << point.second << "\n"; + std::clog << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p]) << " , " << birth_plus_deaths(characteristicPoints[i + p]) << "\n"; - std::cout << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point) + std::clog << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point) << "\n"; - std::cout << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " + std::clog << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " " << characteristicPoints[i + p].second << ")\n"; getchar(); } @@ -613,20 +613,20 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode( lambda_n.push_back(std::make_pair(birth_plus_deaths(lambda_n[lambda_n.size() - 1]), 0)); lambda_n.push_back(std::make_pair(minus_length(characteristicPoints[i]), 0)); if (dbg) { - std::cout << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0 + std::clog << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0 << ")\n"; - std::cout << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n"; + std::clog << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n"; } } lambda_n.push_back(characteristicPoints[i]); if (dbg) { - std::cout << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " " + std::clog << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; } } else { newCharacteristicPoints.push_back(characteristicPoints[i]); if (dbg) { - std::cout << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " " + std::clog << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n"; } } @@ -692,7 +692,7 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const { double result = 0; for (size_t i = 0; i != this->land.size(); ++i) { for (size_t nr = 2; nr != this->land[i].size() - 1; ++nr) { - if (dbg) std::cout << "nr : " << nr << "\n"; + if (dbg) std::clog << "nr : " << nr << "\n"; // In this interval, the landscape has a form f(x) = ax+b. 
We want to compute integral of (ax+b)^p = 1/a * // (ax+b)^{p+1}/(p+1) std::pair coef = compute_parameters_of_a_line(this->land[i][nr], this->land[i][nr - 1]); @@ -700,7 +700,7 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const { double b = coef.second; if (dbg) - std::cout << "(" << this->land[i][nr].first << "," << this->land[i][nr].second << ") , " + std::clog << "(" << this->land[i][nr].first << "," << this->land[i][nr].second << ") , " << this->land[i][nr - 1].first << "," << this->land[i][nr].second << ")" << std::endl; if (this->land[i][nr].first == this->land[i][nr - 1].first) continue; if (a != 0) { @@ -710,8 +710,8 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const { result += (this->land[i][nr].first - this->land[i][nr - 1].first) * (pow(this->land[i][nr].second, p)); } if (dbg) { - std::cout << "a : " << a << " , b : " << b << std::endl; - std::cout << "result : " << result << std::endl; + std::clog << "a : " << a << " , b : " << b << std::endl; + std::clog << "result : " << result << std::endl; } } } @@ -730,31 +730,31 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou unsigned coordEnd = this->land[level].size() - 2; if (compute_value_at_a_given_pointDbg) { - std::cout << "Here \n"; - std::cout << "x : " << x << "\n"; - std::cout << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; - std::cout << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; + std::clog << "Here \n"; + std::clog << "x : " << x << "\n"; + std::clog << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; + std::clog << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; } // in this case x is outside the support of the landscape, therefore the value of the landscape is 0. 
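The truncated comment in the hunk above, "integral of (ax+b)^p = 1/a * (ax+b)^{p+1}/(p+1)", compresses the step that the two branches of compute_integral_of_landscape implement. Spelled out, on a segment [x_1, x_2] where the landscape is the line f(x) = ax + b:

\[
\int_{x_1}^{x_2} (ax+b)^p \, dx =
\begin{cases}
\dfrac{(ax_2+b)^{p+1} - (ax_1+b)^{p+1}}{a\,(p+1)} & \text{if } a \neq 0, \\[6pt]
(x_2 - x_1)\, b^{p} & \text{if } a = 0,
\end{cases}
\]

where the a = 0 case is the flat-segment branch that multiplies the segment width by pow(this->land[i][nr].second, p). The loop in compute_value_at_a_given_point just below is an ordinary binary search over the sorted inflection points, finished by linear interpolation (function_value) between the two bracketing nodes.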
if (x <= this->land[level][coordBegin].first) return 0; if (x >= this->land[level][coordEnd].first) return 0; - if (compute_value_at_a_given_pointDbg) std::cout << "Entering to the while loop \n"; + if (compute_value_at_a_given_pointDbg) std::clog << "Entering to the while loop \n"; while (coordBegin + 1 != coordEnd) { if (compute_value_at_a_given_pointDbg) { - std::cout << "coordBegin : " << coordBegin << "\n"; - std::cout << "coordEnd : " << coordEnd << "\n"; - std::cout << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; - std::cout << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; + std::clog << "coordBegin : " << coordBegin << "\n"; + std::clog << "coordEnd : " << coordEnd << "\n"; + std::clog << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n"; + std::clog << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n"; } unsigned newCord = (unsigned)floor((coordEnd + coordBegin) / 2.0); if (compute_value_at_a_given_pointDbg) { - std::cout << "newCord : " << newCord << "\n"; - std::cout << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n"; + std::clog << "newCord : " << newCord << "\n"; + std::clog << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n"; std::cin.ignore(); } @@ -767,12 +767,12 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou } if (compute_value_at_a_given_pointDbg) { - std::cout << "x : " << x << " is between : " << this->land[level][coordBegin].first << " a " + std::clog << "x : " << x << " is between : " << this->land[level][coordBegin].first << " a " << this->land[level][coordEnd].first << "\n"; - std::cout << "the y coords are : " << this->land[level][coordBegin].second << " a " + std::clog << "the y coords are : " << this->land[level][coordBegin].second << " a " << this->land[level][coordEnd].second << "\n"; - std::cout << "coordBegin : " << coordBegin << "\n"; - std::cout << "coordEnd : " << coordEnd << "\n"; + std::clog << "coordBegin : " << coordBegin << "\n"; + std::clog << "coordEnd : " << coordEnd << "\n"; std::cin.ignore(); } return function_value(this->land[level][coordBegin], this->land[level][coordEnd], x); @@ -810,13 +810,13 @@ Persistence_landscape Persistence_landscape::abs() { Persistence_landscape result; for (size_t level = 0; level != this->land.size(); ++level) { if (AbsDbg) { - std::cout << "level: " << level << std::endl; + std::clog << "level: " << level << std::endl; } std::vector > lambda_n; lambda_n.push_back(std::make_pair(-std::numeric_limits::max(), 0)); for (size_t i = 1; i != this->land[level].size(); ++i) { if (AbsDbg) { - std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " " + std::clog << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " " << this->land[level][i].second << std::endl; } // if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to @@ -828,15 +828,15 @@ Persistence_landscape Persistence_landscape::abs() { lambda_n.push_back(std::make_pair(zero, 0)); lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second))); if (AbsDbg) { - std::cout << "Adding pair : (" << zero << ",0)" << std::endl; - std::cout << "In the same step adding pair : (" << this->land[level][i].first << "," + std::clog << "Adding pair : (" << zero << ",0)" << 
std::endl; + std::clog << "In the same step adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) << ") " << std::endl; std::cin.ignore(); } } else { lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second))); if (AbsDbg) { - std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) + std::clog << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) << ") " << std::endl; std::cin.ignore(); } @@ -851,13 +851,13 @@ Persistence_landscape* Persistence_landscape::new_abs() { Persistence_landscape* result = new Persistence_landscape(*this); for (size_t level = 0; level != this->land.size(); ++level) { if (AbsDbg) { - std::cout << "level: " << level << std::endl; + std::clog << "level: " << level << std::endl; } std::vector > lambda_n; lambda_n.push_back(std::make_pair(-std::numeric_limits::max(), 0)); for (size_t i = 1; i != this->land[level].size(); ++i) { if (AbsDbg) { - std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " " + std::clog << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " " << this->land[level][i].second << std::endl; } // if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to @@ -869,15 +869,15 @@ Persistence_landscape* Persistence_landscape::new_abs() { lambda_n.push_back(std::make_pair(zero, 0)); lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second))); if (AbsDbg) { - std::cout << "Adding pair : (" << zero << ",0)" << std::endl; - std::cout << "In the same step adding pair : (" << this->land[level][i].first << "," + std::clog << "Adding pair : (" << zero << ",0)" << std::endl; + std::clog << "In the same step adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) << ") " << std::endl; std::cin.ignore(); } } else { lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second))); if (AbsDbg) { - std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) + std::clog << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second) << ") " << std::endl; std::cin.ignore(); } @@ -943,11 +943,11 @@ void Persistence_landscape::load_landscape_from_file(const char* filename) { lineSS >> endd; landscapeAtThisLevel.push_back(std::make_pair(beginn, endd)); if (dbg) { - std::cout << "Reading a point : " << beginn << " , " << endd << std::endl; + std::clog << "Reading a point : " << beginn << " , " << endd << std::endl; } } else { if (dbg) { - std::cout << "IGNORE LINE\n"; + std::clog << "IGNORE LINE\n"; getchar(); } if (!isThisAFirsLine) { @@ -975,7 +975,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap const Persistence_landscape& land2) { bool operation_on_pair_of_landscapesDBG = false; if (operation_on_pair_of_landscapesDBG) { - std::cout << "operation_on_pair_of_landscapes\n"; + std::clog << "operation_on_pair_of_landscapes\n"; std::cin.ignore(); } Persistence_landscape result; @@ -985,8 +985,8 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap if (operation_on_pair_of_landscapesDBG) { for (size_t i = 0; i != std::min(land1.land.size(), land2.land.size()); ++i) { - std::cout << "land1.land[" << i << "].size() : " << land1.land[i].size() << 
std::endl; - std::cout << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; + std::clog << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; + std::clog << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; } getchar(); } @@ -997,20 +997,20 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap size_t q = 0; while ((p + 1 < land1.land[i].size()) && (q + 1 < land2.land[i].size())) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "p : " << p << "\n"; - std::cout << "q : " << q << "\n"; - std::cout << "land1.land.size() : " << land1.land.size() << std::endl; - std::cout << "land2.land.size() : " << land2.land.size() << std::endl; - std::cout << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; - std::cout << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; - std::cout << "land1.land[i][p].first : " << land1.land[i][p].first << "\n"; - std::cout << "land2.land[i][q].first : " << land2.land[i][q].first << "\n"; + std::clog << "p : " << p << "\n"; + std::clog << "q : " << q << "\n"; + std::clog << "land1.land.size() : " << land1.land.size() << std::endl; + std::clog << "land2.land.size() : " << land2.land.size() << std::endl; + std::clog << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl; + std::clog << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl; + std::clog << "land1.land[i][p].first : " << land1.land[i][p].first << "\n"; + std::clog << "land2.land[i][q].first : " << land2.land[i][q].first << "\n"; } if (land1.land[i][p].first < land2.land[i][q].first) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "first \n"; - std::cout << " function_value(land2.land[i][q-1],land2.land[i][q],land1.land[i][p].first) : " + std::clog << "first \n"; + std::clog << " function_value(land2.land[i][q-1],land2.land[i][q],land1.land[i][p].first) : " << function_value(land2.land[i][q - 1], land2.land[i][q], land1.land[i][p].first) << "\n"; } lambda_n.push_back( @@ -1022,12 +1022,12 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap } if (land1.land[i][p].first > land2.land[i][q].first) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "Second \n"; - std::cout << "function_value(" << land1.land[i][p - 1].first << " " << land1.land[i][p - 1].second << " ," + std::clog << "Second \n"; + std::clog << "function_value(" << land1.land[i][p - 1].first << " " << land1.land[i][p - 1].second << " ," << land1.land[i][p].first << " " << land1.land[i][p].second << ", " << land2.land[i][q].first << " ) : " << function_value(land1.land[i][p - 1], land1.land[i][p - 1], land2.land[i][q].first) << "\n"; - std::cout << "oper( " << function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first) << "," + std::clog << "oper( " << function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first) << "," << land2.land[i][q].second << " : " << oper(land2.land[i][q].second, function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first)) @@ -1040,19 +1040,19 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap continue; } if (land1.land[i][p].first == land2.land[i][q].first) { - if (operation_on_pair_of_landscapesDBG) std::cout << "Third \n"; + if (operation_on_pair_of_landscapesDBG) std::clog << "Third \n"; lambda_n.push_back( std::make_pair(land2.land[i][q].first, oper(land1.land[i][p].second, 
land2.land[i][q].second))); ++p; ++q; } if (operation_on_pair_of_landscapesDBG) { - std::cout << "Next iteration \n"; + std::clog << "Next iteration \n"; } } while ((p + 1 < land1.land[i].size()) && (q + 1 >= land2.land[i].size())) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "New point : " << land1.land[i][p].first + std::clog << "New point : " << land1.land[i][p].first << " oper(land1.land[i][p].second,0) : " << oper(land1.land[i][p].second, 0) << std::endl; } lambda_n.push_back(std::make_pair(land1.land[i][p].first, oper(land1.land[i][p].second, 0))); @@ -1060,7 +1060,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap } while ((p + 1 >= land1.land[i].size()) && (q + 1 < land2.land[i].size())) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "New point : " << land2.land[i][q].first + std::clog << "New point : " << land2.land[i][q].first << " oper(0,land2.land[i][q].second) : " << oper(0, land2.land[i][q].second) << std::endl; } lambda_n.push_back(std::make_pair(land2.land[i][q].first, oper(0, land2.land[i][q].second))); @@ -1073,7 +1073,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap } if (land1.land.size() > std::min(land1.land.size(), land2.land.size())) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "land1.land.size() > std::min( land1.land.size() , land2.land.size() )" << std::endl; + std::clog << "land1.land.size() > std::min( land1.land.size() , land2.land.size() )" << std::endl; } for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size()); ++i) { @@ -1088,7 +1088,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap } if (land2.land.size() > std::min(land1.land.size(), land2.land.size())) { if (operation_on_pair_of_landscapesDBG) { - std::cout << "( land2.land.size() > std::min( land1.land.size() , land2.land.size() ) ) " << std::endl; + std::clog << "( land2.land.size() > std::min( land1.land.size() , land2.land.size() ) ) " << std::endl; } for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size()); ++i) { @@ -1102,7 +1102,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap } } if (operation_on_pair_of_landscapesDBG) { - std::cout << "operation_on_pair_of_landscapes END\n"; + std::clog << "operation_on_pair_of_landscapes END\n"; std::cin.ignore(); } return result; @@ -1110,20 +1110,20 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, const Persistence_landscape& pl2) { bool dbg = false; - if (dbg) std::cout << " compute_maximal_distance_non_symmetric \n"; + if (dbg) std::clog << " compute_maximal_distance_non_symmetric \n"; // this distance is not symmetric. It compute ONLY distance between inflection points of pl1 and pl2. 
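The comment above is worth unpacking: for each level shared by the two landscapes, the loop evaluates the second landscape, by linear interpolation through function_value, only at the inflection points of the first. This computes the one-sided quantity

\[
d(\mathrm{pl1} \to \mathrm{pl2}) = \max_{\mathrm{level}} \; \max_{x \in \mathrm{Infl}(\mathrm{pl1}_{\mathrm{level}})} \bigl| \mathrm{pl1}_{\mathrm{level}}(x) - \mathrm{pl2}_{\mathrm{level}}(x) \bigr| .
\]

Since both landscapes are piecewise linear, their difference is piecewise linear as well, and its supremum is attained at an inflection point of one of the two; the symmetric sup distance is therefore presumably recovered by a caller taking the maximum of the two one-sided values. Levels present in only one landscape are compared against the zero function, which is what the trailing loop over pl1.land does.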
double maxDist = 0; size_t minimalNumberOfLevels = std::min(pl1.land.size(), pl2.land.size()); for (size_t level = 0; level != minimalNumberOfLevels; ++level) { if (dbg) { - std::cout << "Level : " << level << std::endl; - std::cout << "PL1 : \n"; + std::clog << "Level : " << level << std::endl; + std::clog << "PL1 : \n"; for (size_t i = 0; i != pl1.land[level].size(); ++i) { - std::cout << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n"; + std::clog << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n"; } - std::cout << "PL2 : \n"; + std::clog << "PL2 : \n"; for (size_t i = 0; i != pl2.land[level].size(); ++i) { - std::cout << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n"; + std::clog << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n"; } std::cin.ignore(); } @@ -1143,24 +1143,24 @@ double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, if (maxDist <= val) maxDist = val; if (dbg) { - std::cout << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << "," + std::clog << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << "," << pl2.land[level][p2Count + 1].first << "] \n"; - std::cout << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; - std::cout << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : " + std::clog << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; + std::clog << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : " << function_value(pl2.land[level][p2Count], pl2.land[level][p2Count + 1], pl1.land[level][i].first) << std::endl; - std::cout << "val : " << val << std::endl; + std::clog << "val : " << val << std::endl; std::cin.ignore(); } } } - if (dbg) std::cout << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl; + if (dbg) std::clog << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl; if (minimalNumberOfLevels < pl1.land.size()) { for (size_t level = minimalNumberOfLevels; level != pl1.land.size(); ++level) { for (size_t i = 0; i != pl1.land[level].size(); ++i) { - if (dbg) std::cout << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; + if (dbg) std::clog << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl; if (maxDist < pl1.land[level][i].second) maxDist = pl1.land[level][i].second; } } @@ -1181,7 +1181,7 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const lan = lan.abs(); if (dbg) { - std::cout << "Abs of difference ; " << lan << std::endl; + std::clog << "Abs of difference ; " << lan << std::endl; getchar(); } @@ -1189,17 +1189,17 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const // \int_{- \infty}^{+\infty}| first-second |^p double result; if (p != 1) { - if (dbg) std::cout << "Power != 1, compute integral to the power p\n"; + if (dbg) std::clog << "Power != 1, compute integral to the power p\n"; result = lan.compute_integral_of_landscape(p); } else { - if (dbg) std::cout << "Power = 1, compute integral \n"; + if (dbg) std::clog << "Power = 1, compute integral \n"; result = lan.compute_integral_of_landscape(); } // (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p) return pow(result, 1.0 / p); } else { // p == infty - if (dbg) std::cout << "Power = infty, compute maximum \n"; + if (dbg) std::clog << "Power = infty, compute maximum \n"; return 
lan.compute_maximum(); } } @@ -1220,7 +1220,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ for (size_t level = 0; level != std::min(l1.size(), l2.size()); ++level) { if (dbg) { - std::cout << "Computing inner product for a level : " << level << std::endl; + std::clog << "Computing inner product for a level : " << level << std::endl; getchar(); } auto&& l1_land_level = l1.land[level]; @@ -1267,14 +1267,14 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ result += contributionFromThisPart; if (dbg) { - std::cout << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first + std::clog << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first << " , " << l1_land_level[l1It + 1].first << std::endl; - std::cout << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first + std::clog << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first << " , " << l2_land_level[l2It + 1].first << std::endl; - std::cout << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl; - std::cout << "x1 : " << x1 << " , x2 : " << x2 << std::endl; - std::cout << "contributionFromThisPart : " << contributionFromThisPart << std::endl; - std::cout << "result : " << result << std::endl; + std::clog << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl; + std::clog << "x1 : " << x1 << " , x2 : " << x2 << std::endl; + std::clog << "contributionFromThisPart : " << contributionFromThisPart << std::endl; + std::clog << "result : " << result << std::endl; getchar(); } @@ -1290,11 +1290,11 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ // in this case, we increment both: ++l2It; if (dbg) { - std::cout << "Incrementing both \n"; + std::clog << "Incrementing both \n"; } } else { if (dbg) { - std::cout << "Incrementing first \n"; + std::clog << "Incrementing first \n"; } } ++l1It; @@ -1302,7 +1302,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_ // in this case we increment l2It ++l2It; if (dbg) { - std::cout << "Incrementing second \n"; + std::clog << "Incrementing second \n"; } } @@ -1361,7 +1361,7 @@ void Persistence_landscape::plot(const char* filename, double xRangeBegin, doubl } out << "EOF" << std::endl; } - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; } diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h index b17fc0a5..537131da 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h +++ b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h @@ -155,9 +155,9 @@ class Persistence_landscape_on_grid { double dx = (this->grid_max - this->grid_min) / static_cast(this->values_of_landscapes.size() - 1); if (dbg) { - std::cout << "this->grid_max : " << this->grid_max << std::endl; - std::cout << "this->grid_min : " << this->grid_min << std::endl; - std::cout << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl; + std::clog << "this->grid_max : " << this->grid_max << std::endl; + std::clog << 
"this->grid_min : " << this->grid_min << std::endl; + std::clog << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl; getchar(); } @@ -169,14 +169,14 @@ class Persistence_landscape_on_grid { if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level]; if (dbg) { - std::cout << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size() + std::clog << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size() << " , level : " << level << std::endl; if (this->values_of_landscapes[i].size() > level) - std::cout << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl; - std::cout << "previous_y : " << previous_y << std::endl; - std::cout << "current_y : " << current_y << std::endl; - std::cout << "dx : " << dx << std::endl; - std::cout << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl; + std::clog << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl; + std::clog << "previous_y : " << previous_y << std::endl; + std::clog << "current_y : " << current_y << std::endl; + std::clog << "dx : " << dx << std::endl; + std::clog << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl; } result += 0.5 * dx * (previous_y + current_y); @@ -213,10 +213,10 @@ class Persistence_landscape_on_grid { if (this->values_of_landscapes[0].size() > level) previous_y = this->values_of_landscapes[0][level]; if (dbg) { - std::cout << "dx : " << dx << std::endl; - std::cout << "previous_x : " << previous_x << std::endl; - std::cout << "previous_y : " << previous_y << std::endl; - std::cout << "power : " << p << std::endl; + std::clog << "dx : " << dx << std::endl; + std::clog << "previous_x : " << previous_x << std::endl; + std::clog << "previous_y : " << previous_y << std::endl; + std::clog << "power : " << p << std::endl; getchar(); } @@ -225,7 +225,7 @@ class Persistence_landscape_on_grid { double current_y = 0; if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level]; - if (dbg) std::cout << "current_y : " << current_y << std::endl; + if (dbg) std::clog << "current_y : " << current_y << std::endl; if (current_y == previous_y) continue; @@ -235,7 +235,7 @@ class Persistence_landscape_on_grid { double b = coef.second; if (dbg) { - std::cout << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x + std::clog << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x << "," << current_y << ") is : " << a << "x+" << b << std::endl; } @@ -249,14 +249,14 @@ class Persistence_landscape_on_grid { } result += value_to_add; if (dbg) { - std::cout << "Increasing result by : " << value_to_add << std::endl; - std::cout << "result : " << result << std::endl; + std::clog << "Increasing result by : " << value_to_add << std::endl; + std::clog << "result : " << result << std::endl; getchar(); } previous_x = current_x; previous_y = current_y; } - if (dbg) std::cout << "The total result is : " << result << std::endl; + if (dbg) std::clog << "The total result is : " << result << std::endl; return result; } @@ -297,10 +297,10 @@ class Persistence_landscape_on_grid { size_t position = size_t((x - this->grid_min) / dx); if (dbg) { - std::cout << "This is a procedure compute_value_at_a_given_point \n"; - std::cout << "level : " << 
level << std::endl; - std::cout << "x : " << x << std::endl; - std::cout << "position : " << position << std::endl; + std::clog << "This is a procedure compute_value_at_a_given_point \n"; + std::clog << "level : " << level << std::endl; + std::clog << "x : " << x << std::endl; + std::clog << "position : " << position << std::endl; } // check if we are not exactly in the grid point: if (almost_equal(position * dx + this->grid_min, x)) { @@ -432,23 +432,23 @@ class Persistence_landscape_on_grid { bool operator==(const Persistence_landscape_on_grid& rhs) const { bool dbg = true; if (this->values_of_landscapes.size() != rhs.values_of_landscapes.size()) { - if (dbg) std::cout << "values_of_landscapes of incompatible sizes\n"; + if (dbg) std::clog << "values_of_landscapes of incompatible sizes\n"; return false; } if (!almost_equal(this->grid_min, rhs.grid_min)) { - if (dbg) std::cout << "grid_min not equal\n"; + if (dbg) std::clog << "grid_min not equal\n"; return false; } if (!almost_equal(this->grid_max, rhs.grid_max)) { - if (dbg) std::cout << "grid_max not equal\n"; + if (dbg) std::clog << "grid_max not equal\n"; return false; } for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) { for (size_t aa = 0; aa != this->values_of_landscapes[i].size(); ++aa) { if (!almost_equal(this->values_of_landscapes[i][aa], rhs.values_of_landscapes[i][aa])) { if (dbg) { - std::cout << "Problem in the position : " << i << " of values_of_landscapes. \n"; - std::cout << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl; + std::clog << "Problem in the position : " << i << " of values_of_landscapes. \n"; + std::clog << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl; } return false; } @@ -615,7 +615,7 @@ class Persistence_landscape_on_grid { double previous_y_l1 = 0; double previous_y_l2 = 0; for (size_t i = 0; i != l1.values_of_landscapes.size(); ++i) { - if (dbg) std::cout << "i : " << i << std::endl; + if (dbg) std::clog << "i : " << i << std::endl; double current_x = previous_x + dx; double current_y_l1 = 0; @@ -625,11 +625,11 @@ class Persistence_landscape_on_grid { if (l2.values_of_landscapes[i].size() > level) current_y_l2 = l2.values_of_landscapes[i][level]; if (dbg) { - std::cout << "previous_x : " << previous_x << std::endl; - std::cout << "previous_y_l1 : " << previous_y_l1 << std::endl; - std::cout << "current_y_l1 : " << current_y_l1 << std::endl; - std::cout << "previous_y_l2 : " << previous_y_l2 << std::endl; - std::cout << "current_y_l2 : " << current_y_l2 << std::endl; + std::clog << "previous_x : " << previous_x << std::endl; + std::clog << "previous_y_l1 : " << previous_y_l1 << std::endl; + std::clog << "current_y_l1 : " << current_y_l1 << std::endl; + std::clog << "previous_y_l2 : " << previous_y_l2 << std::endl; + std::clog << "current_y_l2 : " << current_y_l2 << std::endl; } std::pair l1_coords = compute_parameters_of_a_line(std::make_pair(previous_x, previous_y_l1), @@ -646,11 +646,11 @@ class Persistence_landscape_on_grid { double d = l2_coords.second; if (dbg) { - std::cout << "Here are the formulas for a line: \n"; - std::cout << "a : " << a << std::endl; - std::cout << "b : " << b << std::endl; - std::cout << "c : " << c << std::endl; - std::cout << "d : " << d << std::endl; + std::clog << "Here are the formulas for a line: \n"; + std::clog << "a : " << a << std::endl; + std::clog << "b : " << b << std::endl; + std::clog << "c : " << c << std::endl; + std::clog << "d : " << d << std::endl; } // now, 
to compute the inner product in this interval we need to compute the integral of (ax+b)(cx+d) = acx^2 + @@ -663,11 +663,11 @@ class Persistence_landscape_on_grid { (a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x); if (dbg) { - std::cout << "Value of the integral on the left end i.e. : " << previous_x << " is : " + std::clog << "Value of the integral on the left end i.e. : " << previous_x << " is : " << a * c / 3 * previous_x * previous_x * previous_x + (a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x << std::endl; - std::cout << "Value of the integral on the right end i.e. : " << current_x << " is " + std::clog << "Value of the integral on the right end i.e. : " << current_x << " is " << a * c / 3 * current_x * current_x * current_x + (a * d + b * c) / 2 * current_x * current_x + b * d * current_x << std::endl; @@ -676,8 +676,8 @@ class Persistence_landscape_on_grid { result += added_value; if (dbg) { - std::cout << "added_value : " << added_value << std::endl; - std::cout << "result : " << result << std::endl; + std::clog << "added_value : " << added_value << std::endl; + std::clog << "result : " << result << std::endl; getchar(); } @@ -703,8 +703,8 @@ class Persistence_landscape_on_grid { // time: if (dbg) { - std::cout << "first : " << first << std::endl; - std::cout << "second : " << second << std::endl; + std::clog << "first : " << first << std::endl; + std::clog << "second : " << second << std::endl; getchar(); } @@ -712,14 +712,14 @@ class Persistence_landscape_on_grid { Persistence_landscape_on_grid lan = first - second; if (dbg) { - std::cout << "Difference : " << lan << std::endl; + std::clog << "Difference : " << lan << std::endl; } //| first-second |: lan.abs(); if (dbg) { - std::cout << "Abs : " << lan << std::endl; + std::clog << "Abs : " << lan << std::endl; } if (p < std::numeric_limits::max()) { @@ -727,18 +727,18 @@ class Persistence_landscape_on_grid { double result; if (p != 1) { if (dbg) { - std::cout << "p : " << p << std::endl; + std::clog << "p : " << p << std::endl; getchar(); } result = lan.compute_integral_of_landscape(p); if (dbg) { - std::cout << "integral : " << result << std::endl; + std::clog << "integral : " << result << std::endl; getchar(); } } else { result = lan.compute_integral_of_landscape(); if (dbg) { - std::cout << "integral, without power : " << result << std::endl; + std::clog << "integral, without power : " << result << std::endl; getchar(); } } @@ -820,7 +820,7 @@ class Persistence_landscape_on_grid { this->grid_max = (to_average[0])->grid_max; if (dbg) { - std::cout << "Computations of average. The data from the current landscape have been cleared. We are ready to do " + std::clog << "Computations of average. The data from the current landscape have been cleared. We are ready to do " "the computations. \n"; } @@ -835,7 +835,7 @@ class Persistence_landscape_on_grid { this->values_of_landscapes[grid_point] = std::vector(maximal_size_of_vector); if (dbg) { - std::cout << "We are considering the point : " << grid_point + std::clog << "We are considering the point : " << grid_point << " of the grid. In this point, there are at most : " << maximal_size_of_vector << " nonzero landscape functions \n"; } @@ -931,12 +931,12 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto size_t number_of_points_, unsigned number_of_levels) { bool dbg = false; if (dbg) { - std::cout << "Here is the procedure : set_up_values_of_landscapes. 
The parameters are : grid_min_ : " << grid_min_ + std::clog << "Here is the procedure : set_up_values_of_landscapes. The parameters are : grid_min_ : " << grid_min_ << ", grid_max_ : " << grid_max_ << ", number_of_points_ : " << number_of_points_ << ", number_of_levels: " << number_of_levels << std::endl; - std::cout << "Here are the intervals at our disposal : \n"; + std::clog << "Here are the intervals at our disposal : \n"; for (size_t i = 0; i != p.size(); ++i) { - std::cout << p[i].first << " , " << p[i].second << std::endl; + std::clog << p[i].first << " , " << p[i].second << std::endl; } } @@ -976,17 +976,17 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto size_t grid_interval_midpoint = (size_t)(0.5 * (grid_interval_begin + grid_interval_end)); if (dbg) { - std::cout << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl; + std::clog << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl; - std::cout << "grid_interval_begin : " << grid_interval_begin << std::endl; - std::cout << "grid_interval_end : " << grid_interval_end << std::endl; - std::cout << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl; + std::clog << "grid_interval_begin : " << grid_interval_begin << std::endl; + std::clog << "grid_interval_end : " << grid_interval_end << std::endl; + std::clog << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl; } double landscape_value = dx; for (size_t i = grid_interval_begin + 1; i < grid_interval_midpoint; ++i) { if (dbg) { - std::cout << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value + std::clog << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value << std::endl; } if (number_of_levels != std::numeric_limits::max()) { @@ -1044,7 +1044,7 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto } if (dbg) { - std::cout << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value + std::clog << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value << std::endl; } } @@ -1246,7 +1246,7 @@ void Persistence_landscape_on_grid::plot(const char* filename, double min_x, dou } out << "EOF" << std::endl; } - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; } diff --git a/src/Persistence_representations/include/gudhi/Persistence_vectors.h b/src/Persistence_representations/include/gudhi/Persistence_vectors.h index be985909..fab96900 100644 --- a/src/Persistence_representations/include/gudhi/Persistence_vectors.h +++ b/src/Persistence_representations/include/gudhi/Persistence_vectors.h @@ -189,7 +189,7 @@ class Vector_distances_in_diagram { } out << std::endl; out.close(); - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; } @@ -360,9 +360,9 @@ template void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap(size_t where_to_cut) { bool dbg = false; if (dbg) { - std::cout << "Here are the intervals : \n"; + std::clog << "Here are the intervals : \n"; for 
(size_t i = 0; i != this->intervals.size(); ++i) { - std::cout << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; + std::clog << this->intervals[i].first << " , " << this->intervals[i].second << std::endl; } } where_to_cut = std::min( @@ -385,14 +385,14 @@ void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap 0.5 * (this->intervals[j].first + this->intervals[j].second))))); if (dbg) { - std::cout << "Value : " << value << std::endl; - std::cout << "heap.front() : " << heap.front() << std::endl; + std::clog << "Value : " << value << std::endl; + std::clog << "heap.front() : " << heap.front() << std::endl; getchar(); } if (-value < heap.front()) { if (dbg) { - std::cout << "Replacing : " << heap.front() << " with : " << -value << std::endl; + std::clog << "Replacing : " << heap.front() << " with : " << -value << std::endl; getchar(); } // remove the first element from the heap @@ -431,11 +431,11 @@ void Vector_distances_in_diagram::compute_sorted_vector_of_distances_via_heap } if (dbg) { - std::cout << "This is the heap after all the operations :\n"; + std::clog << "This is the heap after all the operations :\n"; for (size_t i = 0; i != heap.size(); ++i) { - std::cout << heap[i] << " "; + std::clog << heap[i] << " "; } - std::cout << std::endl; + std::clog << std::endl; } this->sorted_vector_of_distances = heap; @@ -519,11 +519,11 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra bool dbg = false; if (dbg) { - std::cout << "Entering double Vector_distances_in_diagram::distance( const Abs_Topological_data_with_distances* " + std::clog << "Entering double Vector_distances_in_diagram::distance( const Abs_Topological_data_with_distances* " "second , double power ) procedure \n"; - std::cout << "Power : " << power << std::endl; - std::cout << "This : " << *this << std::endl; - std::cout << "second : " << second_ << std::endl; + std::clog << "Power : " << power << std::endl; + std::clog << "This : " << *this << std::endl; + std::clog << "second : " << second_ << std::endl; } double result = 0; @@ -531,7 +531,7 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra ++i) { if (power == 1) { if (dbg) { - std::cout << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] + std::clog << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] << " | : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]) << std::endl; } @@ -545,7 +545,7 @@ double Vector_distances_in_diagram::distance(const Vector_distances_in_diagra result = fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]); } if (dbg) { - std::cout << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] + std::clog << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i] << " : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]) << std::endl; } diff --git a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h index 8b348fd1..a5bc1bca 100644 --- a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h +++ b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h @@ -50,28 +50,28 @@ std::vector > read_persistence_intervals_in_one_dimens 
final_barcode.reserve(barcode_initial.size()); if (dbg) { - std::cout << "Here are the intervals that we read from the file : \n"; + std::clog << "Here are the intervals that we read from the file : \n"; for (size_t i = 0; i != barcode_initial.size(); ++i) { - std::cout << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl; + std::clog << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl; } getchar(); } for (size_t i = 0; i != barcode_initial.size(); ++i) { if (dbg) { - std::cout << "Considering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second + std::clog << "Considering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl; } if (barcode_initial[i].first > barcode_initial[i].second) { // note that in this case barcode_initial[i].second != std::numeric_limits::infinity() - if (dbg) std::cout << "Swap and enter \n"; + if (dbg) std::clog << "Swap and enter \n"; // swap them to make sure that birth < death final_barcode.push_back(std::pair(barcode_initial[i].second, barcode_initial[i].first)); continue; } else { if (barcode_initial[i].second != std::numeric_limits::infinity()) { - if (dbg) std::cout << "Simply enters\n"; + if (dbg) std::clog << "Simply enters\n"; // in this case, due to the previous conditions we know that barcode_initial[i].first < // barcode_initial[i].second, so we put them as they are final_barcode.push_back(std::pair(barcode_initial[i].first, barcode_initial[i].second)); @@ -91,11 +91,11 @@ std::vector > read_persistence_intervals_in_one_dimens } if (dbg) { - std::cout << "Here are the final bars that we are sending further : \n"; + std::clog << "Here are the final bars that we are sending further : \n"; for (size_t i = 0; i != final_barcode.size(); ++i) { - std::cout << final_barcode[i].first << " " << final_barcode[i].second << std::endl; + std::clog << final_barcode[i].first << " " << final_barcode[i].second << std::endl; } - std::cout << "final_barcode.size() : " << final_barcode.size() << std::endl; + std::clog << "final_barcode.size() : " << final_barcode.size() << std::endl; getchar(); } diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp index 3d088b58..54b1f77d 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp @@ -17,12 +17,12 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program computes average of persistence heat maps stored in files (the files needs to be " + std::clog << "This program computes average of persistence heat maps stored in files (the files needs to be " << "created beforehand).\n" << "The parameters of this programs are names of files with persistence heat maps.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -46,6 +46,6 @@ int main(int argc, char** argv) { delete maps[i]; } - std::cout << "Average can be found in 'average.mps' file\n"; + std::clog << "Average can be found in 'average.mps' 
file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp index 48000bb1..757a97fc 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp @@ -19,14 +19,14 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program computes distance of persistence heat maps stored in files (the files needs to be " + std::clog << "This program computes distance of persistence heat maps stored in files (the files needs to be " << "created beforehand).\n" << "The first parameter of a program is an integer p. The program compute L^p distance of the two heat " << "maps. For L^infty distance choose p = -1. \n" << "The remaining parameters of this program are names of files with persistence heat maps.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -69,14 +69,14 @@ int main(int argc, char** argv) { out.open("distance.mps"); for (size_t i = 0; i != distance.size(); ++i) { for (size_t j = 0; j != distance.size(); ++j) { - std::cout << distance[i][j] << " "; + std::clog << distance[i][j] << " "; out << distance[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'distance.mps' file\n"; + std::clog << "Distance can be found in 'distance.mps' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp index 8a96f1b0..e7f18ce1 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp @@ -18,12 +18,12 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program computes scalar product of persistence heat maps stored in a file (the file needs to be " + std::clog << "This program computes scalar product of persistence heat maps stored in a file (the file needs to be " << "created beforehand). 
\n" << "The parameters of this programs are names of files with persistence heat maps.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -60,14 +60,14 @@ int main(int argc, char** argv) { out.open("scalar_product.mps"); for (size_t i = 0; i != scalar_product.size(); ++i) { for (size_t j = 0; j != scalar_product.size(); ++j) { - std::cout << scalar_product[i][j] << " "; + std::clog << scalar_product[i][j] << " "; out << scalar_product[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'scalar_product.mps' file\n"; + std::clog << "Distance can be found in 'scalar_product.mps' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp index f82a39b0..6b38b930 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp @@ -20,7 +20,7 @@ using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " << "provided as an input.The Gaussian kernels are weighted by the arc tangential of their persistence.\n" << "The first parameter of a program is an integer, a size of a grid.\n" << "The second and third parameters are min and max of the grid. If you want those numbers to be computed " @@ -36,7 +36,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. 
\n"; if (argc < 7) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -58,7 +58,7 @@ int main(int argc, char** argv) { std::vector > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1); for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl; + std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl; Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension); std::stringstream ss; diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp index 5a657b13..fece2e36 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp @@ -19,7 +19,7 @@ using distance_from_diagonal_scaling = Gudhi::Persistence_representations::dista using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " << "provided as an input.The Gaussian kernels are weighted by the distance of a center from the " << "diagonal.\n" << "The first parameter of a program is an integer, a size of a grid.\n" @@ -36,7 +36,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. 
\n"; if (argc < 7) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -58,7 +58,7 @@ int main(int argc, char** argv) { std::vector > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1); for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl; + std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl; Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension); std::stringstream ss; diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp index 8d67a54d..86e6fc19 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp @@ -21,7 +21,7 @@ using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " << "provided as an input.The Gaussian kernels are weighted by the square of distance of a center from the " << "diagonal.\n" << "The first parameter of a program is an integer, a size of a grid.\n" @@ -38,7 +38,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. 
\n"; if (argc < 7) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -60,7 +60,7 @@ int main(int argc, char** argv) { std::vector > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1); for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl; + std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl; Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension); std::stringstream ss; diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp index 29170c32..ca27f8e3 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp @@ -19,7 +19,7 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) " << "provided as an input.\n" << "The first parameter of a program is an integer, a size of a grid.\n" << "The second and third parameters are min and max of the grid. If you want those numbers to be computed " @@ -35,7 +35,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. \n"; if (argc < 7) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } size_t size_of_grid = (size_t)atoi(argv[1]); @@ -55,7 +55,7 @@ int main(int argc, char** argv) { std::vector > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1); for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a heat map based on file : " << filenames[i] << std::endl; + std::clog << "Creating a heat map based on file : " << filenames[i] << std::endl; Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension); std::stringstream ss; diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp index 995771b9..d2ebcc7e 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp @@ -18,7 +18,7 @@ using PSSK = Gudhi::Persistence_representations::PSSK; int main(int argc, char** argv) { - std::cout << "This program creates PSSK files (*.pssk) of persistence diagrams files (*.pers) " + std::clog << "This program creates PSSK files (*.pssk) of persistence diagrams files (*.pers) " << "provided as an input.\n" << "The first parameter of a program is an integer, a size of a grid.\n" << "The second and third parameters are min and max of the grid. 
If you want those numbers to be computed " @@ -34,7 +34,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. \n"; if (argc < 7) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -56,7 +56,7 @@ int main(int argc, char** argv) { std::vector > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1); for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a PSSK based on a file : " << filenames[i] << std::endl; + std::clog << "Creating a PSSK based on a file : " << filenames[i] << std::endl; PSSK l(filenames[i], filter, size_of_grid, min_, max_, dimension); std::stringstream ss; diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp index cf6e07cb..87cc97d1 100644 --- a/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp +++ b/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp @@ -17,10 +17,10 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps; int main(int argc, char** argv) { - std::cout << "This program creates a gnuplot script from a persistence heat maps stored in a file (the file needs " + std::clog << "This program creates a gnuplot script from a persistence heat maps stored in a file (the file needs " << "to be created beforehand). Please call the code with the name of a single heat maps file \n"; if (argc != 2) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } Persistence_heat_maps l; diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp index 519cc47d..72325cad 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp @@ -18,7 +18,7 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals; int main(int argc, char** argv) { - std::cout << "This program computes the range of birth and death times of persistence pairs in diagrams provided as " + std::clog << "This program computes the range of birth and death times of persistence pairs in diagrams provided as " << "an input.\n" << "The first parameter is the dimension of persistence to be used to create persistence intervals. 
" << "If your file contains the information about dimension of persistence pairs, please provide here the " @@ -27,7 +27,7 @@ int main(int argc, char** argv) { << "The remaining parameters of the program are the names of files with persistence diagrams.\n"; if (argc < 3) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -45,12 +45,12 @@ int main(int argc, char** argv) { double max_ = -std::numeric_limits::max(); for (size_t file_no = 0; file_no != filenames.size(); ++file_no) { - std::cout << "Creating diagram based on a file : " << filenames[file_no] << std::endl; + std::clog << "Creating diagram based on a file : " << filenames[file_no] << std::endl; Persistence_intervals p(filenames[file_no], dimension); std::pair min_max_ = p.get_x_range(); if (min_max_.first < min_) min_ = min_max_.first; if (min_max_.second > max_) max_ = min_max_.second; } - std::cout << "Birth-death range : min: " << min_ << ", max: " << max_ << std::endl; + std::clog << "Birth-death range : min: " << min_ << ", max: " << max_ << std::endl; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp index 6155727a..465bf72e 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp @@ -18,7 +18,7 @@ using Persistence_intervals_with_distances = Gudhi::Persistence_representations::Persistence_intervals_with_distances; int main(int argc, char** argv) { - std::cout << "This program computes the bottleneck distance of persistence pairs in diagrams provided as " + std::clog << "This program computes the bottleneck distance of persistence pairs in diagrams provided as " << "an input.\n" << "The first parameter is the dimension of persistence to be used to create persistence intervals. 
" << "If your file contains the information about dimension of persistence pairs, please provide here the " @@ -27,7 +27,7 @@ int main(int argc, char** argv) { << "The remaining parameters of the program are the names of files with persistence diagrams.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -70,14 +70,14 @@ int main(int argc, char** argv) { out.open("distance.itv"); for (size_t i = 0; i != distance.size(); ++i) { for (size_t j = 0; j != distance.size(); ++j) { - std::cout << distance[i][j] << " "; + std::clog << distance[i][j] << " "; out << distance[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'distance.itv' file\n"; + std::clog << "Distance can be found in 'distance.itv' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp index dd6e1a5b..ea1fe717 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp @@ -18,10 +18,10 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals; int main(int argc, char** argv) { - std::cout << "This program compute the dominant intervals. A number of intervals to be displayed is a parameter of " + std::clog << "This program compute the dominant intervals. A number of intervals to be displayed is a parameter of " "this program. 
\n"; if (argc != 4) { - std::cout << "To run this program, please provide the name of a file with persistence diagram, dimension of " + std::clog << "To run this program, please provide the name of a file with persistence diagram, dimension of " "intervals that should be taken into account (if your file contains only persistence pairs in a " "single dimension, set it up to -1) and number of dominant intervals you would like to get \n"; return 1; @@ -33,9 +33,9 @@ int main(int argc, char** argv) { } Persistence_intervals p(argv[1], dimension); std::vector > dominant_intervals = p.dominant_intervals(atoi(argv[3])); - std::cout << "Here are the dominant intervals : " << std::endl; + std::clog << "Here are the dominant intervals : " << std::endl; for (size_t i = 0; i != dominant_intervals.size(); ++i) { - std::cout << " " << dominant_intervals[i].first << "," << dominant_intervals[i].second << " " << std::endl; + std::clog << " " << dominant_intervals[i].first << "," << dominant_intervals[i].second << " " << std::endl; } return 0; diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp index 13d2133f..e5eec3f5 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp @@ -18,10 +18,10 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals; int main(int argc, char** argv) { - std::cout << "This program computes a histogram of barcode's length. A number of bins in the histogram is a " + std::clog << "This program computes a histogram of barcode's length. A number of bins in the histogram is a " << "parameter of this program. \n"; if ((argc != 3) && (argc != 4)) { - std::cout << "To run this program, please provide the name of a file with persistence diagram and number of " + std::clog << "To run this program, please provide the name of a file with persistence diagram and number of " << "dominant intervals you would like to get. Set a negative number dominant intervals value " << "If your file contains only birth-death pairs.\n" << "The third parameter is the dimension of the persistence that is to be used. 
If your " @@ -59,7 +59,7 @@ int main(int argc, char** argv) { out << std::endl; out.close(); - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp index 451be77f..27c69e07 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp @@ -19,7 +19,7 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in int main(int argc, char** argv) { if ((argc != 3) && (argc != 2)) { - std::cout << "This program creates a gnuplot script of Betti numbers from a single persistence diagram file" + std::clog << "This program creates a gnuplot script of Betti numbers from a single persistence diagram file" << "(*.pers).\n" << "To run this program, please provide the name of a file with persistence diagram.\n" << "The second optional parameter of a program is the dimension of the persistence that is to be used. " @@ -68,7 +68,7 @@ int main(int argc, char** argv) { out << std::endl; out.close(); - std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" + std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'" << gnuplot_script.str().c_str() << "\'\"" << std::endl; return 0; diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp index 09a56869..199a3266 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp +++ b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp @@ -19,7 +19,7 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in int main(int argc, char** argv) { if ((argc != 3) && (argc != 2)) { - std::cout << "This program creates a gnuplot script from a single persistence diagram file (*.pers).\n" + std::clog << "This program creates a gnuplot script from a single persistence diagram file (*.pers).\n" << "To run this program, please provide the name of a file with persistence diagram.\n" << "The second optional parameter of a program is the dimension of the persistence that is to be used. 
" << "If your file contains only birth-death pairs, you can skip this parameter.\n"; diff --git a/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp index 04a0ada4..612e9700 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp @@ -16,13 +16,13 @@ using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape; int main(int argc, char** argv) { - std::cout << "This program computes average of persistence landscapes stored in files (the files needs to be " + std::clog << "This program computes average of persistence landscapes stored in files (the files needs to be " << "created beforehand).\n" << "The parameters of this programs are names of files with persistence landscapes.\n"; std::vector filenames; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -46,6 +46,6 @@ int main(int argc, char** argv) { delete lands[i]; } - std::cout << "Average can be found in 'average.land' file\n"; + std::clog << "Average can be found in 'average.land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp index 1093c1aa..2246a37d 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp @@ -18,14 +18,14 @@ using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape; int main(int argc, char** argv) { - std::cout << "This program computes distance of persistence landscapes stored in files (the files needs to be " + std::clog << "This program computes distance of persistence landscapes stored in files (the files needs to be " << "created beforehand).\n" << "The first parameter of a program is an integer p. The program compute L^p distance of the two heat " << "maps. For L^infty distance choose p = -1. 
\n" << "The remaining parameters of this program are names of files with persistence landscapes.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -68,14 +68,14 @@ int main(int argc, char** argv) { out.open("distance.land"); for (size_t i = 0; i != distance.size(); ++i) { for (size_t j = 0; j != distance.size(); ++j) { - std::cout << distance[i][j] << " "; + std::clog << distance[i][j] << " "; out << distance[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'distance.land' file\n"; + std::clog << "Distance can be found in 'distance.land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp index 16b76497..44f50543 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp @@ -17,12 +17,12 @@ using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape; int main(int argc, char** argv) { - std::cout << "This program computes scalar product of persistence landscapes stored in a file (the file needs to be " + std::clog << "This program computes scalar product of persistence landscapes stored in a file (the file needs to be " << "created beforehand). \n" << "The parameters of this programs are names of files with persistence landscapes.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -59,14 +59,14 @@ int main(int argc, char** argv) { out.open("scalar_product.land"); for (size_t i = 0; i != scalar_product.size(); ++i) { for (size_t j = 0; j != scalar_product.size(); ++j) { - std::cout << scalar_product[i][j] << " "; + std::clog << scalar_product[i][j] << " "; out << scalar_product[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'scalar_product.land' file\n"; + std::clog << "Distance can be found in 'scalar_product.land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp index 4d772086..fab5c75f 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp @@ -18,7 +18,7 @@ using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape; int main(int argc, char** argv) { - std::cout << "This program creates persistence landscapes files (*.land) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence landscapes files (*.land) of persistence diagrams files (*.pers) " << "provided as an input.\n" << "The first parameter of this program is a dimension of persistence that will be used in creation of " << "the persistence heat maps." 
@@ -29,7 +29,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. \n"; if (argc < 3) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } std::vector filenames; @@ -43,7 +43,7 @@ int main(int argc, char** argv) { } for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating a landscape based on file : " << filenames[i] << std::endl; + std::clog << "Creating a landscape based on file : " << filenames[i] << std::endl; Persistence_landscape l(filenames[i], dimension); std::stringstream ss; ss << filenames[i] << ".land"; diff --git a/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp index 1fe03640..da9b9bba 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp @@ -16,10 +16,10 @@ using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape; int main(int argc, char** argv) { - std::cout << "This program creates a gnuplot script from a persistence landscape stored in a file (the file needs " + std::clog << "This program creates a gnuplot script from a persistence landscape stored in a file (the file needs " << "to be created beforehand). Please call the code with the name of a single landscape file.\n"; if (argc != 2) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp index f92cde72..39f7a67f 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp @@ -16,12 +16,12 @@ using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid; int main(int argc, char** argv) { - std::cout << "This program computes average of persistence landscapes on grid stored in files (the files needs to " + std::clog << "This program computes average of persistence landscapes on grid stored in files (the files needs to " << "be created beforehand).\n" << "The parameters of this programs are names of files with persistence landscapes on grid.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -46,6 +46,6 @@ int main(int argc, char** argv) { delete lands[i]; } - std::cout << "Average can be found in 'average.g_land' file\n"; + std::clog << "Average can be found in 'average.g_land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp index baec6aeb..01fd09d8 100644 --- 
a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp @@ -18,14 +18,14 @@ using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid; int main(int argc, char** argv) { - std::cout << "This program computes distance of persistence landscapes on grid stored in files (the files needs to " + std::clog << "This program computes distance of persistence landscapes on grid stored in files (the files needs to " << "be created beforehand).\n" << "The first parameter of a program is an integer p. The program compute L^p distance of the two heat " << "maps. For L^infty distance choose p = -1. \n" << "The remaining parameters of this program are names of files with persistence landscapes on grid.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -68,14 +68,14 @@ int main(int argc, char** argv) { out.open("distance.g_land"); for (size_t i = 0; i != distance.size(); ++i) { for (size_t j = 0; j != distance.size(); ++j) { - std::cout << distance[i][j] << " "; + std::clog << distance[i][j] << " "; out << distance[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'distance.g_land' file\n"; + std::clog << "Distance can be found in 'distance.g_land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp index e94dacdb..71c2f419 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp @@ -17,13 +17,13 @@ using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid; int main(int argc, char** argv) { - std::cout + std::clog << "This program computes scalar product of persistence landscapes on grid stored in a file (the file needs to " << "be created beforehand). 
\n" << "The parameters of this programs are names of files with persistence landscapes on grid.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -60,14 +60,14 @@ int main(int argc, char** argv) { out.open("scalar_product.g_land"); for (size_t i = 0; i != scalar_product.size(); ++i) { for (size_t j = 0; j != scalar_product.size(); ++j) { - std::cout << scalar_product[i][j] << " "; + std::clog << scalar_product[i][j] << " "; out << scalar_product[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'scalar_product.g_land' file\n"; + std::clog << "Distance can be found in 'scalar_product.g_land' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp index d510c3df..788313c4 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp @@ -18,7 +18,7 @@ using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid; int main(int argc, char** argv) { - std::cout << "This program creates persistence landscapes on grid files (*.g_land) of persistence diagrams files " + std::clog << "This program creates persistence landscapes on grid files (*.g_land) of persistence diagrams files " << "(*.pers) provided as an input.\n" << "The first parameter of a program is an integer, a size of a grid.\n" << "The second and third parameters are min and max of the grid. If you want those numbers to be computed " @@ -32,7 +32,7 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. 
\n"; if (argc < 6) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } @@ -51,7 +51,7 @@ int main(int argc, char** argv) { } for (size_t i = 0; i != filenames.size(); ++i) { - std::cout << "Creating persistence landscape on a grid based on a file : " << filenames[i] << std::endl; + std::clog << "Creating persistence landscape on a grid based on a file : " << filenames[i] << std::endl; Persistence_landscape_on_grid l; if ((min_ != -1) || (max_ != -1)) { l = Persistence_landscape_on_grid(filenames[i], min_, max_, size_of_grid, dimension); diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp index 4e20f37f..ec6112b5 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp @@ -16,11 +16,11 @@ using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid; int main(int argc, char** argv) { - std::cout << "This program creates a gnuplot script from a persistence landscape on grid stored in a file (the file " + std::clog << "This program creates a gnuplot script from a persistence landscape on grid stored in a file (the file " << "needs to be created beforehand). Please call the code with the name of a single landscape on grid file" << ".\n"; if (argc != 2) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } diff --git a/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp index 89e42f0f..4eb32eb3 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp +++ b/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp @@ -17,12 +17,12 @@ using Euclidean_distance = Gudhi::Euclidean_distance; using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram; int main(int argc, char** argv) { - std::cout << "This program computes average of persistence vectors stored in files (the files needs to " + std::clog << "This program computes average of persistence vectors stored in files (the files needs to " << "be created beforehand).\n" << "The parameters of this programs are names of files with persistence vectors.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -47,7 +47,7 @@ int main(int argc, char** argv) { delete lands[i]; } - std::cout << "Done \n"; + std::clog << "Done \n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp index 541dd25f..236981a3 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp +++ 
b/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp @@ -19,14 +19,14 @@ using Euclidean_distance = Gudhi::Euclidean_distance; using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram; int main(int argc, char** argv) { - std::cout << "This program compute distance of persistence vectors stored in a file (the file needs to be created " + std::clog << "This program computes distance of persistence vectors stored in a file (the file needs to be created " "beforehand). \n"; - std::cout << "The first parameter of a program is an integer p. The program compute l^p distance of the vectors. For " + std::clog << "The first parameter of a program is an integer p. The program computes l^p distance of the vectors. For " "l^infty distance choose p = -1. \n"; - std::cout << "The remaining parameters of this programs are names of files with persistence vectors.\n"; + std::clog << "The remaining parameters of this program are names of files with persistence vectors.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -69,14 +69,14 @@ int main(int argc, char** argv) { out.open("distance.vect"); for (size_t i = 0; i != distance.size(); ++i) { for (size_t j = 0; j != distance.size(); ++j) { - std::cout << distance[i][j] << " "; + std::clog << distance[i][j] << " "; out << distance[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'distance.vect' file\n"; + std::clog << "Distance can be found in 'distance.vect' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp index bbc50c98..c6ea0e1c 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp +++ b/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp @@ -19,12 +19,12 @@ using Euclidean_distance = Gudhi::Euclidean_distance; using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram; int main(int argc, char** argv) { - std::cout << "This program computes scalar product of persistence vectors stored in a file (the file needs to " + std::clog << "This program computes scalar product of persistence vectors stored in a file (the file needs to " << "be created beforehand). 
\n" << "The parameters of this programs are names of files with persistence vectors.\n"; if (argc < 3) { - std::cout << "Wrong number of parameters, the program will now terminate \n"; + std::clog << "Wrong number of parameters, the program will now terminate \n"; return 1; } @@ -61,14 +61,14 @@ int main(int argc, char** argv) { out.open("scalar_product.vect"); for (size_t i = 0; i != scalar_product.size(); ++i) { for (size_t j = 0; j != scalar_product.size(); ++j) { - std::cout << scalar_product[i][j] << " "; + std::clog << scalar_product[i][j] << " "; out << scalar_product[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; out << std::endl; } out.close(); - std::cout << "Distance can be found in 'scalar_product.vect' file\n"; + std::clog << "Distance can be found in 'scalar_product.vect' file\n"; return 0; } diff --git a/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp index f974c3d3..608e04e5 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp +++ b/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp @@ -19,7 +19,7 @@ using Euclidean_distance = Gudhi::Euclidean_distance; using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram; int main(int argc, char** argv) { - std::cout << "This program creates persistence vectors files (*.vect) of persistence diagrams files (*.pers) " + std::clog << "This program creates persistence vectors files (*.vect) of persistence diagrams files (*.pers) " << "provided as an input.\n" << "The first parameter of this program is a dimension of persistence that will be used in creation of " << "the persistence heat maps." @@ -30,11 +30,11 @@ int main(int argc, char** argv) { << "The remaining parameters are the names of files with persistence diagrams. \n"; if (argc < 3) { - std::cout << "Wrong parameter list, the program will now terminate \n"; + std::clog << "Wrong parameter list, the program will now terminate \n"; return 1; } - std::cout << "The remaining parameters are the names of files with persistence diagrams. \n"; + std::clog << "The remaining parameters are the names of files with persistence diagrams. \n"; int dim = atoi(argv[1]); unsigned dimension = std::numeric_limits::max(); if (dim >= 0) { diff --git a/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp index de08fcfe..2decb134 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp +++ b/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp @@ -17,10 +17,10 @@ using Euclidean_distance = Gudhi::Euclidean_distance; using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram; int main(int argc, char** argv) { - std::cout << "This program create a Gnuplot script to plot persistence vector. Please call this program with the " + std::clog << "This program create a Gnuplot script to plot persistence vector. Please call this program with the " "name of file with persistence vector. \n"; if (argc != 2) { - std::cout << "Wrong number of parameters, the program will now terminate. \n"; + std::clog << "Wrong number of parameters, the program will now terminate. 
\n"; return 1; } Vector_distances_in_diagram l; diff --git a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp index 45757002..030b072a 100644 --- a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp +++ b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp @@ -74,7 +74,7 @@ int main(int argc, char * argv[]) { Rips_complex rips_complex_from_file(off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance()); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Compute Rips graph in " << elapsed_sec << " ms.\n"; + std::clog << "Compute Rips graph in " << elapsed_sec << " ms.\n"; // Construct the Rips complex in a Simplex Tree Simplex_tree st; @@ -86,16 +86,16 @@ int main(int argc, char * argv[]) { end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Compute Rips complex in " << elapsed_sec << " ms.\n"; - std::cout << " - dimension = " << st.dimension() << std::endl; - std::cout << " - number of simplices = " << st.num_simplices() << std::endl; + std::clog << "Compute Rips complex in " << elapsed_sec << " ms.\n"; + std::clog << " - dimension = " << st.dimension() << std::endl; + std::clog << " - number of simplices = " << st.num_simplices() << std::endl; // Sort the simplices in the order of the filtration start = std::chrono::system_clock::now(); st.initialize_filtration(); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Order the simplices of the filtration in " << elapsed_sec << " ms.\n"; + std::clog << "Order the simplices of the filtration in " << elapsed_sec << " ms.\n"; // Copy the keys inside the simplices start = std::chrono::system_clock::now(); @@ -106,22 +106,22 @@ int main(int argc, char * argv[]) { } end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Copied the keys inside the simplices in " << elapsed_sec << " ms.\n"; + std::clog << "Copied the keys inside the simplices in " << elapsed_sec << " ms.\n"; // Convert the simplex tree into a hasse diagram start = std::chrono::system_clock::now(); Gudhi::Hasse_complex<> hcpx(st); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Convert the simplex tree into a Hasse diagram in " << elapsed_sec << " ms.\n"; + std::clog << "Convert the simplex tree into a Hasse diagram in " << elapsed_sec << " ms.\n"; - std::cout << "Timings when using a simplex tree: \n"; + std::clog << "Timings when using a simplex tree: \n"; timing_persistence(st, p); timing_persistence(st, q); timing_persistence(st, p, q); - std::cout << "Timings when using a Hasse complex: \n"; + std::clog << "Timings when using a Hasse complex: \n"; timing_persistence(hcpx, p); timing_persistence(hcpx, q); timing_persistence(hcpx, p, q); @@ -130,7 +130,7 @@ int main(int argc, char * argv[]) { } end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << "Running the complex destructors in " << elapsed_sec << " ms.\n"; + std::clog << "Running the complex destructors in " << elapsed_sec << " ms.\n"; return 0; } @@ -145,13 +145,13 @@ timing_persistence(FilteredComplex & cpx Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, Field_Zp > 
pcoh(cpx); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Initialize pcoh in " << elapsed_sec << " ms.\n"; + std::clog << " Initialize pcoh in " << elapsed_sec << " ms.\n"; // initializes the coefficient field for homology start = std::chrono::system_clock::now(); pcoh.init_coefficients(p); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Initialize the coefficient field in " << elapsed_sec << " ms.\n"; + std::clog << " Initialize the coefficient field in " << elapsed_sec << " ms.\n"; start = std::chrono::system_clock::now(); @@ -159,12 +159,12 @@ timing_persistence(FilteredComplex & cpx end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Compute persistent homology in Z/" << p << "Z in " << elapsed_sec << " ms.\n"; + std::clog << " Compute persistent homology in Z/" << p << "Z in " << elapsed_sec << " ms.\n"; start = std::chrono::system_clock::now(); } end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Run the persistence destructors in " << elapsed_sec << " ms.\n"; + std::clog << " Run the persistence destructors in " << elapsed_sec << " ms.\n"; } template< typename FilteredComplex> @@ -179,13 +179,13 @@ timing_persistence(FilteredComplex & cpx Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, Multi_field > pcoh(cpx); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Initialize pcoh in " << elapsed_sec << " ms.\n"; + std::clog << " Initialize pcoh in " << elapsed_sec << " ms.\n"; // initializes the coefficient field for homology start = std::chrono::system_clock::now(); pcoh.init_coefficients(p, q); end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Initialize the coefficient field in " << elapsed_sec << " ms.\n"; + std::clog << " Initialize the coefficient field in " << elapsed_sec << " ms.\n"; // compute persistent homology, disgarding persistent features of life shorter than min_persistence start = std::chrono::system_clock::now(); @@ -194,11 +194,11 @@ timing_persistence(FilteredComplex & cpx end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Compute multi-field persistent homology in all coefficient fields Z/pZ " + std::clog << " Compute multi-field persistent homology in all coefficient fields Z/pZ " << "with p in [" << p << ";" << q << "] in " << elapsed_sec << " ms.\n"; start = std::chrono::system_clock::now(); } end = std::chrono::system_clock::now(); elapsed_sec = std::chrono::duration_cast(end - start).count(); - std::cout << " Run the persistence destructors in " << elapsed_sec << " ms.\n"; + std::clog << " Run the persistence destructors in " << elapsed_sec << " ms.\n"; } diff --git a/src/Persistent_cohomology/example/custom_persistence_sort.cpp b/src/Persistent_cohomology/example/custom_persistence_sort.cpp index be74cf50..87e9c207 100644 --- a/src/Persistent_cohomology/example/custom_persistence_sort.cpp +++ b/src/Persistent_cohomology/example/custom_persistence_sort.cpp @@ -70,26 +70,26 @@ struct cmp_intervals_by_dim_then_length { int main(int argc, char **argv) { std::vector points = random_points(); - std::cout << "Points size=" << points.size() << std::endl; + std::clog 
<< "Points size=" << points.size() << std::endl; // Alpha complex persistence computation from generated points Alpha_complex alpha_complex_from_points(points); - std::cout << "alpha_complex_from_points" << std::endl; + std::clog << "alpha_complex_from_points" << std::endl; Simplex_tree simplex; - std::cout << "simplex" << std::endl; + std::clog << "simplex" << std::endl; if (alpha_complex_from_points.create_complex(simplex, 0.6)) { - std::cout << "simplex" << std::endl; + std::clog << "simplex" << std::endl; // ---------------------------------------------------------------------------- // Display information about the alpha complex // ---------------------------------------------------------------------------- - std::cout << "Simplicial complex is of dimension " << simplex.dimension() << + std::clog << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." << std::endl; // Sort the simplices in the order of the filtration simplex.initialize_filtration(); - std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl; + std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl; Persistent_cohomology pcoh(simplex); @@ -102,23 +102,23 @@ int main(int argc, char **argv) { auto persistent_pairs = pcoh.get_persistent_pairs(); std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); for (auto pair : persistent_pairs) { - std::cout << simplex.dimension(get<0>(pair)) << " " + std::clog << simplex.dimension(get<0>(pair)) << " " << simplex.filtration(get<0>(pair)) << " " << simplex.filtration(get<1>(pair)) << std::endl; } // Persistent Betti numbers - std::cout << "The persistent Betti numbers in interval [0.40, 0.41] are : "; + std::clog << "The persistent Betti numbers in interval [0.40, 0.41] are : "; for (int dim = 0; dim < simplex.dimension(); dim++) - std::cout << "b" << dim << " = " << pcoh.persistent_betti_number(dim, 0.40, 0.41) << " ; "; - std::cout << std::endl; + std::clog << "b" << dim << " = " << pcoh.persistent_betti_number(dim, 0.40, 0.41) << " ; "; + std::clog << std::endl; // Betti numbers std::vector betti_numbers = pcoh.betti_numbers(); - std::cout << "The Betti numbers are : "; + std::clog << "The Betti numbers are : "; for (std::size_t i = 0; i < betti_numbers.size(); i++) - std::cout << "b" << i << " = " << betti_numbers[i] << " ; "; - std::cout << std::endl; + std::clog << "b" << i << " = " << betti_numbers[i] << " ; "; + std::clog << std::endl; } return 0; } diff --git a/src/Persistent_cohomology/example/persistence_from_file.cpp b/src/Persistent_cohomology/example/persistence_from_file.cpp index d169cc63..79108730 100644 --- a/src/Persistent_cohomology/example/persistence_from_file.cpp +++ b/src/Persistent_cohomology/example/persistence_from_file.cpp @@ -37,9 +37,9 @@ int main(int argc, char * argv[]) { program_options(argc, argv, simplex_tree_file, output_file, p, min_persistence); - std::cout << "Simplex_tree from file=" << simplex_tree_file.c_str() << " - output_file=" << output_file.c_str() + std::clog << "Simplex_tree from file=" << simplex_tree_file.c_str() << " - output_file=" << output_file.c_str() << std::endl; - std::cout << " - p=" << p << " - min_persistence=" << min_persistence << std::endl; + std::clog << " - p=" << p << " - min_persistence=" << min_persistence << std::endl; // Read the list of simplices from a file. 
Simplex_tree<> simplex_tree; @@ -47,16 +47,16 @@ int main(int argc, char * argv[]) { std::ifstream simplex_tree_stream(simplex_tree_file); simplex_tree_stream >> simplex_tree; - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices" << std::endl; - std::cout << " - dimension " << simplex_tree.dimension() << std::endl; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices" << std::endl; + std::clog << " - dimension " << simplex_tree.dimension() << std::endl; /* - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for( auto f_simplex : simplex_tree.filtration_simplex_range() ) - { std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + { std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for( auto vertex : simplex_tree.simplex_vertex_range(f_simplex) ) - { std::cout << vertex << " "; } - std::cout << std::endl; + { std::clog << vertex << " "; } + std::clog << std::endl; }*/ // Sort the simplices in the order of the filtration @@ -96,7 +96,7 @@ void program_options(int argc, char * argv[] visible.add_options() ("help,h", "produce help message") ("output-file,o", po::value(&output_file)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout") + "Name of file in which the persistence diagram is written. Default print in std::clog") ("field-charac,p", po::value(&p)->default_value(11), "Characteristic p of the coefficient field Z/pZ for computing homology.") ("min-persistence,m", po::value(&min_persistence), @@ -114,17 +114,17 @@ void program_options(int argc, char * argv[] po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." 
<< std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp index 3c91662f..bffaabdd 100644 --- a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp +++ b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp @@ -51,62 +51,62 @@ int main(int argc, char * const argv[]) { } // TEST OF INSERTION - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF INSERTION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF INSERTION" << std::endl; Simplex_tree st; // ++ FIRST - std::cout << " - INSERT (0,1,2)" << std::endl; + std::clog << " - INSERT (0,1,2)" << std::endl; typeVectorVertex SimplexVector = {0, 1, 2}; st.insert_simplex_and_subfaces(SimplexVector, 0.3); // ++ SECOND - std::cout << " - INSERT 3" << std::endl; + std::clog << " - INSERT 3" << std::endl; SimplexVector = {3}; st.insert_simplex_and_subfaces(SimplexVector, 0.1); // ++ THIRD - std::cout << " - INSERT (0,3)" << std::endl; + std::clog << " - INSERT (0,3)" << std::endl; SimplexVector = {0, 3}; st.insert_simplex_and_subfaces(SimplexVector, 0.2); // ++ FOURTH - std::cout << " - INSERT (0,1) (already inserted)" << std::endl; + std::clog << " - INSERT (0,1) (already inserted)" << std::endl; SimplexVector = {0, 1}; st.insert_simplex_and_subfaces(SimplexVector, 0.2); // ++ FIFTH - std::cout << " - INSERT (3,4,5)" << std::endl; + std::clog << " - INSERT (3,4,5)" << std::endl; SimplexVector = {3, 4, 5}; st.insert_simplex_and_subfaces(SimplexVector, 0.3); // ++ SIXTH - std::cout << " - INSERT (0,1,6,7)" << std::endl; + std::clog << " - INSERT (0,1,6,7)" << std::endl; SimplexVector = {0, 1, 6, 7}; st.insert_simplex_and_subfaces(SimplexVector, 0.4); // ++ SEVENTH - std::cout << " - INSERT (4,5,8,9)" << std::endl; + std::clog << " - INSERT (4,5,8,9)" << std::endl; SimplexVector = {4, 5, 8, 9}; st.insert_simplex_and_subfaces(SimplexVector, 0.4); // ++ EIGHTH - std::cout << " - INSERT (9,10,11)" << std::endl; + std::clog << " - INSERT (9,10,11)" << std::endl; SimplexVector = {9, 10, 11}; st.insert_simplex_and_subfaces(SimplexVector, 0.3); // ++ NINETH - std::cout << " - INSERT (2,10,12)" << std::endl; + std::clog << " - INSERT (2,10,12)" << std::endl; SimplexVector = {2, 10, 12}; st.insert_simplex_and_subfaces(SimplexVector, 0.3); // ++ TENTH - std::cout << " - INSERT (11,6)" << std::endl; + std::clog << " - INSERT (11,6)" << std::endl; SimplexVector = {6, 11}; st.insert_simplex_and_subfaces(SimplexVector, 0.2); // ++ ELEVENTH - std::cout << " - INSERT (13,14,15)" << std::endl; + std::clog << " - INSERT (13,14,15)" << std::endl; SimplexVector = {13, 14, 15}; st.insert_simplex_and_subfaces(SimplexVector, 0.25); @@ -131,24 +131,24 @@ int main(int argc, char * const argv[]) { /* An edge [10,12,2] */ - std::cout << "The complex contains " << st.num_simplices() << " simplices - " << st.num_vertices() << " vertices " + std::clog << "The complex contains " << st.num_simplices() << " simplices - " << st.num_vertices() << " vertices " << std::endl; - std::cout << " - dimension " << st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, 
with [filtration value]:" + std::clog << " - dimension " << st.dimension() << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; - std::cout << "**************************************************************" << std::endl; - std::cout << "strict graph G { " << std::endl; + std::clog << "**************************************************************" << std::endl; + std::clog << "strict graph G { " << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << static_cast(vertex) << " -- "; + std::clog << static_cast(vertex) << " -- "; } - std::cout << ";" << std::endl; + std::clog << ";" << std::endl; } - std::cout << "}" << std::endl; - std::cout << "**************************************************************" << std::endl; + std::clog << "}" << std::endl; + std::clog << "**************************************************************" << std::endl; // Compute the persistence diagram of the complex Persistent_cohomology pcoh(st); diff --git a/src/Persistent_cohomology/example/plain_homology.cpp b/src/Persistent_cohomology/example/plain_homology.cpp index 84333e46..4d329020 100644 --- a/src/Persistent_cohomology/example/plain_homology.cpp +++ b/src/Persistent_cohomology/example/plain_homology.cpp @@ -83,9 +83,9 @@ int main() { pcoh.output_diagram(); // Print the Betti numbers are b0=2 and b1=2. - std::cout << std::endl; - std::cout << "The Betti numbers are : "; + std::clog << std::endl; + std::clog << "The Betti numbers are : "; for (int i = 0; i < 3; i++) - std::cout << "b" << i << " = " << pcoh.betti_number(i) << " ; "; - std::cout << std::endl; + std::clog << "b" << i << " = " << pcoh.betti_number(i) << " ; "; + std::clog << std::endl; } diff --git a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp index 9eb5ccfc..e2e2c0a5 100644 --- a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp +++ b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp @@ -56,8 +56,8 @@ int main(int argc, char * argv[]) { Simplex_tree simplex_tree; rips_complex_from_file.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -99,7 +99,7 @@ void program_options(int argc, char * argv[] visible.add_options() ("help,h", "produce help message") ("output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout") + "Name of file in which the persistence diagram is written. 
Default print in std::clog") ("max-edge-length,r", po::value(&threshold)->default_value(0), "Maximal length of an edge for the Rips complex construction.") ("cpx-dimension,d", po::value(&dim_max)->default_value(1), @@ -123,20 +123,20 @@ void program_options(int argc, char * argv[] po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with various coefficient fields \n"; - std::cout << "of a Rips complex defined on a set of input points. The coefficient \n"; - std::cout << "fields are all the Z/rZ for a prime number r contained in the \n"; - std::cout << "specified range [p,q]\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p1*...*pr dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p1*...*pr is the product of prime numbers pi such that the homology \n"; - std::cout << "feature exists in homology with Z/piZ coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with various coefficient fields \n"; + std::clog << "of a Rips complex defined on a set of input points. The coefficient \n"; + std::clog << "fields are all the Z/rZ for a prime number r contained in the \n"; + std::clog << "specified range [p,q]\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p1*...*pr dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p1*...*pr is the product of prime numbers pi such that the homology \n"; + std::clog << "feature exists in homology with Z/piZ coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp index 02db05ec..7da9f15d 100644 --- a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp +++ b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp @@ -73,8 +73,8 @@ int main(int argc, char * argv[]) { // expand the graph until dimension dim_max st.expansion(dim_max); - std::cout << "The complex contains " << st.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << st.dimension() << " \n"; + std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << st.dimension() << " \n"; // Sort the simplices in the order of the filtration st.initialize_filtration(); @@ -115,7 +115,7 @@ void program_options(int argc, char * argv[] visible.add_options() ("help,h", "produce help message") ("output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout") + "Name of file in which the persistence diagram is written. 
Default print in std::clog") ("max-edge-length,r", po::value(&threshold)->default_value(std::numeric_limits::infinity()), "Maximal length of an edge for the Rips complex construction.") @@ -138,17 +138,17 @@ void program_options(int argc, char * argv[] po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp index 37fa5e93..db456f70 100644 --- a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp +++ b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp @@ -64,8 +64,8 @@ int main(int argc, char * argv[]) { Simplex_tree& st = *new Simplex_tree; rips_complex_from_file.create_complex(st, dim_max); - std::cout << "The complex contains " << st.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << st.dimension() << " \n"; + std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << st.dimension() << " \n"; #ifdef GUDHI_USE_TBB // Unnecessary, but clarifies which operations are parallel. @@ -122,7 +122,7 @@ void program_options(int argc, char * argv[] visible.add_options() ("help,h", "produce help message") ("output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout") + "Name of file in which the persistence diagram is written. 
Default print in std::clog") ("max-edge-length,r", po::value(&threshold)->default_value(0), "Maximal length of an edge for the Rips complex construction.") ("cpx-dimension,d", po::value(&dim_max)->default_value(1), @@ -144,17 +144,17 @@ void program_options(int argc, char * argv[] po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index 0f1876d0..f556a064 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -561,7 +561,7 @@ class Persistent_cohomology { * p1*...*pr is the product of prime numbers pi such that the homology * feature exists in homology with Z/piZ coefficients. */ - void output_diagram(std::ostream& ostream = std::cout) { + void output_diagram(std::ostream& ostream = std::clog) { cmp_intervals_by_length cmp(cpx_); std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); bool has_infinity = std::numeric_limits::has_infinity; diff --git a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp index b9f11607..7a2feeff 100644 --- a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp +++ b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp @@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers ) // 2 1 0 inf // means that in Z/2Z-homology, the Betti numbers are b0=2 and b1=1. 
- std::cout << "BETTI NUMBERS" << std::endl; + std::clog << "BETTI NUMBERS" << std::endl; BOOST_CHECK(pcoh.betti_number(0) == 2); BOOST_CHECK(pcoh.betti_number(1) == 1); @@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers ) BOOST_CHECK(bns[1] == 1); BOOST_CHECK(bns[2] == 0); - std::cout << "GET PERSISTENT PAIRS" << std::endl; + std::clog << "GET PERSISTENT PAIRS" << std::endl; // Custom sort and output persistence cmp_intervals_by_dim_then_length cmp(&st); @@ -118,12 +118,12 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers ) BOOST_CHECK(st.filtration(get<0>(persistent_pairs[2])) == 0); BOOST_CHECK(get<1>(persistent_pairs[2]) == st.null_simplex()); - std::cout << "INTERVALS IN DIMENSION" << std::endl; + std::clog << "INTERVALS IN DIMENSION" << std::endl; auto intervals_in_dimension_0 = pcoh.intervals_in_dimension(0); - std::cout << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl; + std::clog << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl; for (std::size_t i = 0; i < intervals_in_dimension_0.size(); i++) - std::cout << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," << + std::clog << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," << intervals_in_dimension_0[i].second << "]" << std::endl; BOOST_CHECK(intervals_in_dimension_0.size() == 2); BOOST_CHECK(intervals_in_dimension_0[0].first == 0); @@ -133,16 +133,16 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers ) auto intervals_in_dimension_1 = pcoh.intervals_in_dimension(1); - std::cout << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl; + std::clog << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl; for (std::size_t i = 0; i < intervals_in_dimension_1.size(); i++) - std::cout << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," << + std::clog << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," << intervals_in_dimension_1[i].second << "]" << std::endl; BOOST_CHECK(intervals_in_dimension_1.size() == 1); BOOST_CHECK(intervals_in_dimension_1[0].first == 0); BOOST_CHECK(intervals_in_dimension_1[0].second == std::numeric_limits::infinity()); auto intervals_in_dimension_2 = pcoh.intervals_in_dimension(2); - std::cout << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl; + std::clog << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl; BOOST_CHECK(intervals_in_dimension_2.size() == 0); } @@ -259,12 +259,12 @@ BOOST_AUTO_TEST_CASE( betti_numbers ) BOOST_CHECK(st.filtration(get<0>(persistent_pairs[2])) == 1); BOOST_CHECK(get<1>(persistent_pairs[2]) == st.null_simplex()); - std::cout << "INTERVALS IN DIMENSION" << std::endl; + std::clog << "INTERVALS IN DIMENSION" << std::endl; auto intervals_in_dimension_0 = pcoh.intervals_in_dimension(0); - std::cout << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl; + std::clog << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl; for (std::size_t i = 0; i < intervals_in_dimension_0.size(); i++) - std::cout << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," << + std::clog << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," << intervals_in_dimension_0[i].second << "]" << 
std::endl; BOOST_CHECK(intervals_in_dimension_0.size() == 2); BOOST_CHECK(intervals_in_dimension_0[0].first == 2); @@ -273,19 +273,19 @@ BOOST_AUTO_TEST_CASE( betti_numbers ) BOOST_CHECK(intervals_in_dimension_0[1].second == std::numeric_limits::infinity()); auto intervals_in_dimension_1 = pcoh.intervals_in_dimension(1); - std::cout << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl; + std::clog << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl; for (std::size_t i = 0; i < intervals_in_dimension_1.size(); i++) - std::cout << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," << + std::clog << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," << intervals_in_dimension_1[i].second << "]" << std::endl; BOOST_CHECK(intervals_in_dimension_1.size() == 1); BOOST_CHECK(intervals_in_dimension_1[0].first == 4); BOOST_CHECK(intervals_in_dimension_1[0].second == std::numeric_limits::infinity()); auto intervals_in_dimension_2 = pcoh.intervals_in_dimension(2); - std::cout << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl; + std::clog << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl; BOOST_CHECK(intervals_in_dimension_2.size() == 0); - std::cout << "EMPTY COMPLEX" << std::endl; + std::clog << "EMPTY COMPLEX" << std::endl; Simplex_tree empty; empty.initialize_filtration(); St_persistence pcoh_empty(empty, false); diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp index a1c106d5..fe3f8517 100644 --- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp +++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp @@ -30,7 +30,7 @@ std::string test_rips_persistence(int coefficient, int min_persistence) { simplex_tree_stream.close(); // Display the Simplex_tree - std::cout << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension() + std::clog << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension() << std::endl; // Check @@ -76,11 +76,11 @@ void test_rips_persistence_in_dimension(int dimension) { value8.insert(0,std::to_string(dimension)); value9.insert(0,std::to_string(dimension)); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=0" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=0" << std::endl; std::string str_rips_persistence = test_rips_persistence(dimension, 0); - std::cout << str_rips_persistence << std::endl; + std::clog << str_rips_persistence << std::endl; BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found @@ -92,10 +92,10 @@ void test_rips_persistence_in_dimension(int dimension) { BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found - std::cout << 
"str_rips_persistence=" << str_rips_persistence << std::endl; + std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=1" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=1" << std::endl; str_rips_persistence = test_rips_persistence(dimension, 1); @@ -109,10 +109,10 @@ void test_rips_persistence_in_dimension(int dimension) { BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found - std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl; + std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=2" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=2" << std::endl; str_rips_persistence = test_rips_persistence(dimension, 2); @@ -126,10 +126,10 @@ void test_rips_persistence_in_dimension(int dimension) { BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found - std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl; + std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=Inf" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=Inf" << std::endl; str_rips_persistence = test_rips_persistence(dimension, (std::numeric_limits::max)()); @@ -143,7 +143,7 @@ void test_rips_persistence_in_dimension(int dimension) { BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found - std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl; + std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl; } BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_1 ) diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp index 9e767943..3602aa09 100644 --- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp +++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp @@ -30,7 
+30,7 @@ std::string test_rips_persistence(int min_coefficient, int max_coefficient, doub simplex_tree_stream.close(); // Display the Simplex_tree - std::cout << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension() + std::clog << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension() << std::endl; // Check @@ -68,11 +68,11 @@ void test_rips_persistence_in_dimension(int min_dimension, int max_dimension) { std::string value6(" 2 0.3 inf"); std::string value7(" 2 0.4 inf"); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_DIM=" << min_dimension << " MAX_DIM=" << max_dimension << " MIN_PERS=0" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_DIM=" << min_dimension << " MAX_DIM=" << max_dimension << " MIN_PERS=0" << std::endl; std::string str_rips_persistence = test_rips_persistence(min_dimension, max_dimension, 0.0); - std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl; + std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl; BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp index 05bacb9f..3d2ba54f 100644 --- a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp +++ b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp @@ -63,18 +63,18 @@ int main() { // have a reverse filtration (i.e. filtration of boundary of each simplex S // is greater or equal to the filtration of S). // ---------------------------------------------------------------------------- - std::cout << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " + std::clog << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." 
<< std::endl; - std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " + std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << std::endl; } return 0; diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp index bbc3c755..25f93b03 100644 --- a/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp +++ b/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp @@ -39,19 +39,19 @@ int main() { // ---------------------------------------------------------------------------- // Display information about the one skeleton Rips complex // ---------------------------------------------------------------------------- - std::cout << "Rips complex is of dimension " << stree.dimension() << + std::clog << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." << std::endl; - std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << + std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << stree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] "; + std::clog << std::endl; } return 0; diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp index a1db8910..d9df245b 100644 --- a/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp +++ b/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp @@ -34,19 +34,19 @@ int main() { // ---------------------------------------------------------------------------- // Display information about the one skeleton Rips complex // ---------------------------------------------------------------------------- - std::cout << "Rips complex is of dimension " << stree.dimension() << + std::clog << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." 
<< std::endl; - std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << + std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; for (auto f_simplex : stree.filtration_simplex_range()) { - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : stree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << stree.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] "; + std::clog << std::endl; } return 0; } diff --git a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp index b7040453..c0c57e7b 100644 --- a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp +++ b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp @@ -42,7 +42,7 @@ int main(int argc, char **argv) { ouput_file_stream.open(std::string(argv[4])); streambuffer = ouput_file_stream.rdbuf(); } else { - streambuffer = std::cout.rdbuf(); + streambuffer = std::clog.rdbuf(); } Simplex_tree stree; diff --git a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp index 36b468a7..9aa7a657 100644 --- a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp +++ b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp @@ -41,7 +41,7 @@ int main(int argc, char **argv) { ouput_file_stream.open(std::string(argv[4])); streambuffer = ouput_file_stream.rdbuf(); } else { - streambuffer = std::cout.rdbuf(); + streambuffer = std::clog.rdbuf(); } Simplex_tree stree; diff --git a/src/Rips_complex/example/example_sparse_rips.cpp b/src/Rips_complex/example/example_sparse_rips.cpp index 1c95b48c..4bd31103 100644 --- a/src/Rips_complex/example/example_sparse_rips.cpp +++ b/src/Rips_complex/example/example_sparse_rips.cpp @@ -25,6 +25,6 @@ int main() { // ---------------------------------------------------------------------------- // Display information about the complex // ---------------------------------------------------------------------------- - std::cout << "Sparse Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() + std::clog << "Sparse Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - " << stree.num_vertices() << " vertices." 
<< std::endl; } diff --git a/src/Rips_complex/test/test_rips_complex.cpp b/src/Rips_complex/test/test_rips_complex.cpp index 1225f8df..19dcd283 100644 --- a/src/Rips_complex/test/test_rips_complex.cpp +++ b/src/Rips_complex/test/test_rips_complex.cpp @@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) { // ---------------------------------------------------------------------------- std::string off_file_name("alphacomplexdoc.off"); double rips_threshold = 12.0; - std::cout << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" << + std::clog << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" << rips_threshold << "==========" << std::endl; Gudhi::Points_off_reader off_reader(off_file_name); @@ -52,14 +52,14 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) { const int DIMENSION_1 = 1; Simplex_tree st; rips_complex_from_file.create_complex(st, DIMENSION_1); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; BOOST_CHECK(st.dimension() == DIMENSION_1); const int NUMBER_OF_VERTICES = 7; - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 18); // Check filtration values of vertices is 0.0 @@ -71,12 +71,12 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) { for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) { if (DIMENSION_1 == st.dimension(f_simplex)) { std::vector vp; - std::cout << "vertex = ("; + std::clog << "vertex = ("; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << ","; + std::clog << vertex << ","; vp.push_back(off_reader.get_point_cloud().at(vertex)); } - std::cout << ") - distance =" << Gudhi::Euclidean_distance()(vp.at(0), vp.at(1)) << + std::clog << ") - distance =" << Gudhi::Euclidean_distance()(vp.at(0), vp.at(1)) << " - filtration =" << st.filtration(f_simplex) << std::endl; BOOST_CHECK(vp.size() == 2); GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), Gudhi::Euclidean_distance()(vp.at(0), vp.at(1))); @@ -86,46 +86,46 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) { const int DIMENSION_2 = 2; Simplex_tree st2; rips_complex_from_file.create_complex(st2, DIMENSION_2); - std::cout << "st2.dimension()=" << st2.dimension() << std::endl; + std::clog << "st2.dimension()=" << st2.dimension() << std::endl; BOOST_CHECK(st2.dimension() == DIMENSION_2); - std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl; + std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl; BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl; + std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl; BOOST_CHECK(st2.num_simplices() == 23); Simplex_tree::Filtration_value f01 = st2.filtration(st2.find({0, 1})); Simplex_tree::Filtration_value f02 = st2.filtration(st2.find({0, 2})); Simplex_tree::Filtration_value f12 = st2.filtration(st2.find({1, 2})); Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2})); - std::cout << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl; + std::clog << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << 
f12 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::max(f01, std::max(f02,f12))); Simplex_tree::Filtration_value f45 = st2.filtration(st2.find({4, 5})); Simplex_tree::Filtration_value f56 = st2.filtration(st2.find({5, 6})); Simplex_tree::Filtration_value f46 = st2.filtration(st2.find({4, 6})); Simplex_tree::Filtration_value f456 = st2.filtration(st2.find({4, 5, 6})); - std::cout << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl; + std::clog << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f456, std::max(f45, std::max(f56,f46))); const int DIMENSION_3 = 3; Simplex_tree st3; rips_complex_from_file.create_complex(st3, DIMENSION_3); - std::cout << "st3.dimension()=" << st3.dimension() << std::endl; + std::clog << "st3.dimension()=" << st3.dimension() << std::endl; BOOST_CHECK(st3.dimension() == DIMENSION_3); - std::cout << "st3.num_vertices()=" << st3.num_vertices() << std::endl; + std::clog << "st3.num_vertices()=" << st3.num_vertices() << std::endl; BOOST_CHECK(st3.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st3.num_simplices()=" << st3.num_simplices() << std::endl; + std::clog << "st3.num_simplices()=" << st3.num_simplices() << std::endl; BOOST_CHECK(st3.num_simplices() == 24); Simplex_tree::Filtration_value f123 = st3.filtration(st3.find({1, 2, 3})); Simplex_tree::Filtration_value f013 = st3.filtration(st3.find({0, 1, 3})); Simplex_tree::Filtration_value f023 = st3.filtration(st3.find({0, 2, 3})); Simplex_tree::Filtration_value f0123 = st3.filtration(st3.find({0, 1, 2, 3})); - std::cout << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 << + std::clog << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 << " - f023= " << f023 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f0123, std::max(f012, std::max(f123, std::max(f013, f023)))); @@ -176,34 +176,34 @@ BOOST_AUTO_TEST_CASE(Rips_complex_from_points) { // ---------------------------------------------------------------------------- Rips_complex rips_complex_from_points(points, 2.0, Custom_square_euclidean_distance()); - std::cout << "========== Rips_complex_from_points ==========" << std::endl; + std::clog << "========== Rips_complex_from_points ==========" << std::endl; Simplex_tree st; const int DIMENSION = 3; rips_complex_from_points.create_complex(st, DIMENSION); // Another way to check num_simplices - std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; int num_simplices = 0; for (auto f_simplex : st.filtration_simplex_range()) { num_simplices++; - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << st.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << st.filtration(f_simplex) << "] "; + std::clog << std::endl; } BOOST_CHECK(num_simplices == 15); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 15); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; 
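The test above instantiates the complex from four explicit points with threshold 2.0 and a test-local squared-Euclidean distance, then expects 15 simplices (4 vertices, 6 edges, 4 triangles, 1 tetrahedron). A minimal self-contained sketch of the same construction, substituting the stock Gudhi::Euclidean_distance functor for the custom one:

#include <gudhi/Rips_complex.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/distance_functions.h>

#include <iostream>
#include <vector>

int main() {
  using Point = std::vector<double>;
  std::vector<Point> points = {{0., 0.}, {0., 1.}, {1., 0.}, {1., 1.}};
  // Every pairwise distance on this unit square is at most sqrt(2) < 2.0,
  // so the threshold keeps all 6 edges.
  Gudhi::rips_complex::Rips_complex<double> rips(points, 2.0, Gudhi::Euclidean_distance());
  Gudhi::Simplex_tree<> st;
  rips.create_complex(st, 3);  // expand the 1-skeleton up to dimension 3
  std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;  // 15, as in the test
  return 0;
}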
BOOST_CHECK(st.dimension() == DIMENSION); - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == 4); for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; + std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; switch (st.dimension(f_simplex)) { case 0: GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0); @@ -241,34 +241,34 @@ BOOST_AUTO_TEST_CASE(Sparse_rips_complex_from_points) { // .001 is small enough that we get a deterministic result matching the exact Rips Sparse_rips_complex sparse_rips(points, Custom_square_euclidean_distance(), .001); - std::cout << "========== Sparse_rips_complex_from_points ==========" << std::endl; + std::clog << "========== Sparse_rips_complex_from_points ==========" << std::endl; Simplex_tree st; const int DIMENSION = 3; sparse_rips.create_complex(st, DIMENSION); // Another way to check num_simplices - std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; + std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl; int num_simplices = 0; for (auto f_simplex : st.filtration_simplex_range()) { num_simplices++; - std::cout << " ( "; + std::clog << " ( "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << ") -> " << "[" << st.filtration(f_simplex) << "] "; - std::cout << std::endl; + std::clog << ") -> " << "[" << st.filtration(f_simplex) << "] "; + std::clog << std::endl; } BOOST_CHECK(num_simplices == 15); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 15); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; BOOST_CHECK(st.dimension() == DIMENSION); - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == 4); for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; + std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl; switch (st.dimension(f_simplex)) { case 0: GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0); @@ -293,7 +293,7 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) { // ---------------------------------------------------------------------------- std::string csv_file_name("full_square_distance_matrix.csv"); double rips_threshold = 12.0; - std::cout << "========== CSV FILE NAME = " << csv_file_name << " - Rips threshold=" << + std::clog << "========== CSV FILE NAME = " << csv_file_name << " - Rips threshold=" << rips_threshold << "==========" << std::endl; Distance_matrix distances = Gudhi::read_lower_triangular_matrix_from_csv_file(csv_file_name); @@ -302,14 +302,14 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) { const int DIMENSION_1 = 1; Simplex_tree st; rips_complex_from_file.create_complex(st, DIMENSION_1); - std::cout << "st.dimension()=" << st.dimension() << std::endl; 
+ std::clog << "st.dimension()=" << st.dimension() << std::endl; BOOST_CHECK(st.dimension() == DIMENSION_1); const int NUMBER_OF_VERTICES = 7; - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl; + std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl; BOOST_CHECK(st.num_simplices() == 18); // Check filtration values of vertices is 0.0 @@ -321,12 +321,12 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) { for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) { if (DIMENSION_1 == st.dimension(f_simplex)) { std::vector vvh; - std::cout << "vertex = ("; + std::clog << "vertex = ("; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << ","; + std::clog << vertex << ","; vvh.push_back(vertex); } - std::cout << ") - filtration =" << st.filtration(f_simplex) << std::endl; + std::clog << ") - filtration =" << st.filtration(f_simplex) << std::endl; BOOST_CHECK(vvh.size() == 2); GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), distances[vvh.at(0)][vvh.at(1)]); } @@ -335,46 +335,46 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) { const int DIMENSION_2 = 2; Simplex_tree st2; rips_complex_from_file.create_complex(st2, DIMENSION_2); - std::cout << "st2.dimension()=" << st2.dimension() << std::endl; + std::clog << "st2.dimension()=" << st2.dimension() << std::endl; BOOST_CHECK(st2.dimension() == DIMENSION_2); - std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl; + std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl; BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl; + std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl; BOOST_CHECK(st2.num_simplices() == 23); Simplex_tree::Filtration_value f01 = st2.filtration(st2.find({0, 1})); Simplex_tree::Filtration_value f02 = st2.filtration(st2.find({0, 2})); Simplex_tree::Filtration_value f12 = st2.filtration(st2.find({1, 2})); Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2})); - std::cout << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl; + std::clog << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::max(f01, std::max(f02,f12))); Simplex_tree::Filtration_value f45 = st2.filtration(st2.find({4, 5})); Simplex_tree::Filtration_value f56 = st2.filtration(st2.find({5, 6})); Simplex_tree::Filtration_value f46 = st2.filtration(st2.find({4, 6})); Simplex_tree::Filtration_value f456 = st2.filtration(st2.find({4, 5, 6})); - std::cout << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl; + std::clog << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f456, std::max(f45, std::max(f56,f46))); const int DIMENSION_3 = 3; Simplex_tree st3; rips_complex_from_file.create_complex(st3, DIMENSION_3); - std::cout << "st3.dimension()=" << st3.dimension() << std::endl; + std::clog << "st3.dimension()=" << st3.dimension() << std::endl; BOOST_CHECK(st3.dimension() == DIMENSION_3); - std::cout << "st3.num_vertices()=" << st3.num_vertices() << std::endl; + std::clog << "st3.num_vertices()=" << st3.num_vertices() << 
std::endl; BOOST_CHECK(st3.num_vertices() == NUMBER_OF_VERTICES); - std::cout << "st3.num_simplices()=" << st3.num_simplices() << std::endl; + std::clog << "st3.num_simplices()=" << st3.num_simplices() << std::endl; BOOST_CHECK(st3.num_simplices() == 24); Simplex_tree::Filtration_value f123 = st3.filtration(st3.find({1, 2, 3})); Simplex_tree::Filtration_value f013 = st3.filtration(st3.find({0, 1, 3})); Simplex_tree::Filtration_value f023 = st3.filtration(st3.find({0, 2, 3})); Simplex_tree::Filtration_value f0123 = st3.filtration(st3.find({0, 1, 2, 3})); - std::cout << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 << + std::clog << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 << " - f023= " << f023 << std::endl; GUDHI_TEST_FLOAT_EQUALITY_CHECK(f0123, std::max(f012, std::max(f123, std::max(f013, f023)))); @@ -389,7 +389,7 @@ BOOST_AUTO_TEST_CASE(Rips_create_complex_throw) { // ---------------------------------------------------------------------------- std::string off_file_name("alphacomplexdoc.off"); double rips_threshold = 12.0; - std::cout << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" << + std::clog << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" << rips_threshold << "==========" << std::endl; Gudhi::Points_off_reader off_reader(off_file_name); @@ -398,7 +398,7 @@ BOOST_AUTO_TEST_CASE(Rips_create_complex_throw) { Simplex_tree stree; std::vector simplex = {0, 1, 2}; stree.insert_simplex_and_subfaces(simplex); - std::cout << "Check exception throw in debug mode" << std::endl; + std::clog << "Check exception throw in debug mode" << std::endl; // throw excpt because stree is not empty BOOST_CHECK_THROW (rips_complex_from_file.create_complex(stree, 1), std::invalid_argument); } diff --git a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp index 585de4a0..67f921a6 100644 --- a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp +++ b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp @@ -68,8 +68,8 @@ int main(int argc, char* argv[]) { Simplex_tree simplex_tree; rips_complex_from_file.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -121,7 +121,7 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std:: po::options_description visible("Allowed options", 100); visible.add_options()("help,h", "produce help message")( "output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "min-edge-corelation,c", po::value(&correlation_min)->default_value(0), "Minimal corelation of an edge for the Rips complex construction.")( "cpx-dimension,d", po::value(&dim_max)->default_value(1), @@ -143,17 +143,17 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std:: po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a corelation matrix.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a correlation matrix.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp index ad429e11..4ad19675 100644 --- a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp +++ b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp @@ -47,8 +47,8 @@ int main(int argc, char* argv[]) { Simplex_tree simplex_tree; rips_complex_from_file.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -82,7 +82,7 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std:: po::options_description visible("Allowed options", 100); visible.add_options()("help,h", "produce help message")( "output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-edge-length,r", po::value(&threshold)->default_value(std::numeric_limits::infinity()), "Maximal length of an edge for the Rips complex construction.")( @@ -105,17 +105,17 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std:: po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of distance matrix.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of distance matrix.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Rips_complex/utilities/rips_persistence.cpp b/src/Rips_complex/utilities/rips_persistence.cpp index daa7e1db..4cc63d3c 100644 --- a/src/Rips_complex/utilities/rips_persistence.cpp +++ b/src/Rips_complex/utilities/rips_persistence.cpp @@ -49,8 +49,8 @@ int main(int argc, char* argv[]) { Simplex_tree simplex_tree; rips_complex_from_file.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -84,7 +84,7 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::options_description visible("Allowed options", 100); visible.add_options()("help,h", "produce help message")( "output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-edge-length,r", po::value(&threshold)->default_value(std::numeric_limits::infinity()), "Maximal length of an edge for the Rips complex construction.")( @@ -107,17 +107,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Rips complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Rips complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Rips_complex/utilities/sparse_rips_persistence.cpp b/src/Rips_complex/utilities/sparse_rips_persistence.cpp index cefd8a67..40606158 100644 --- a/src/Rips_complex/utilities/sparse_rips_persistence.cpp +++ b/src/Rips_complex/utilities/sparse_rips_persistence.cpp @@ -51,8 +51,8 @@ int main(int argc, char* argv[]) { Simplex_tree simplex_tree; sparse_rips.create_complex(simplex_tree, dim_max); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -87,7 +87,7 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::options_description visible("Allowed options", 100); visible.add_options()("help,h", "produce help message")( "output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-edge-length,r", po::value(&threshold)->default_value(std::numeric_limits::infinity()), "Maximal length of an edge for the Rips complex construction.")( @@ -112,17 +112,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std:: po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a sparse 1/(1-epsilon)-approximation of the Rips complex \ndefined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a sparse 1/(1-epsilon)-approximation of the Rips complex \ndefined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp b/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp index d716fb1f..0e7e382b 100644 --- a/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp +++ b/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp @@ -55,18 +55,18 @@ class Cech_blocker { bool operator()(Simplex_handle sh) { std::vector points; #if DEBUG_TRACES - std::cout << "Cech_blocker on ["; + std::clog << "Cech_blocker on ["; #endif // DEBUG_TRACES for (auto vertex : simplex_tree_.simplex_vertex_range(sh)) { points.push_back(point_cloud_[vertex]); #if DEBUG_TRACES - std::cout << vertex << ", "; + std::clog << vertex << ", "; #endif // DEBUG_TRACES } Min_sphere ms(points.begin(), points.end()); Filtration_value radius = ms.radius(); #if DEBUG_TRACES - std::cout << "] - radius = " << radius << " - returns " << (radius > threshold_) << std::endl; + std::clog << "] - radius = " << radius << " - returns " << (radius > threshold_) << std::endl; #endif // DEBUG_TRACES simplex_tree_.assign_filtration(sh, radius); return (radius > threshold_); @@ -106,24 +106,24 @@ int main(int argc, char* argv[]) { // expand the graph until dimension dim_max st.expansion_with_blockers(dim_max, Cech_blocker(st, threshold, off_reader.get_point_cloud())); - std::cout << "The complex contains " << st.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << st.dimension() << " \n"; + std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << st.dimension() << " \n"; // Sort the simplices in the order of the filtration 
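// Editorial note (comment added for clarity; not part of the original patch):
// initialize_filtration() sorts the simplices by increasing filtration value,
// with every face placed before its cofaces, and caches that order; the
// filtration_simplex_range() loop in the DEBUG_TRACES block below relies on it.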
st.initialize_filtration(); #if DEBUG_TRACES - std::cout << "********************************************************************\n"; + std::clog << "********************************************************************\n"; // Display the Simplex_tree - Can not be done in the middle of 2 inserts - std::cout << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << static_cast(vertex) << " "; + std::clog << static_cast(vertex) << " "; } - std::cout << std::endl; + std::clog << std::endl; } #endif // DEBUG_TRACES return 0; @@ -154,11 +154,11 @@ void program_options(int argc, char* argv[], std::string& off_file_points, Filtr po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Construct a Cech complex defined on a set of input points.\n \n"; + std::clog << std::endl; + std::clog << "Construct a Cech complex defined on a set of input points.\n \n"; - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp b/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp index e455c426..8ee7ab74 100644 --- a/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp +++ b/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp @@ -63,7 +63,7 @@ Vertex_list from(const Cell_handle& ch) { Vertex_list the_list; for (auto i = 0; i < 4; i++) { #ifdef DEBUG_TRACES - std::cout << "from cell[" << i << "]=" << ch->vertex(i)->point() << std::endl; + std::clog << "from cell[" << i << "]=" << ch->vertex(i)->point() << std::endl; #endif // DEBUG_TRACES the_list.push_back(ch->vertex(i)); } @@ -75,7 +75,7 @@ Vertex_list from(const Facet& fct) { for (auto i = 0; i < 4; i++) { if (fct.second != i) { #ifdef DEBUG_TRACES - std::cout << "from facet=[" << i << "]" << fct.first->vertex(i)->point() << std::endl; + std::clog << "from facet=[" << i << "]" << fct.first->vertex(i)->point() << std::endl; #endif // DEBUG_TRACES the_list.push_back(fct.first->vertex(i)); } @@ -88,7 +88,7 @@ Vertex_list from(const Edge& edg) { for (auto i = 0; i < 4; i++) { if ((edg.second == i) || (edg.third == i)) { #ifdef DEBUG_TRACES - std::cout << "from edge[" << i << "]=" << edg.first->vertex(i)->point() << std::endl; + std::clog << "from edge[" << i << "]=" << edg.first->vertex(i)->point() << std::endl; #endif // DEBUG_TRACES the_list.push_back(edg.first->vertex(i)); } @@ -99,7 +99,7 @@ Vertex_list from(const Edge& edg) { Vertex_list from(const Alpha_shape_3::Vertex_handle& vh) { Vertex_list the_list; #ifdef DEBUG_TRACES - std::cout << "from vertex=" << vh->point() << std::endl; + std::clog << "from vertex=" << vh->point() << std::endl; #endif // DEBUG_TRACES the_list.push_back(vh); 
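// Editorial note (comment added for clarity; not part of the original patch):
// this is the last of the four from() overloads; each one flattens a CGAL
// alpha-shape object (cell, facet, edge or vertex) into the list of its
// Vertex_handles, so that the objects returned by
// filtration_with_alpha_values() can be translated into Simplex_tree
// insertions further down in main().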
return the_list; @@ -128,7 +128,7 @@ int main(int argc, char * const argv[]) { // alpha shape construction from points. CGAL has a strange behavior in REGULARIZED mode. Alpha_shape_3 as(lp.begin(), lp.end(), 0, Alpha_shape_3::GENERAL); #ifdef DEBUG_TRACES - std::cout << "Alpha shape computed in GENERAL mode" << std::endl; + std::clog << "Alpha shape computed in GENERAL mode" << std::endl; #endif // DEBUG_TRACES // filtration with alpha values from alpha shape @@ -140,7 +140,7 @@ int main(int argc, char * const argv[]) { as.filtration_with_alpha_values(disp); #ifdef DEBUG_TRACES - std::cout << "filtration_with_alpha_values returns : " << the_objects.size() << " objects" << std::endl; + std::clog << "filtration_with_alpha_values returns : " << the_objects.size() << " objects" << std::endl; #endif // DEBUG_TRACES Alpha_shape_3::size_type count_vertices = 0; @@ -177,7 +177,7 @@ int main(int argc, char * const argv[]) { // alpha shape not found Simplex_tree_vertex vertex = map_cgal_simplex_tree.size(); #ifdef DEBUG_TRACES - std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] not found - insert_simplex " << vertex << "\n"; + std::clog << "vertex [" << the_alpha_shape_vertex->point() << "] not found - insert_simplex " << vertex << "\n"; #endif // DEBUG_TRACES the_simplex_tree.push_back(vertex); map_cgal_simplex_tree.insert(Alpha_shape_simplex_tree_pair(the_alpha_shape_vertex, vertex)); @@ -185,14 +185,14 @@ int main(int argc, char * const argv[]) { // alpha shape found Simplex_tree_vertex vertex = the_map_iterator->second; #ifdef DEBUG_TRACES - std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] found in " << vertex << std::endl; + std::clog << "vertex [" << the_alpha_shape_vertex->point() << "] found in " << vertex << std::endl; #endif // DEBUG_TRACES the_simplex_tree.push_back(vertex); } } // Construction of the simplex_tree #ifdef DEBUG_TRACES - std::cout << "filtration = " << *the_alpha_value_iterator << std::endl; + std::clog << "filtration = " << *the_alpha_value_iterator << std::endl; #endif // DEBUG_TRACES simplex_tree.insert_simplex(the_simplex_tree, std::sqrt(*the_alpha_value_iterator)); if (the_alpha_value_iterator != the_alpha_values.end()) @@ -201,61 +201,61 @@ int main(int argc, char * const argv[]) { std::cerr << "This shall not happen" << std::endl; } #ifdef DEBUG_TRACES - std::cout << "vertices \t\t" << count_vertices << std::endl; - std::cout << "edges \t\t" << count_edges << std::endl; - std::cout << "facets \t\t" << count_facets << std::endl; - std::cout << "cells \t\t" << count_cells << std::endl; + std::clog << "vertices \t\t" << count_vertices << std::endl; + std::clog << "edges \t\t" << count_edges << std::endl; + std::clog << "facets \t\t" << count_facets << std::endl; + std::clog << "cells \t\t" << count_cells << std::endl; - std::cout << "Information of the Simplex Tree:\n"; - std::cout << " Number of vertices = " << simplex_tree.num_vertices() << " "; - std::cout << " Number of simplices = " << simplex_tree.num_simplices() << std::endl << std::endl; + std::clog << "Information of the Simplex Tree:\n"; + std::clog << " Number of vertices = " << simplex_tree.num_vertices() << " "; + std::clog << " Number of simplices = " << simplex_tree.num_simplices() << std::endl << std::endl; #endif // DEBUG_TRACES #ifdef DEBUG_TRACES - std::cout << "Iterator on vertices: \n"; + std::clog << "Iterator on vertices: \n"; for (auto vertex : simplex_tree.complex_vertex_range()) { - std::cout << vertex << " "; + std::clog << vertex << " "; } #endif // DEBUG_TRACES 
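// Editorial note (comment added for clarity; not part of the original patch):
// the point of this whole change set is that std::clog, unlike std::cout,
// writes to the standard error stream while staying buffered (std::cerr is
// unit-buffered), so debug traces remain cheap and stdout stays free for
// actual program output. A minimal, self-contained sketch of the three
// standard streams:
//
//   #include <iostream>
//
//   int main() {
//     std::cout << "program output\n";    // buffered, goes to stdout
//     std::clog << "diagnostic trace\n";  // buffered, goes to stderr
//     std::cerr << "urgent error\n";      // unit-buffered, goes to stderr
//     return 0;
//   }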
- std::cout << simplex_tree << std::endl; + std::clog << simplex_tree << std::endl; #ifdef DEBUG_TRACES - std::cout << std::endl << std::endl << "Iterator on simplices:\n"; + std::clog << std::endl << std::endl << "Iterator on simplices:\n"; for (auto simplex : simplex_tree.complex_simplex_range()) { - std::cout << " "; + std::clog << " "; for (auto vertex : simplex_tree.simplex_vertex_range(simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } #endif // DEBUG_TRACES #ifdef DEBUG_TRACES - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } #endif // DEBUG_TRACES #ifdef DEBUG_TRACES - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, and their boundary simplices:\n"; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, and their boundary simplices:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; for (auto b_simplex : simplex_tree.boundary_simplex_range(f_simplex)) { - std::cout << " " << "[" << simplex_tree.filtration(b_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(b_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(b_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } #endif // DEBUG_TRACES diff --git a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp index 494f8b1d..df52bf43 100644 --- a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp +++ b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp @@ -34,31 +34,31 @@ int main(int argc, char* const argv[]) { stree.expansion_with_blockers(3, [&](Simplex_handle sh) { bool result = false; - std::cout << "Blocker on ["; + std::clog << "Blocker on ["; // User can loop on the vertices from the given simplex_handle i.e. for (auto vertex : stree.simplex_vertex_range(sh)) { // We block the expansion, if the vertex '6' is in the given list of vertices if (vertex == 6) result = true; - std::cout << vertex << ", "; + std::clog << vertex << ", "; } - std::cout << "] ( " << stree.filtration(sh); + std::clog << "] ( " << stree.filtration(sh); // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries) stree.assign_filtration(sh, stree.filtration(sh) + 1.); - std::cout << " + 1. ) = " << result << std::endl; + std::clog << " + 1. 
) = " << result << std::endl; return result; }); - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << stree.num_simplices() << " simplices"; - std::cout << " - dimension " << stree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << stree.num_simplices() << " simplices"; + std::clog << " - dimension " << stree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : stree.filtration_simplex_range()) { - std::cout << " " + std::clog << " " << "[" << stree.filtration(f_simplex) << "] "; - for (auto vertex : stree.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : stree.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } return 0; diff --git a/src/Simplex_tree/example/mini_simplex_tree.cpp b/src/Simplex_tree/example/mini_simplex_tree.cpp index bbc582c7..4043bffd 100644 --- a/src/Simplex_tree/example/mini_simplex_tree.cpp +++ b/src/Simplex_tree/example/mini_simplex_tree.cpp @@ -48,7 +48,7 @@ int main() { for (ST::Simplex_handle t : st.cofaces_simplex_range(e, 1)) { // Only coface is 012 for (ST::Vertex_handle v : st.simplex_vertex_range(t)) // v in { 0, 1, 2 } - std::cout << v; - std::cout << '\n'; + std::clog << v; + std::clog << '\n'; } } diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp index 4353939f..ca39df5b 100644 --- a/src/Simplex_tree/example/simple_simplex_tree.cpp +++ b/src/Simplex_tree/example/simple_simplex_tree.cpp @@ -28,8 +28,8 @@ int main(int argc, char* const argv[]) { const Filtration_value FOURTH_FILTRATION_VALUE = 0.4; // TEST OF INSERTION - std::cout << "********************************************************************" << std::endl; - std::cout << "EXAMPLE OF SIMPLE INSERTION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "EXAMPLE OF SIMPLE INSERTION" << std::endl; // Construct the Simplex Tree Simplex_tree simplexTree; @@ -41,140 +41,140 @@ int main(int argc, char* const argv[]) { /* 2 0 3 */ // ++ FIRST - std::cout << " * INSERT 0" << std::endl; + std::clog << " * INSERT 0" << std::endl; typeVectorVertex firstSimplexVector = {0}; typePairSimplexBool returnValue = simplexTree.insert_simplex(firstSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + 0 INSERTED" << std::endl; + std::clog << " + 0 INSERTED" << std::endl; } else { - std::cout << " - 0 NOT INSERTED" << std::endl; + std::clog << " - 0 NOT INSERTED" << std::endl; } // ++ SECOND - std::cout << " * INSERT 1" << std::endl; + std::clog << " * INSERT 1" << std::endl; typeVectorVertex secondSimplexVector = {1}; returnValue = simplexTree.insert_simplex(secondSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + 1 INSERTED" << std::endl; + std::clog << " + 1 INSERTED" << std::endl; } else { - std::cout << " - 1 NOT INSERTED" << std::endl; + std::clog << " - 1 NOT INSERTED" << std::endl; } // ++ THIRD - std::cout << " * INSERT (0,1)" << std::endl; + std::clog << " * INSERT (0,1)" << std::endl; typeVectorVertex 
thirdSimplexVector = {0, 1}; returnValue = simplexTree.insert_simplex(thirdSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (0,1) INSERTED" << std::endl; + std::clog << " + (0,1) INSERTED" << std::endl; } else { - std::cout << " - (0,1) NOT INSERTED" << std::endl; + std::clog << " - (0,1) NOT INSERTED" << std::endl; } // ++ FOURTH - std::cout << " * INSERT 2" << std::endl; + std::clog << " * INSERT 2" << std::endl; typeVectorVertex fourthSimplexVector = {2}; returnValue = simplexTree.insert_simplex(fourthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + 2 INSERTED" << std::endl; + std::clog << " + 2 INSERTED" << std::endl; } else { - std::cout << " - 2 NOT INSERTED" << std::endl; + std::clog << " - 2 NOT INSERTED" << std::endl; } // ++ FIFTH - std::cout << " * INSERT (2,0)" << std::endl; + std::clog << " * INSERT (2,0)" << std::endl; typeVectorVertex fifthSimplexVector = {2, 0}; returnValue = simplexTree.insert_simplex(fifthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (2,0) INSERTED" << std::endl; + std::clog << " + (2,0) INSERTED" << std::endl; } else { - std::cout << " - (2,0) NOT INSERTED" << std::endl; + std::clog << " - (2,0) NOT INSERTED" << std::endl; } // ++ SIXTH - std::cout << " * INSERT (2,1)" << std::endl; + std::clog << " * INSERT (2,1)" << std::endl; typeVectorVertex sixthSimplexVector = {2, 1}; returnValue = simplexTree.insert_simplex(sixthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (2,1) INSERTED" << std::endl; + std::clog << " + (2,1) INSERTED" << std::endl; } else { - std::cout << " - (2,1) NOT INSERTED" << std::endl; + std::clog << " - (2,1) NOT INSERTED" << std::endl; } // ++ SEVENTH - std::cout << " * INSERT (2,1,0)" << std::endl; + std::clog << " * INSERT (2,1,0)" << std::endl; typeVectorVertex seventhSimplexVector = {2, 1, 0}; returnValue = simplexTree.insert_simplex(seventhSimplexVector, Filtration_value(THIRD_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (2,1,0) INSERTED" << std::endl; + std::clog << " + (2,1,0) INSERTED" << std::endl; } else { - std::cout << " - (2,1,0) NOT INSERTED" << std::endl; + std::clog << " - (2,1,0) NOT INSERTED" << std::endl; } // ++ EIGHTH - std::cout << " * INSERT 3" << std::endl; + std::clog << " * INSERT 3" << std::endl; typeVectorVertex eighthSimplexVector = {3}; returnValue = simplexTree.insert_simplex(eighthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + 3 INSERTED" << std::endl; + std::clog << " + 3 INSERTED" << std::endl; } else { - std::cout << " - 3 NOT INSERTED" << std::endl; + std::clog << " - 3 NOT INSERTED" << std::endl; } // ++ NINETH - std::cout << " * INSERT (3,0)" << std::endl; + std::clog << " * INSERT (3,0)" << std::endl; typeVectorVertex ninethSimplexVector = {3, 0}; returnValue = simplexTree.insert_simplex(ninethSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (3,0) INSERTED" << std::endl; + std::clog << " + (3,0) INSERTED" << std::endl; } else { - std::cout << " - (3,0) NOT INSERTED" << std::endl; + std::clog << " - (3,0) NOT INSERTED" << std::endl; } // ++ TENTH - std::cout << " * INSERT 0 (already inserted)" << std::endl; + std::clog << " * INSERT 0 (already inserted)" << std::endl; typeVectorVertex 
tenthSimplexVector = {0}; // With a different filtration value returnValue = simplexTree.insert_simplex(tenthSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + 0 INSERTED" << std::endl; + std::clog << " + 0 INSERTED" << std::endl; } else { - std::cout << " - 0 NOT INSERTED" << std::endl; + std::clog << " - 0 NOT INSERTED" << std::endl; } // ++ ELEVENTH - std::cout << " * INSERT (2,1,0) (already inserted)" << std::endl; + std::clog << " * INSERT (2,1,0) (already inserted)" << std::endl; typeVectorVertex eleventhSimplexVector = {2, 1, 0}; returnValue = simplexTree.insert_simplex(eleventhSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE)); if (returnValue.second == true) { - std::cout << " + (2,1,0) INSERTED" << std::endl; + std::clog << " + (2,1,0) INSERTED" << std::endl; } else { - std::cout << " - (2,1,0) NOT INSERTED" << std::endl; + std::clog << " - (2,1,0) NOT INSERTED" << std::endl; } // ++ GENERAL VARIABLE SET - std::cout << "********************************************************************\n"; + std::clog << "********************************************************************\n"; // Display the Simplex_tree - Can not be done in the middle of 2 inserts - std::cout << "* The complex contains " << simplexTree.num_simplices() << " simplices\n"; - std::cout << " - dimension " << simplexTree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "* The complex contains " << simplexTree.num_simplices() << " simplices\n"; + std::clog << " - dimension " << simplexTree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplexTree.filtration_simplex_range()) { - std::cout << " " + std::clog << " " << "[" << simplexTree.filtration(f_simplex) << "] "; - for (auto vertex : simplexTree.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : simplexTree.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } // [0.1] 0 // [0.1] 1 @@ -190,66 +190,66 @@ int main(int argc, char* const argv[]) { // Find in the simplex_tree // ------------------------------------------------------------------------------------------------------------------ Simplex_tree::Simplex_handle simplexFound = simplexTree.find(secondSimplexVector); - std::cout << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; typeVectorVertex unknownSimplexVector = {15}; simplexFound = simplexTree.find(unknownSimplexVector); - std::cout << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; simplexFound = simplexTree.find(fifthSimplexVector); - std::cout << "**************IS THE SIMPLEX {2,0} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {2,0} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT 
IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; typeVectorVertex otherSimplexVector = {1, 15}; simplexFound = simplexTree.find(otherSimplexVector); - std::cout << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; typeVectorVertex invSimplexVector = {1, 2, 0}; simplexFound = simplexTree.find(invSimplexVector); - std::cout << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; simplexFound = simplexTree.find({0, 1}); - std::cout << "**************IS THE SIMPLEX {0,1} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {0,1} IN THE SIMPLEX TREE ?\n"; if (simplexFound != simplexTree.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; - std::cout << "**************COFACES OF {0,1} IN CODIMENSION 1 ARE\n"; + std::clog << "**************COFACES OF {0,1} IN CODIMENSION 1 ARE\n"; for (auto& simplex : simplexTree.cofaces_simplex_range(simplexTree.find({0, 1}), 1)) { - for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } - std::cout << "**************STARS OF {0,1} ARE\n"; + std::clog << "**************STARS OF {0,1} ARE\n"; for (auto& simplex : simplexTree.star_simplex_range(simplexTree.find({0, 1}))) { - for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } - std::cout << "**************BOUNDARIES OF {0,1,2} ARE\n"; + std::clog << "**************BOUNDARIES OF {0,1,2} ARE\n"; for (auto& simplex : simplexTree.boundary_simplex_range(simplexTree.find({0, 1, 2}))) { - for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } return 0; diff --git a/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp b/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp index f6dfa53c..6278efa7 100644 --- a/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp +++ b/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp @@ -42,67 +42,67 @@ int main(int argc, char * const argv[]) { // insert the graph in the simplex tree as 1-skeleton st.insert_graph(g); end = clock(); - std::cout << "Insert the 1-skeleton in the simplex tree in " + std::clog << "Insert the 1-skeleton in the simplex tree in " << static_cast(end - start) / CLOCKS_PER_SEC << " s. 
\n"; start = clock(); // expand the 1-skeleton until dimension max_dim st.expansion(max_dim); end = clock(); - std::cout << "max_dim = " << max_dim << "\n"; - std::cout << "Expand the simplex tree in " + std::clog << "max_dim = " << max_dim << "\n"; + std::clog << "Expand the simplex tree in " << static_cast(end - start) / CLOCKS_PER_SEC << " s. \n"; - std::cout << "Information of the Simplex Tree: " << std::endl; - std::cout << " Number of vertices = " << st.num_vertices() << " "; - std::cout << " Number of simplices = " << st.num_simplices() << std::endl; - std::cout << std::endl << std::endl; + std::clog << "Information of the Simplex Tree: " << std::endl; + std::clog << " Number of vertices = " << st.num_vertices() << " "; + std::clog << " Number of simplices = " << st.num_simplices() << std::endl; + std::clog << std::endl << std::endl; - std::cout << "Iterator on vertices: "; + std::clog << "Iterator on vertices: "; for (auto vertex : st.complex_vertex_range()) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; - std::cout << "Iterator on simplices: " << std::endl; + std::clog << "Iterator on simplices: " << std::endl; for (auto simplex : st.complex_simplex_range()) { - std::cout << " "; + std::clog << " "; for (auto vertex : st.simplex_vertex_range(simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; - std::cout << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; - std::cout << "Iterator on Simplices in the filtration, and their boundary simplices:" << std::endl; + std::clog << "Iterator on Simplices in the filtration, and their boundary simplices:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; for (auto b_simplex : st.boundary_simplex_range(f_simplex)) { - std::cout << " " << "[" << st.filtration(b_simplex) << "] "; + std::clog << " " << "[" << st.filtration(b_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(b_simplex)) { - std::cout << vertex << " "; + std::clog << vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } return 0; diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 76608008..6e80b77f 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -286,7 +286,7 @@ class Simplex_tree { /** \brief User-defined copy constructor reproduces the whole tree structure. 
*/ Simplex_tree(const Simplex_tree& complex_source) { #ifdef DEBUG_TRACES - std::cout << "Simplex_tree copy constructor" << std::endl; + std::clog << "Simplex_tree copy constructor" << std::endl; #endif // DEBUG_TRACES copy_from(complex_source); } @@ -296,7 +296,7 @@ class Simplex_tree { */ Simplex_tree(Simplex_tree && complex_source) { #ifdef DEBUG_TRACES - std::cout << "Simplex_tree move constructor" << std::endl; + std::clog << "Simplex_tree move constructor" << std::endl; #endif // DEBUG_TRACES move_from(complex_source); @@ -313,7 +313,7 @@ class Simplex_tree { /** \brief User-defined copy assignment reproduces the whole tree structure. */ Simplex_tree& operator= (const Simplex_tree& complex_source) { #ifdef DEBUG_TRACES - std::cout << "Simplex_tree copy assignment" << std::endl; + std::clog << "Simplex_tree copy assignment" << std::endl; #endif // DEBUG_TRACES // Self-assignment detection if (&complex_source != this) { @@ -330,7 +330,7 @@ class Simplex_tree { */ Simplex_tree& operator=(Simplex_tree&& complex_source) { #ifdef DEBUG_TRACES - std::cout << "Simplex_tree move assignment" << std::endl; + std::clog << "Simplex_tree move assignment" << std::endl; #endif // DEBUG_TRACES // Self-assignment detection if (&complex_source != this) { @@ -1418,9 +1418,9 @@ class Simplex_tree { for (Simplex_handle sh : complex_simplex_range()) { #ifdef DEBUG_TRACES for (auto vertex : simplex_vertex_range(sh)) { - std::cout << " " << vertex; + std::clog << " " << vertex; } - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES int sh_dimension = dimension(sh); diff --git a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp index c0615b12..229ae46f 100644 --- a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp @@ -30,16 +30,16 @@ void print_simplex_filtration(Simplex_tree& st, const std::string& msg) { // Required before browsing through filtration values st.initialize_filtration(); - std::cout << "********************************************************************\n"; - std::cout << "* " << msg << "\n"; - std::cout << "* The complex contains " << st.num_simplices() << " simplices"; - std::cout << " - dimension " << st.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "* " << msg << "\n"; + std::clog << "* The complex contains " << st.num_simplices() << " simplices"; + std::clog << " - dimension " << st.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; - for (auto vertex : st.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")"; - std::cout << std::endl; + for (auto vertex : st.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")"; + std::clog << std::endl; } } @@ -70,8 +70,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te print_simplex_filtration(st, "Default Simplex_tree is initialized"); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF COPY CONSTRUCTOR" << std::endl; + std::clog << 
"********************************************************************" << std::endl; + std::clog << "TEST OF COPY CONSTRUCTOR" << std::endl; Simplex_tree st1(st); Simplex_tree st2(st); @@ -82,8 +82,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te BOOST_CHECK(st == st2); BOOST_CHECK(st1 == st); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF COPY ASSIGNMENT" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF COPY ASSIGNMENT" << std::endl; Simplex_tree st3; // To check there is no memory leak st3.insert_simplex_and_subfaces({9, 10, 11}, 200.0); @@ -103,8 +103,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te BOOST_CHECK(st3 == st); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF MOVE CONSTRUCTOR" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF MOVE CONSTRUCTOR" << std::endl; Simplex_tree st5(std::move(st1)); print_simplex_filtration(st5, "First move constructor from the default Simplex_tree"); print_simplex_filtration(st1, "First moved Simplex_tree shall be empty"); @@ -122,8 +122,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te BOOST_CHECK(empty_st == st2); BOOST_CHECK(st1 == empty_st); - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF MOVE ASSIGNMENT" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF MOVE ASSIGNMENT" << std::endl; Simplex_tree st7; // To check there is no memory leak diff --git a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp index fab25eb8..881a06ae 100644 --- a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp @@ -55,34 +55,34 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, li simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){ bool result = false; - std::cout << "Blocker on ["; + std::clog << "Blocker on ["; // User can loop on the vertices from the given simplex_handle i.e. for (auto vertex : simplex_tree.simplex_vertex_range(sh)) { // We block the expansion, if the vertex '6' is in the given list of vertices if (vertex == 6) result = true; - std::cout << vertex << ", "; + std::clog << vertex << ", "; } - std::cout << "] ( " << simplex_tree.filtration(sh); + std::clog << "] ( " << simplex_tree.filtration(sh); // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries) simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.); - std::cout << " + 1. ) = " << result << std::endl; + std::clog << " + 1. 
) = " << result << std::endl; return result; }); - std::cout << "********************************************************************\n"; - std::cout << "simplex_tree_expansion_with_blockers_3\n"; - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; - std::cout << " - dimension " << simplex_tree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "simplex_tree_expansion_with_blockers_3\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; + std::clog << " - dimension " << simplex_tree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) - std::cout << "(" << vertex << ")"; - std::cout << std::endl; + std::clog << "(" << vertex << ")"; + std::clog << std::endl; } BOOST_CHECK(simplex_tree.num_simplices() == 23); @@ -117,34 +117,34 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, li simplex_tree.expansion_with_blockers(2, [&](Simplex_handle sh){ bool result = false; - std::cout << "Blocker on ["; + std::clog << "Blocker on ["; // User can loop on the vertices from the given simplex_handle i.e. for (auto vertex : simplex_tree.simplex_vertex_range(sh)) { // We block the expansion, if the vertex '6' is in the given list of vertices if (vertex == 6) result = true; - std::cout << vertex << ", "; + std::clog << vertex << ", "; } - std::cout << "] ( " << simplex_tree.filtration(sh); + std::clog << "] ( " << simplex_tree.filtration(sh); // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries) simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.); - std::cout << " + 1. ) = " << result << std::endl; + std::clog << " + 1. 
) = " << result << std::endl; return result; }); - std::cout << "********************************************************************\n"; - std::cout << "simplex_tree_expansion_with_blockers_2\n"; - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; - std::cout << " - dimension " << simplex_tree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "simplex_tree_expansion_with_blockers_2\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; + std::clog << " - dimension " << simplex_tree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) - std::cout << "(" << vertex << ")"; - std::cout << std::endl; + std::clog << "(" << vertex << ")"; + std::clog << std::endl; } BOOST_CHECK(simplex_tree.num_simplices() == 22); @@ -176,17 +176,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_var simplex_tree.insert_simplex({6}, 10.); simplex_tree.expansion(3); - std::cout << "********************************************************************\n"; - std::cout << "simplex_tree_expansion_3\n"; - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; - std::cout << " - dimension " << simplex_tree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "simplex_tree_expansion_3\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; + std::clog << " - dimension " << simplex_tree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) - std::cout << "(" << vertex << ")"; - std::cout << std::endl; + std::clog << "(" << vertex << ")"; + std::clog << std::endl; } BOOST_CHECK(simplex_tree.num_simplices() == 24); @@ -220,17 +220,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_v simplex_tree.expansion(2); - std::cout << "********************************************************************\n"; - std::cout << "simplex_tree_expansion_2\n"; - std::cout << "********************************************************************\n"; - std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; - std::cout << " - dimension " << simplex_tree.dimension() << "\n"; - std::cout << 
"* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::clog << "********************************************************************\n"; + std::clog << "simplex_tree_expansion_2\n"; + std::clog << "********************************************************************\n"; + std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices"; + std::clog << " - dimension " << simplex_tree.dimension() << "\n"; + std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; for (auto f_simplex : simplex_tree.filtration_simplex_range()) { - std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; + std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] "; for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) - std::cout << "(" << vertex << ")"; - std::cout << std::endl; + std::clog << "(" << vertex << ")"; + std::clog << std::endl; } BOOST_CHECK(simplex_tree.num_simplices() == 23); diff --git a/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp index 28c29489..20007488 100644 --- a/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp @@ -34,8 +34,8 @@ typedef boost::mpl::list, > list_of_tested_variants; BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_variants) { - std::cout << "********************************************************************" << std::endl; - std::cout << "SIMPLEX TREE IOSTREAM OPERATOR" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "SIMPLEX TREE IOSTREAM OPERATOR" << std::endl; Stree_type st; @@ -46,15 +46,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = " + std::clog << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = " << st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } // st: @@ -75,15 +75,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari simplex_tree_istream >> read_st; // Display the Simplex_tree - std::cout << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = " + std::clog << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = " << read_st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : read_st.filtration_simplex_range()) { - std::cout << 
" " << "[" << read_st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << read_st.filtration(f_simplex) << "] "; for (auto vertex : read_st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } BOOST_CHECK(st == read_st); @@ -91,8 +91,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari BOOST_AUTO_TEST_CASE(mini_iostream_operator) { - std::cout << "********************************************************************" << std::endl; - std::cout << "MINI SIMPLEX TREE IOSTREAM OPERATOR" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "MINI SIMPLEX TREE IOSTREAM OPERATOR" << std::endl; Simplex_tree st; @@ -103,14 +103,14 @@ BOOST_AUTO_TEST_CASE(mini_iostream_operator) { st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = " + std::clog << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = " << st.dimension() << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } // st: @@ -131,15 +131,15 @@ BOOST_AUTO_TEST_CASE(mini_iostream_operator) { simplex_tree_istream >> read_st; // Display the Simplex_tree - std::cout << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = " + std::clog << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = " << read_st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : read_st.filtration_simplex_range()) { - std::cout << " " << "[" << read_st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << read_st.filtration(f_simplex) << "] "; for (auto vertex : read_st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } BOOST_CHECK(st == read_st); diff --git a/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp index 97347992..36b8b3c6 100644 --- a/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp @@ -32,8 +32,8 @@ using Mini_stree = Simplex_tree; using Stree = Simplex_tree<>; BOOST_AUTO_TEST_CASE(remove_maximal_simplex) { - std::cout << "********************************************************************" << std::endl; - std::cout << "REMOVE MAXIMAL SIMPLEX" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "REMOVE MAXIMAL SIMPLEX" << std::endl; Mini_stree st; @@ -66,21 +66,21 @@ BOOST_AUTO_TEST_CASE(remove_maximal_simplex) { // 5 #ifdef GUDHI_DEBUG - std::cout << "Check exception throw in debug mode" << std::endl; + std::clog << "Check exception throw in debug mode" << 
std::endl;
   // throw exception because sh has children
   BOOST_CHECK_THROW (st.remove_maximal_simplex(st.find({0, 1, 6})), std::invalid_argument);
   BOOST_CHECK_THROW (st.remove_maximal_simplex(st.find({3})), std::invalid_argument);
   BOOST_CHECK(st == st_complete);
 #endif
-  std::cout << "st.remove_maximal_simplex({0, 2})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 2})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 2}));
-  std::cout << "st.remove_maximal_simplex({0, 1, 2})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 1, 2})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 1, 2}));
-  std::cout << "st.remove_maximal_simplex({1, 2})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({1, 2})" << std::endl;
   st.remove_maximal_simplex(st.find({1, 2}));
-  std::cout << "st.remove_maximal_simplex({2})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({2})" << std::endl;
   st.remove_maximal_simplex(st.find({2}));
-  std::cout << "st.remove_maximal_simplex({3})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 3})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 3}));
   BOOST_CHECK(st == st_pruned);
@@ -102,39 +102,39 @@ BOOST_AUTO_TEST_CASE(remove_maximal_simplex) {
   //  5
   // Remove all 7 to test both remove_maximal_simplex cases (when _members is empty or not)
-  std::cout << "st.remove_maximal_simplex({0, 1, 6, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 1, 6, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 1, 6, 7}));
-  std::cout << "st.remove_maximal_simplex({0, 1, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 1, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 1, 7}));
-  std::cout << "st.remove_maximal_simplex({0, 6, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 6, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 6, 7}));
-  std::cout << "st.remove_maximal_simplex({0, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({0, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({0, 7}));
-  std::cout << "st.remove_maximal_simplex({1, 6, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({1, 6, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({1, 6, 7}));
-  std::cout << "st.remove_maximal_simplex({1, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({1, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({1, 7}));
-  std::cout << "st.remove_maximal_simplex({6, 7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({6, 7})" << std::endl;
   st.remove_maximal_simplex(st.find({6, 7}));
-  std::cout << "st.remove_maximal_simplex({7})" << std::endl;
+  std::clog << "st.remove_maximal_simplex({7})" << std::endl;
   st.remove_maximal_simplex(st.find({7}));
-  std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+  std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
   BOOST_CHECK(st.upper_bound_dimension() == 3);
   // Check dimension calls lower_upper_bound_dimension to recompute dimension
   BOOST_CHECK(st.dimension() == 2);
   BOOST_CHECK(st.upper_bound_dimension() == 2);
-  std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension()
+  std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension()
             << " | st_wo_seven.upper_bound_dimension()=" << st_wo_seven.upper_bound_dimension() << std::endl;
-  std::cout << "st.dimension()=" << st.dimension() << " | st_wo_seven.dimension()=" << st_wo_seven.dimension() << std::endl;
+  std::clog << "st.dimension()=" <<
st.dimension() << " | st_wo_seven.dimension()=" << st_wo_seven.dimension() << std::endl; BOOST_CHECK(st == st_wo_seven); } BOOST_AUTO_TEST_CASE(auto_dimension_set) { - std::cout << "********************************************************************" << std::endl; - std::cout << "DIMENSION ON REMOVE MAXIMAL SIMPLEX" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "DIMENSION ON REMOVE MAXIMAL SIMPLEX" << std::endl; Mini_stree st; @@ -148,80 +148,80 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) { BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({6, 7, 8, 10})" << std::endl; + std::clog << "st.remove_maximal_simplex({6, 7, 8, 10})" << std::endl; st.remove_maximal_simplex(st.find({6, 7, 8, 10})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({6, 7, 8, 9})" << std::endl; + std::clog << "st.remove_maximal_simplex({6, 7, 8, 9})" << std::endl; st.remove_maximal_simplex(st.find({6, 7, 8, 9})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl; + std::clog << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl; st.remove_maximal_simplex(st.find({1, 2, 3, 4})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl; + std::clog << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl; st.remove_maximal_simplex(st.find({1, 2, 3, 5})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 2); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; - std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl; st.insert_simplex_and_subfaces({1, 2, 3, 5}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl; st.insert_simplex_and_subfaces({1, 2, 3, 4}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({1, 2, 
3, 5})" << std::endl; + std::clog << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl; st.remove_maximal_simplex(st.find({1, 2, 3, 5})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl; + std::clog << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl; st.remove_maximal_simplex(st.find({1, 2, 3, 4})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 2); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; - std::cout << "st.insert_simplex_and_subfaces({0, 1, 3, 4})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({0, 1, 3, 4})" << std::endl; st.insert_simplex_and_subfaces({0, 1, 3, 4}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.remove_maximal_simplex({0, 1, 3, 4})" << std::endl; + std::clog << "st.remove_maximal_simplex({0, 1, 3, 4})" << std::endl; st.remove_maximal_simplex(st.find({0, 1, 3, 4})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 2); - std::cout << "st.dimension()=" << st.dimension() << std::endl; + std::clog << "st.dimension()=" << st.dimension() << std::endl; - std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl; st.insert_simplex_and_subfaces({1, 2, 3, 5}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl; st.insert_simplex_and_subfaces({1, 2, 3, 4}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); @@ -229,7 +229,7 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) { // Check you can override the dimension // This is a limit test case - shall not happen st.set_dimension(1); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 1); // check dimension() and lower_upper_bound_dimension() is not giving the right answer because dimension is too low BOOST_CHECK(st.dimension() == 1); @@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) { // Check you can 
override the dimension // This is a limit test case - shall not happen st.set_dimension(6); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 6); // check dimension() do not launch lower_upper_bound_dimension() BOOST_CHECK(st.dimension() == 6); @@ -246,27 +246,27 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) { // Reset with the correct value st.set_dimension(3); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == 3); - std::cout << "st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6})" << std::endl; + std::clog << "st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6})" << std::endl; st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6}); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 6); BOOST_CHECK(st.dimension() == 6); - std::cout << "st.remove_maximal_simplex({0, 1, 2, 3, 4, 5, 6})" << std::endl; + std::clog << "st.remove_maximal_simplex({0, 1, 2, 3, 4, 5, 6})" << std::endl; st.remove_maximal_simplex(st.find({0, 1, 2, 3, 4, 5, 6})); - std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; + std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 6); BOOST_CHECK(st.dimension() == 5); } BOOST_AUTO_TEST_CASE(prune_above_filtration) { - std::cout << "********************************************************************" << std::endl; - std::cout << "PRUNE ABOVE FILTRATION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "PRUNE ABOVE FILTRATION" << std::endl; Stree st; @@ -321,15 +321,15 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) { BOOST_CHECK(!simplex_is_changed); // Display the Simplex_tree - std::cout << "The complex contains " << st.num_simplices() << " simplices"; - std::cout << " - dimension " << st.dimension() << std::endl; - std::cout << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << "The complex contains " << st.num_simplices() << " simplices"; + std::clog << " - dimension " << st.dimension() << std::endl; + std::clog << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } // Check the pruned cases @@ -340,15 +340,15 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) { BOOST_CHECK(simplex_is_changed); // Display the Simplex_tree - std::cout << "The complex pruned at 2.5 contains " << st.num_simplices() << " simplices"; - std::cout << " - dimension " << st.dimension() << std::endl; + std::clog << "The complex pruned at 2.5 contains " << st.num_simplices() << " simplices"; + std::clog << " - dimension " << st.dimension() << 
std::endl; simplex_is_changed = st.prune_above_filtration(2.0); if (simplex_is_changed) st.initialize_filtration(); - std::cout << "The complex pruned at 2.0 contains " << st.num_simplices() << " simplices"; - std::cout << " - dimension " << st.dimension() << std::endl; + std::clog << "The complex pruned at 2.0 contains " << st.num_simplices() << " simplices"; + std::clog << " - dimension " << st.dimension() << std::endl; BOOST_CHECK(st == st_pruned); BOOST_CHECK(!simplex_is_changed); @@ -360,12 +360,12 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) { st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices"; - std::cout << " - upper_bound_dimension " << st.upper_bound_dimension() << std::endl; + std::clog << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices"; + std::clog << " - upper_bound_dimension " << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == 3); BOOST_CHECK(st.dimension() == -1); - std::cout << "upper_bound_dimension=" << st.upper_bound_dimension() << std::endl; + std::clog << "upper_bound_dimension=" << st.upper_bound_dimension() << std::endl; BOOST_CHECK(st.upper_bound_dimension() == -1); BOOST_CHECK(st == st_empty); @@ -380,8 +380,8 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) { } BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) { - std::cout << "********************************************************************" << std::endl; - std::cout << "MINI PRUNE ABOVE FILTRATION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "MINI PRUNE ABOVE FILTRATION" << std::endl; Mini_stree st; @@ -402,7 +402,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) { st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl; BOOST_CHECK(st.num_simplices() == 27); // Test case to the limit - With these options, there is no filtration, which means filtration is 0 @@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) { if (simplex_is_changed) st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The complex pruned at 1.0 contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << "The complex pruned at 1.0 contains " << st.num_simplices() << " simplices" << std::endl; BOOST_CHECK(!simplex_is_changed); BOOST_CHECK(st.num_simplices() == 27); @@ -418,7 +418,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) { if (simplex_is_changed) st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices" << std::endl; BOOST_CHECK(!simplex_is_changed); BOOST_CHECK(st.num_simplices() == 27); @@ -427,11 +427,11 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) { if (simplex_is_changed) st.initialize_filtration(); // Display the Simplex_tree - std::cout << "The complex pruned at -1.0 contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << "The complex pruned at -1.0 contains " << st.num_simplices() << " simplices" << std::endl; BOOST_CHECK(simplex_is_changed); BOOST_CHECK(st.num_simplices() == 0); // Display the Simplex_tree - std::cout << 
"The complex contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl; } diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp index 58bfa8db..7746fa2a 100644 --- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp @@ -48,22 +48,22 @@ void test_empty_simplex_tree(typeST& tst) { template void test_iterators_on_empty_simplex_tree(typeST& tst) { - std::cout << "Iterator on vertices: " << std::endl; + std::clog << "Iterator on vertices: " << std::endl; for (auto vertex : tst.complex_vertex_range()) { - std::cout << "vertice:" << vertex << std::endl; + std::clog << "vertice:" << vertex << std::endl; BOOST_CHECK(false); // shall be empty } - std::cout << "Iterator on simplices: " << std::endl; + std::clog << "Iterator on simplices: " << std::endl; for (auto simplex : tst.complex_simplex_range()) { BOOST_CHECK(simplex != simplex); // shall be empty - to remove warning of non-used simplex } - std::cout + std::clog << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : tst.filtration_simplex_range()) { BOOST_CHECK(false); // shall be empty - std::cout << "test_iterators_on_empty_simplex_tree - filtration=" + std::clog << "test_iterators_on_empty_simplex_tree - filtration=" << tst.filtration(f_simplex) << std::endl; } } @@ -72,15 +72,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_when_empty, typeST, list_of_tested_va typedef std::pair typePairSimplexBool; typedef std::vector typeVectorVertex; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF DEFAULT CONSTRUCTOR" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF DEFAULT CONSTRUCTOR" << std::endl; typeST st; test_empty_simplex_tree(st); test_iterators_on_empty_simplex_tree(st); // TEST OF EMPTY INSERTION - std::cout << "TEST OF EMPTY INSERTION" << std::endl; + std::clog << "TEST OF EMPTY INSERTION" << std::endl; typeVectorVertex simplexVectorEmpty; BOOST_CHECK(simplexVectorEmpty.empty() == true); typePairSimplexBool returnEmptyValue = st.insert_simplex(simplexVectorEmpty, 0.0); @@ -98,8 +98,8 @@ bool AreAlmostTheSame(float a, float b) { BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_from_file, typeST, list_of_tested_variants) { // TEST OF INSERTION - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF SIMPLEX TREE FROM A FILE" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF SIMPLEX TREE FROM A FILE" << std::endl; typeST st; std::string inputFile("simplex_tree_for_unit_test.txt"); @@ -107,8 +107,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_from_file, typeST, list_of_tested_var simplex_tree_stream >> st; // Display the Simplex_tree - std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl; - std::cout << " - dimension " << st.dimension() << std::endl; + std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << " - dimension " << st.dimension() << std::endl; // Check BOOST_CHECK(st.num_simplices() == 143353); @@ -134,13 +134,13 @@ template void test_simplex_tree_contains(typeST& simplexTree, typeSimplex& 
simplex, int pos) { auto f_simplex = simplexTree.filtration_simplex_range().begin() + pos; - std::cout << "test_simplex_tree_contains - filtration=" << simplexTree.filtration(*f_simplex) << "||" << simplex.second << std::endl; + std::clog << "test_simplex_tree_contains - filtration=" << simplexTree.filtration(*f_simplex) << "||" << simplex.second << std::endl; BOOST_CHECK(AreAlmostTheSame(simplexTree.filtration(*f_simplex), simplex.second)); int simplexIndex = simplex.first.size() - 1; std::sort(simplex.first.begin(), simplex.first.end()); // if the simplex wasn't sorted, the next test could fail for (auto vertex : simplexTree.simplex_vertex_range(*f_simplex)) { - std::cout << "test_simplex_tree_contains - vertex=" << vertex << "||" << simplex.first.at(simplexIndex) << std::endl; + std::clog << "test_simplex_tree_contains - vertex=" << vertex << "||" << simplex.first.at(simplexIndex) << std::endl; BOOST_CHECK(vertex == simplex.first.at(simplexIndex)); BOOST_CHECK(simplexIndex >= 0); simplexIndex--; @@ -163,7 +163,7 @@ void set_and_test_simplex_tree_dim_fil(typeST& simplexTree, int vectorSize, cons if (vectorSize > dim_max + 1) { dim_max = vectorSize - 1; simplexTree.set_dimension(dim_max); - std::cout << " set_and_test_simplex_tree_dim_fil - dim_max=" << dim_max + std::clog << " set_and_test_simplex_tree_dim_fil - dim_max=" << dim_max << std::endl; } @@ -193,12 +193,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var dim_max = -1; // TEST OF INSERTION - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF INSERTION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF INSERTION" << std::endl; typeST st; // ++ FIRST - std::cout << " - INSERT 0" << std::endl; + std::clog << " - INSERT 0" << std::endl; typeVectorVertex firstSimplexVector{0}; BOOST_CHECK(firstSimplexVector.size() == 1); typeSimplex firstSimplex = std::make_pair(firstSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); @@ -209,7 +209,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 1); // ++ SECOND - std::cout << " - INSERT 1" << std::endl; + std::clog << " - INSERT 1" << std::endl; typeVectorVertex secondSimplexVector{1}; BOOST_CHECK(secondSimplexVector.size() == 1); typeSimplex secondSimplex = std::make_pair(secondSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); @@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 2); // ++ THIRD - std::cout << " - INSERT (0,1)" << std::endl; + std::clog << " - INSERT (0,1)" << std::endl; typeVectorVertex thirdSimplexVector{0, 1}; BOOST_CHECK(thirdSimplexVector.size() == 2); typeSimplex thirdSimplex = std::make_pair(thirdSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); @@ -231,7 +231,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 2); // Not incremented !! 
// ++ FOURTH - std::cout << " - INSERT 2" << std::endl; + std::clog << " - INSERT 2" << std::endl; typeVectorVertex fourthSimplexVector{2}; BOOST_CHECK(fourthSimplexVector.size() == 1); typeSimplex fourthSimplex = std::make_pair(fourthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); @@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 3); // ++ FIFTH - std::cout << " - INSERT (2,0)" << std::endl; + std::clog << " - INSERT (2,0)" << std::endl; typeVectorVertex fifthSimplexVector{2, 0}; BOOST_CHECK(fifthSimplexVector.size() == 2); typeSimplex fifthSimplex = std::make_pair(fifthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); @@ -253,7 +253,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !! // ++ SIXTH - std::cout << " - INSERT (2,1)" << std::endl; + std::clog << " - INSERT (2,1)" << std::endl; typeVectorVertex sixthSimplexVector{2, 1}; BOOST_CHECK(sixthSimplexVector.size() == 2); typeSimplex sixthSimplex = std::make_pair(sixthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); @@ -264,7 +264,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !! // ++ SEVENTH - std::cout << " - INSERT (2,1,0)" << std::endl; + std::clog << " - INSERT (2,1,0)" << std::endl; typeVectorVertex seventhSimplexVector{2, 1, 0}; BOOST_CHECK(seventhSimplexVector.size() == 3); typeSimplex seventhSimplex = std::make_pair(seventhSimplexVector, Filtration_value(THIRD_FILTRATION_VALUE)); @@ -275,7 +275,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !! // ++ EIGHTH - std::cout << " - INSERT 3" << std::endl; + std::clog << " - INSERT 3" << std::endl; typeVectorVertex eighthSimplexVector{3}; BOOST_CHECK(eighthSimplexVector.size() == 1); typeSimplex eighthSimplex = std::make_pair(eighthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE)); @@ -286,7 +286,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 4); // ++ NINETH - std::cout << " - INSERT (3,0)" << std::endl; + std::clog << " - INSERT (3,0)" << std::endl; typeVectorVertex ninethSimplexVector{3, 0}; BOOST_CHECK(ninethSimplexVector.size() == 2); typeSimplex ninethSimplex = std::make_pair(ninethSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE)); @@ -297,7 +297,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var BOOST_CHECK(st.num_vertices() == (size_t) 4); // Not incremented !! 
// ++ TENTH - std::cout << " - INSERT 0 (already inserted)" << std::endl; + std::clog << " - INSERT 0 (already inserted)" << std::endl; typeVectorVertex tenthSimplexVector{0}; BOOST_CHECK(tenthSimplexVector.size() == 1); // With a different filtration value @@ -308,12 +308,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var // Simplex_handle = boost::container::flat_map< typeST::Vertex_handle, Node >::iterator typename typeST::Simplex_handle shReturned = returnValue.first; BOOST_CHECK(shReturned == typename typeST::Simplex_handle(nullptr)); - std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl; + std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl; BOOST_CHECK(st.num_vertices() == (size_t) 4); // Not incremented !! BOOST_CHECK(st.dimension() == dim_max); // ++ ELEVENTH - std::cout << " - INSERT (2,1,0) (already inserted)" << std::endl; + std::clog << " - INSERT (2,1,0) (already inserted)" << std::endl; typeVectorVertex eleventhSimplexVector{2, 1, 0}; BOOST_CHECK(eleventhSimplexVector.size() == 3); typeSimplex eleventhSimplex = std::make_pair(eleventhSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE)); @@ -343,35 +343,35 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var // [0.2] 3 0 // [0.3] 2 1 0 // !! Be careful, simplex are sorted by filtration value on insertion !! - std::cout << "simplex_tree_insertion - first - 0" << std::endl; + std::clog << "simplex_tree_insertion - first - 0" << std::endl; test_simplex_tree_contains(st, firstSimplex, 0); // (0) -> 0 - std::cout << "simplex_tree_insertion - second - 1" << std::endl; + std::clog << "simplex_tree_insertion - second - 1" << std::endl; test_simplex_tree_contains(st, secondSimplex, 1); // (1) -> 1 - std::cout << "simplex_tree_insertion - third - 4" << std::endl; + std::clog << "simplex_tree_insertion - third - 4" << std::endl; test_simplex_tree_contains(st, thirdSimplex, 4); // (0,1) -> 4 - std::cout << "simplex_tree_insertion - fourth - 2" << std::endl; + std::clog << "simplex_tree_insertion - fourth - 2" << std::endl; test_simplex_tree_contains(st, fourthSimplex, 2); // (2) -> 2 - std::cout << "simplex_tree_insertion - fifth - 5" << std::endl; + std::clog << "simplex_tree_insertion - fifth - 5" << std::endl; test_simplex_tree_contains(st, fifthSimplex, 5); // (2,0) -> 5 - std::cout << "simplex_tree_insertion - sixth - 6" << std::endl; + std::clog << "simplex_tree_insertion - sixth - 6" << std::endl; test_simplex_tree_contains(st, sixthSimplex, 6); //(2,1) -> 6 - std::cout << "simplex_tree_insertion - seventh - 8" << std::endl; + std::clog << "simplex_tree_insertion - seventh - 8" << std::endl; test_simplex_tree_contains(st, seventhSimplex, 8); // (2,1,0) -> 8 - std::cout << "simplex_tree_insertion - eighth - 3" << std::endl; + std::clog << "simplex_tree_insertion - eighth - 3" << std::endl; test_simplex_tree_contains(st, eighthSimplex, 3); // (3) -> 3 - std::cout << "simplex_tree_insertion - nineth - 7" << std::endl; + std::clog << "simplex_tree_insertion - nineth - 7" << std::endl; test_simplex_tree_contains(st, ninethSimplex, 7); // (3,0) -> 7 // Display the Simplex_tree - Can not be done in the middle of 2 inserts - std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl; - std::cout << " - dimension " << st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << "The complex contains " << 
st.num_simplices() << " simplices" << std::endl; + std::clog << " - dimension " << st.dimension() << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } @@ -380,14 +380,14 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o typedef std::pair typePairSimplexBool; typedef std::vector typeVectorVertex; typedef std::pair typeSimplex; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST OF RECURSIVE INSERTION" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST OF RECURSIVE INSERTION" << std::endl; typeST st; typePairSimplexBool returnValue; int position = 0; // ++ FIRST - std::cout << " - INSERT (2,1,0)" << std::endl; + std::clog << " - INSERT (2,1,0)" << std::endl; typeVectorVertex SimplexVector1{2, 1, 0}; BOOST_CHECK(SimplexVector1.size() == 3); returnValue = st.insert_simplex_and_subfaces(SimplexVector1); @@ -400,13 +400,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o std::sort(SimplexVector1.begin(), SimplexVector1.end(), std::greater()); for (auto vertex : st.simplex_vertex_range(returnValue.first)) { // Check returned Simplex_handle - std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector1[position] << std::endl; + std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector1[position] << std::endl; BOOST_CHECK(vertex == SimplexVector1[position]); position++; } // ++ SECOND - std::cout << " - INSERT 3" << std::endl; + std::clog << " - INSERT 3" << std::endl; typeVectorVertex SimplexVector2{3}; BOOST_CHECK(SimplexVector2.size() == 1); returnValue = st.insert_simplex_and_subfaces(SimplexVector2); @@ -419,13 +419,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o std::sort(SimplexVector2.begin(), SimplexVector2.end(), std::greater()); for (auto vertex : st.simplex_vertex_range(returnValue.first)) { // Check returned Simplex_handle - std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector2[position] << std::endl; + std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector2[position] << std::endl; BOOST_CHECK(vertex == SimplexVector2[position]); position++; } // ++ THIRD - std::cout << " - INSERT (0,3)" << std::endl; + std::clog << " - INSERT (0,3)" << std::endl; typeVectorVertex SimplexVector3{3, 0}; BOOST_CHECK(SimplexVector3.size() == 2); returnValue = st.insert_simplex_and_subfaces(SimplexVector3); @@ -438,13 +438,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o std::sort(SimplexVector3.begin(), SimplexVector3.end(), std::greater()); for (auto vertex : st.simplex_vertex_range(returnValue.first)) { // Check returned Simplex_handle - std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector3[position] << std::endl; + std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << 
SimplexVector3[position] << std::endl; BOOST_CHECK(vertex == SimplexVector3[position]); position++; } // ++ FOURTH - std::cout << " - INSERT (1,0) (already inserted)" << std::endl; + std::clog << " - INSERT (1,0) (already inserted)" << std::endl; typeVectorVertex SimplexVector4{1, 0}; BOOST_CHECK(SimplexVector4.size() == 2); returnValue = st.insert_simplex_and_subfaces(SimplexVector4); @@ -455,7 +455,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o BOOST_CHECK(false == returnValue.second); // ++ FIFTH - std::cout << " - INSERT (3,4,5)" << std::endl; + std::clog << " - INSERT (3,4,5)" << std::endl; typeVectorVertex SimplexVector5{3, 4, 5}; BOOST_CHECK(SimplexVector5.size() == 3); returnValue = st.insert_simplex_and_subfaces(SimplexVector5); @@ -468,13 +468,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o std::sort(SimplexVector5.begin(), SimplexVector5.end(), std::greater()); for (auto vertex : st.simplex_vertex_range(returnValue.first)) { // Check returned Simplex_handle - std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector5[position] << std::endl; + std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector5[position] << std::endl; BOOST_CHECK(vertex == SimplexVector5[position]); position++; } // ++ SIXTH - std::cout << " - INSERT (0,1,6,7)" << std::endl; + std::clog << " - INSERT (0,1,6,7)" << std::endl; typeVectorVertex SimplexVector6{0, 1, 6, 7}; BOOST_CHECK(SimplexVector6.size() == 4); returnValue = st.insert_simplex_and_subfaces(SimplexVector6); @@ -487,7 +487,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o std::sort(SimplexVector6.begin(), SimplexVector6.end(), std::greater()); for (auto vertex : st.simplex_vertex_range(returnValue.first)) { // Check returned Simplex_handle - std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector6[position] << std::endl; + std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector6[position] << std::endl; BOOST_CHECK(vertex == SimplexVector6[position]); position++; } @@ -525,63 +525,63 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o // ------------------------------------------------------------------------------------------------------------------ typeVectorVertex simpleSimplexVector{1}; typename typeST::Simplex_handle simplexFound = st.find(simpleSimplexVector); - std::cout << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n"; if (simplexFound != st.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; // Check it is found BOOST_CHECK(simplexFound != st.null_simplex()); typeVectorVertex unknownSimplexVector{15}; simplexFound = st.find(unknownSimplexVector); - std::cout << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n"; if (simplexFound != st.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; // Check it is NOT found BOOST_CHECK(simplexFound == st.null_simplex()); simplexFound = st.find(SimplexVector6); - std::cout << "**************IS THE SIMPLEX {0,1,6,7} IN THE SIMPLEX 
TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {0,1,6,7} IN THE SIMPLEX TREE ?\n"; if (simplexFound != st.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; // Check it is found BOOST_CHECK(simplexFound != st.null_simplex()); typeVectorVertex otherSimplexVector{1, 15}; simplexFound = st.find(otherSimplexVector); - std::cout << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n"; if (simplexFound != st.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; // Check it is NOT found BOOST_CHECK(simplexFound == st.null_simplex()); typeVectorVertex invSimplexVector{1, 2, 0}; simplexFound = st.find(invSimplexVector); - std::cout << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n"; + std::clog << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n"; if (simplexFound != st.null_simplex()) - std::cout << "***+ YES IT IS!\n"; + std::clog << "***+ YES IT IS!\n"; else - std::cout << "***- NO IT ISN'T\n"; + std::clog << "***- NO IT ISN'T\n"; // Check it is found BOOST_CHECK(simplexFound != st.null_simplex()); // Display the Simplex_tree - Can not be done in the middle of 2 inserts - std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl; - std::cout << " - dimension " << st.dimension() << std::endl; - std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; + std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl; + std::clog << " - dimension " << st.dimension() << std::endl; + std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl; for (auto f_simplex : st.filtration_simplex_range()) { - std::cout << " " << "[" << st.filtration(f_simplex) << "] "; + std::clog << " " << "[" << st.filtration(f_simplex) << "] "; for (auto vertex : st.simplex_vertex_range(f_simplex)) { - std::cout << (int) vertex << " "; + std::clog << (int) vertex << " "; } - std::cout << std::endl; + std::clog << std::endl; } } @@ -595,17 +595,17 @@ void test_cofaces(typeST& st, const std::vector& expected, int di for (auto simplex = cofaces.begin(); simplex != cofaces.end(); ++simplex) { typename typeST::Simplex_vertex_range rg = st.simplex_vertex_range(*simplex); for (auto vertex = rg.begin(); vertex != rg.end(); ++vertex) { - std::cout << "(" << *vertex << ")"; + std::clog << "(" << *vertex << ")"; } - std::cout << std::endl; + std::clog << std::endl; BOOST_CHECK(std::find(res.begin(), res.end(), *simplex) != res.end()); } } BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_variants) { typedef std::vector typeVectorVertex; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST COFACE ALGORITHM" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST COFACE ALGORITHM" << std::endl; typeST st; typeVectorVertex SimplexVector{2, 1, 0}; @@ -631,7 +631,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var std::vector simplex_result; std::vector result; - std::cout << "First test - Star of (3):" << 
std::endl;
+  std::clog << "First test - Star of (3):" << std::endl;
   simplex_result = {3};
   result.push_back(st.find(simplex_result));
@@ -656,7 +656,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
   vertex.push_back(1);
   vertex.push_back(7);
-  std::cout << "Second test - Star of (1,7): " << std::endl;
+  std::clog << "Second test - Star of (1,7): " << std::endl;
   simplex_result = {7, 1};
   result.push_back(st.find(simplex_result));
@@ -673,7 +673,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
   test_cofaces(st, vertex, 0, result);
   result.clear();
-  std::cout << "Third test - 2-dimension Cofaces of simplex(1,7) : " << std::endl;
+  std::clog << "Third test - 2-dimension Cofaces of simplex(1,7) : " << std::endl;
   simplex_result = {7, 1, 0};
   result.push_back(st.find(simplex_result));
@@ -684,15 +684,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
   test_cofaces(st, vertex, 1, result);
   result.clear();
-  std::cout << "Cofaces with a codimension too high (codimension + vetices > tree.dimension) :" << std::endl;
+  std::clog << "Cofaces with a codimension too high (codimension + vertices > tree.dimension):" << std::endl;
   test_cofaces(st, vertex, 5, result);
-  //std::cout << "Cofaces with an empty codimension" << std::endl;
+  //std::clog << "Cofaces with an empty codimension" << std::endl;
   //test_cofaces(st, vertex, -1, result);
-  // std::cout << "Cofaces in an empty simplex tree" << std::endl;
+  // std::clog << "Cofaces in an empty simplex tree" << std::endl;
   // typeST empty_tree;
   // test_cofaces(empty_tree, vertex, 1, result);
-  //std::cout << "Cofaces of an empty simplex" << std::endl;
+  //std::clog << "Cofaces of an empty simplex" << std::endl;
   //vertex.clear();
   // test_cofaces(st, vertex, 1, result);
@@ -700,8 +700,8 @@
 BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_variants) {
   typedef std::vector typeVectorVertex;
-  std::cout << "********************************************************************" << std::endl;
-  std::cout << "TEST COPY MOVE CONSTRUCTORS" << std::endl;
+  std::clog << "********************************************************************" << std::endl;
+  std::clog << "TEST COPY MOVE CONSTRUCTORS" << std::endl;
   typeST st;
   typeVectorVertex SimplexVector{2, 1, 0};
@@ -725,11 +725,11 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
   /* o */
   /* 5 */
-  std::cout << "Printing st - address = " << &st << std::endl;
+  std::clog << "Printing st - address = " << &st << std::endl;
   // Copy constructor
   typeST st_copy = st;
-  std::cout << "Printing a copy of st - address = " << &st_copy << std::endl;
+  std::clog << "Printing a copy of st - address = " << &st_copy << std::endl;
   // Check the data are the same
   BOOST_CHECK(st == st_copy);
@@ -738,7 +738,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
   // Move constructor
   typeST st_move = std::move(st);
-  std::cout << "Printing a move of st - address = " << &st_move << std::endl;
+  std::clog << "Printing a move of st - address = " << &st_move << std::endl;
   // Check the data are the same
   BOOST_CHECK(st_move == st_copy);
@@ -753,7 +753,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
   BOOST_CHECK(st.num_simplices() == 0);
   BOOST_CHECK(st.num_vertices() == (size_t)0);
-  std::cout << "Printing st once again- address = " << &st <<
std::endl; + std::clog << "Printing st once again- address = " << &st << std::endl; } template @@ -768,22 +768,22 @@ void test_simplex_is_vertex(typeST& st, typename typeST::Simplex_handle sh, type BOOST_AUTO_TEST_CASE(non_contiguous) { typedef Simplex_tree<> typeST; typedef typeST::Simplex_handle Simplex_handle; - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST NON-CONTIGUOUS VERTICES" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST NON-CONTIGUOUS VERTICES" << std::endl; typeST st; typeST::Vertex_handle e[] = {3,-7}; - std::cout << "Insert" << std::endl; + std::clog << "Insert" << std::endl; st.insert_simplex_and_subfaces(e); BOOST_CHECK(st.num_vertices() == 2); BOOST_CHECK(st.num_simplices() == 3); - std::cout << "Find" << std::endl; + std::clog << "Find" << std::endl; Simplex_handle sh = st.find(e); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "Endpoints" << std::endl; + std::clog << "Endpoints" << std::endl; auto p = st.endpoints(sh); test_simplex_is_vertex(st, p.first, 3); test_simplex_is_vertex(st, p.second, -7); - std::cout << "Boundary" << std::endl; + std::clog << "Boundary" << std::endl; auto&& b = st.boundary_simplex_range(sh); auto i = std::begin(b); test_simplex_is_vertex(st, *i, -7); @@ -792,8 +792,8 @@ BOOST_AUTO_TEST_CASE(non_contiguous) { } BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { - std::cout << "********************************************************************" << std::endl; - std::cout << "MAKE FILTRATION NON DECREASING" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "MAKE FILTRATION NON DECREASING" << std::endl; typedef Simplex_tree<> typeST; typeST st; @@ -810,7 +810,7 @@ BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { /* o */ /* 5 */ - std::cout << "Check default insertion ensures the filtration values are non decreasing" << std::endl; + std::clog << "Check default insertion ensures the filtration values are non decreasing" << std::endl; BOOST_CHECK(!st.make_filtration_non_decreasing()); // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0 @@ -826,7 +826,7 @@ BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { // o // 5 - std::cout << "Check default second insertion ensures the filtration values are non decreasing" << std::endl; + std::clog << "Check default second insertion ensures the filtration values are non decreasing" << std::endl; BOOST_CHECK(!st.make_filtration_non_decreasing()); // Copy original simplex tree @@ -840,7 +840,7 @@ BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { st.assign_filtration(st.find({3,4}), 1.1); st.assign_filtration(st.find({4,5}), 1.99); - std::cout << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl; + std::clog << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl; BOOST_CHECK(st.make_filtration_non_decreasing()); BOOST_CHECK(st == st_copy); @@ -856,7 +856,7 @@ BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { // By modifying just the simplex {2} // {0,1,2}, {1,2} and {0,2} will be modified - std::cout << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl; + std::clog << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl; 
BOOST_CHECK(st.make_filtration_non_decreasing()); BOOST_CHECK(st == st_other); @@ -869,7 +869,7 @@ BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) { // Other copy simplex tree typeST st_other_copy = st; - std::cout << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl; + std::clog << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl; BOOST_CHECK(!st.make_filtration_non_decreasing()); BOOST_CHECK(st == st_other_copy); @@ -896,8 +896,8 @@ typedef boost::mpl::list>> list_of_graph_variants; BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insert_graph, Graph, list_of_graph_variants) { - std::cout << "********************************************************************" << std::endl; - std::cout << "INSERT GRAPH" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "INSERT GRAPH" << std::endl; Graph g(3); // filtration value 0 everywhere @@ -924,18 +924,18 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insert_graph, Graph, list_of_graph_va st2.insert_graph(g); BOOST_CHECK(st2.num_simplices() == 6); - std::cout << "st1 is" << std::endl; - std::cout << st1 << std::endl; + std::clog << "st1 is" << std::endl; + std::clog << st1 << std::endl; - std::cout << "st2 is" << std::endl; - std::cout << st2 << std::endl; + std::clog << "st2 is" << std::endl; + std::clog << st2 << std::endl; BOOST_CHECK(st1 == st2); } BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested_variants) { - std::cout << "********************************************************************" << std::endl; - std::cout << "TEST INSERT DUPLICATED VERTICES" << std::endl; + std::clog << "********************************************************************" << std::endl; + std::clog << "TEST INSERT DUPLICATED VERTICES" << std::endl; typeST st; typename typeST::Simplex_handle sh; @@ -943,25 +943,25 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested std::tie(sh, success) = st.insert_simplex_and_subfaces({1}); BOOST_CHECK(success); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl; + std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl; BOOST_CHECK(st.dimension(sh) == 0); std::tie(sh, success) = st.insert_simplex_and_subfaces({2, 2}); BOOST_CHECK(success); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl; + std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl; BOOST_CHECK(st.dimension(sh) == 0); std::tie(sh, success) = st.insert_simplex_and_subfaces({3, 3, 3}); BOOST_CHECK(success); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl; + std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl; BOOST_CHECK(st.dimension(sh) == 0); std::tie(sh, success) = st.insert_simplex_and_subfaces({4, 4, 4, 4}); BOOST_CHECK(success); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl; + std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl; BOOST_CHECK(st.dimension(sh) == 0); - std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() + std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() << " - num_simplices = " << st.num_simplices() << std::endl; BOOST_CHECK(st.dimension() == 0); 
BOOST_CHECK(st.num_simplices() == st.num_vertices()); @@ -969,10 +969,10 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested std::tie(sh, success) = st.insert_simplex_and_subfaces({2, 1, 1, 2}); BOOST_CHECK(success); BOOST_CHECK(sh != st.null_simplex()); - std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl; + std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl; BOOST_CHECK(st.dimension(sh) == 1); - std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() + std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() << " - num_simplices = " << st.num_simplices() << std::endl; BOOST_CHECK(st.dimension() == 1); BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1); @@ -982,7 +982,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested BOOST_CHECK(!success); BOOST_CHECK(sh == st.null_simplex()); - std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() + std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices() << " - num_simplices = " << st.num_simplices() << std::endl; BOOST_CHECK(st.dimension() == 1); BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1); diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp index 486827eb..d04ca289 100644 --- a/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp +++ b/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp @@ -35,13 +35,13 @@ int main(int argc, char *argv[]) { Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces(simplices.begin(), simplices.end())); - std::cout << "Simplices:" << std::endl; + std::clog << "Simplices:" << std::endl; for (const Simplex & s : complex.complex_simplex_range()) - std::cout << s << " "; - std::cout << std::endl; + std::clog << s << " "; + std::clog << std::endl; // One blocker as simplex 0123 is not in the complex but all its proper faces are. - std::cout << "Blockers: " << complex.blockers_to_string() << std::endl; + std::clog << "Blockers: " << complex.blockers_to_string() << std::endl; // now build a complex from its full list of simplices simplices.clear(); @@ -53,13 +53,13 @@ int main(int argc, char *argv[]) { simplices.push_back(Simplex(Vertex_handle(2), Vertex_handle(0))); complex = Complex(simplices.begin(), simplices.end()); - std::cout << "Simplices:" << std::endl; + std::clog << "Simplices:" << std::endl; for (const Simplex & s : complex.complex_simplex_range()) - std::cout << s << " "; - std::cout << std::endl; + std::clog << s << " "; + std::clog << std::endl; // One blocker as simplex 012 is not in the complex but all its proper faces are. - std::cout << "Blockers: " << complex.blockers_to_string() << std::endl; + std::clog << "Blockers: " << complex.blockers_to_string() << std::endl; return EXIT_SUCCESS; } diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp index 7f301047..62084692 100644 --- a/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp +++ b/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) { // more appropriated! 
unsigned num_vertices = 0; for (auto v : complex.vertex_range()) { - std::cout << "Vertex " << v << std::endl; + std::clog << "Vertex " << v << std::endl; ++num_vertices; } @@ -65,9 +65,9 @@ int main(int argc, char *argv[]) { else euler -= 1; } - std::cout << "Saw " << num_vertices << " vertices, " << num_edges << " edges and " << num_simplices << " simplices" + std::clog << "Saw " << num_vertices << " vertices, " << num_edges << " edges and " << num_simplices << " simplices" << std::endl; - std::cout << "The Euler Characteristic is " << euler << std::endl; - std::cout << skbl_chrono; + std::clog << "The Euler Characteristic is " << euler << std::endl; + std::clog << skbl_chrono; return EXIT_SUCCESS; } diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp index e634b656..ba7ce43c 100644 --- a/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp +++ b/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp @@ -32,25 +32,25 @@ int main(int argc, char *argv[]) { Simplex tetrahedron(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)); complex.add_simplex(tetrahedron); - std::cout << "complex:" << complex.to_string() << std::endl; + std::clog << "complex:" << complex.to_string() << std::endl; // build the link of vertex 1, eg a triangle {0,2,3} auto link = complex.link(Vertex_handle(1)); - std::cout << "link:" << link.to_string() << std::endl; + std::clog << "link:" << link.to_string() << std::endl; // Internally link is a subcomplex of 'complex' and its vertices are stored in a vector. // They can be accessed via Vertex_handle(x) where x is an index of the vector. // In that example, link has three vertices and thus it contains only // Vertex_handle(0),Vertex_handle(1) and Vertex_handle(2) are). for (int i = 0; i < 5; ++i) - std::cout << "link.contains_vertex(Vertex_handle(" << i << ")):" << link.contains_vertex(Vertex_handle(i)) << + std::clog << "link.contains_vertex(Vertex_handle(" << i << ")):" << link.contains_vertex(Vertex_handle(i)) << std::endl; - std::cout << std::endl; + std::clog << std::endl; // To access to the initial vertices eg (0,1,2,3,4), Root_vertex_handle must be used. // For instance, to test if the link contains the vertex that was labeled i: for (int i = 0; i < 5; ++i) - std::cout << "link.contains_vertex(Root_vertex_handle(" << i << ")):" << + std::clog << "link.contains_vertex(Root_vertex_handle(" << i << ")):" << link.contains_vertex(Root_vertex_handle(i)) << std::endl; return EXIT_SUCCESS; diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h index bcca851f..653a63fd 100644 --- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h +++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h @@ -154,8 +154,8 @@ of a simplicial complex. 
else euler -= 1; } - std::cout << "Saw "< expected; @@ -373,7 +373,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link0) { auto L2 = complex.link(alpha); BOOST_CHECK(L == L2); - std::cout << L.to_string(); + std::clog << L.to_string(); BOOST_CHECK(L.contains_vertex(*L.get_address(Root_vertex_handle(b)))); BOOST_CHECK(L.contains_vertex(*L.get_address(Root_vertex_handle(d)))); @@ -432,9 +432,9 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link2) { // Complexes built // Print result - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; - std::cout << "L= Link_complex(" << alpha << ") : \n" << L.to_string(); + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; + std::clog << "L= Link_complex(" << alpha << ") : \n" << L.to_string(); auto L2 = complex.link(alpha); BOOST_CHECK(L == L2); @@ -472,9 +472,9 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link3) { // Complexes built // Print result - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; - std::cout << "L= Link_complex(" << alpha << ") : \n" << L.to_string(); + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; + std::clog << "L= Link_complex(" << alpha << ") : \n" << L.to_string(); auto L2 = complex.link(alpha); BOOST_CHECK(L == L2); @@ -529,8 +529,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link5) { // Complexes built // Print result - std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl; - std::cout << "Link: " << L.to_string() << std::endl; + std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl; + std::clog << "Link: " << L.to_string() << std::endl; // verification BOOST_CHECK(L.num_vertices() == 0); @@ -549,8 +549,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link6) { build_link_of_blocker(complex, alpha, link_blocker_alpha); // Print result - std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl; - std::cout << "Link: " << link_blocker_alpha.to_string() << std::endl; + std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl; + std::clog << "Link: " << link_blocker_alpha.to_string() << std::endl; // verification BOOST_CHECK(link_blocker_alpha.num_vertices() == 1); @@ -579,12 +579,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link7) { //the result should be the edge {6,7} plus the blocker {0,1,2} // Print result - std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl; - std::cout << "Link: " << link_blocker_alpha.to_string() << std::endl; + std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl; + std::clog << "Link: " << link_blocker_alpha.to_string() << std::endl; Skeleton_blocker_link_complex link_blocker_alpha_cpy = link_blocker_alpha; - std::cout << "Link copy: " << link_blocker_alpha_cpy.to_string() << std::endl; + std::clog << "Link copy: " << link_blocker_alpha_cpy.to_string() << std::endl; BOOST_CHECK(link_blocker_alpha.num_vertices() == link_blocker_alpha_cpy.num_vertices()); BOOST_CHECK(link_blocker_alpha.num_blockers() == link_blocker_alpha_cpy.num_blockers()); @@ -640,7 +640,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 1:\n" << complex.to_string(); + std::clog << "Constructor 1:\n" << complex.to_string(); BOOST_CHECK(complex.num_vertices() == 6); BOOST_CHECK(complex.num_edges() == 
10); @@ -677,10 +677,10 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor2) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 2:\n" << complex.to_string(); + std::clog << "Constructor 2:\n" << complex.to_string(); for (auto b : complex.const_blocker_range()) { - std::cout << "b:" << b << std::endl; + std::clog << "b:" << b << std::endl; } BOOST_CHECK(complex.num_vertices() == 5); @@ -698,7 +698,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor3) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 3:\n" << complex.to_string(); + std::clog << "Constructor 3:\n" << complex.to_string(); BOOST_CHECK(complex.num_blockers() == 1); Sh expected_blocker(Vh(0), Vh(1), Vh(2)); @@ -723,7 +723,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor4) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 4:\n" << complex.to_string(); + std::clog << "Constructor 4:\n" << complex.to_string(); BOOST_CHECK(complex.num_blockers() == 1); Sh expected_blocker(Vh(0), Vh(1), Vh(4)); for (auto b : complex.const_blocker_range()) @@ -753,7 +753,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor5) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 5:\n" << complex.to_string(); + std::clog << "Constructor 5:\n" << complex.to_string(); BOOST_CHECK(complex.num_vertices() == 6); BOOST_CHECK(complex.num_blockers() == 3); @@ -773,7 +773,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor6) { Complex complex(simplices.begin(), simplices.end()); - std::cout << "Constructor 6:\n" << complex.to_string(); + std::clog << "Constructor 6:\n" << complex.to_string(); BOOST_CHECK(complex.num_vertices() == 4); BOOST_CHECK(complex.num_blockers() == 1); @@ -795,7 +795,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor7) { //get complex from top faces Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces(simplices.begin(), simplices.end())); - std::cout << "Constructor 7:\n" << complex.to_string(); + std::clog << "Constructor 7:\n" << complex.to_string(); BOOST_CHECK(complex.num_vertices() == 4); BOOST_CHECK(complex.num_blockers() == 1); @@ -818,7 +818,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor8) { //get complex from top faces Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces(simplices.begin(), simplices.end())); - std::cout << "Constructor 8:\n" << complex.to_string(); + std::clog << "Constructor 8:\n" << complex.to_string(); BOOST_CHECK(complex.num_vertices() == 4); BOOST_CHECK(complex.num_blockers() == 2); diff --git a/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp b/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp index 8cad97a1..9042ddcf 100644 --- a/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp +++ b/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_off_reader_writer) { Gudhi::skeleton_blocker::Skeleton_blocker_off_reader off_reader("test2.off", complex); BOOST_CHECK(off_reader.is_valid()); - std::cout << "complex has " << + std::clog << "complex has " << complex.num_vertices() << " vertices, " << complex.num_blockers() << " blockers, " << complex.num_edges() << " edges and " << @@ -50,8 +50,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_off_reader_writer) { Complex same; 
Gudhi::skeleton_blocker::Skeleton_blocker_off_reader off_reader2("tmp.off", same); - std::cout << "\ncomplex:" << complex.to_string() << std::endl; - std::cout << "\nsame:" << same.to_string() << std::endl; + std::clog << "\ncomplex:" << complex.to_string() << std::endl; + std::clog << "\nsame:" << same.to_string() << std::endl; BOOST_CHECK(complex == same); } @@ -61,7 +61,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) { Gudhi::skeleton_blocker::Skeleton_blocker_off_reader off_reader("test2.off", complex); BOOST_CHECK(off_reader.is_valid()); - std::cout << "complex has " << + std::clog << "complex has " << complex.num_vertices() << " vertices, " << complex.num_blockers() << " blockers, " << complex.num_edges() << " edges and " << @@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) { auto link_0 = complex.abstract_link(Vertex_handle(0)); - std::cout << "\n link(0):" << link_0.to_string() << std::endl; + std::clog << "\n link(0):" << link_0.to_string() << std::endl; BOOST_CHECK(link_0.num_vertices() == 2); BOOST_CHECK(link_0.num_edges() == 1); @@ -91,13 +91,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) { BOOST_CHECK(link_0[*(edge_handle)].second() == Root_vertex_handle(4)); auto link_geometric_0 = complex.link(Vertex_handle(0)); - std::cout << "\n link_geometric(0):" << link_geometric_0.to_string() << std::endl; + std::clog << "\n link_geometric(0):" << link_geometric_0.to_string() << std::endl; BOOST_CHECK(link_0 == link_geometric_0); auto print_point = [&](Vertex_handle v) { - for (auto x : link_geometric_0.point(v)) std::cout << x << " "; - std::cout << std::endl; + for (auto x : link_geometric_0.point(v)) std::clog << x << " "; + std::clog << std::endl; }; std::for_each(link_geometric_0.vertex_range().begin(), link_geometric_0.vertex_range().end(), print_point); diff --git a/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp b/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp index b714753d..a85d4ff0 100644 --- a/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp +++ b/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp @@ -49,12 +49,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_contraction1) { static_cast (y))); // Print result - std::cout << "complex before complex" << complex.to_string() << std::endl; + std::clog << "complex before complex" << complex.to_string() << std::endl; - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; complex.contract_edge(static_cast (a), static_cast (b)); // Print result - std::cout << "ContractEdge(0,1)\n"; + std::clog << "ContractEdge(0,1)\n"; PRINT(complex.to_string()); // verification @@ -89,13 +89,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_contraction2) { complex.add_blocker(blocker); // Print result - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; complex.contract_edge(static_cast (a), static_cast (b)); - std::cout << "complex.ContractEdge(a,b)" << complex.to_string(); + std::clog << "complex.ContractEdge(a,b)" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; // there should be one blocker (a,c,d,e) in the complex BOOST_CHECK(complex.contains_blocker(Simplex(static_cast (a), static_cast (x), @@ -110,8 +110,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_link_condition1) { 
complex.add_blocker(Simplex(static_cast (0), static_cast (1), static_cast (2))); // Print result - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; BOOST_CHECK(complex.link_condition(Vertex_handle(1), Vertex_handle(2), true)); @@ -125,13 +125,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse0) { complex.add_edge_without_blockers(static_cast (2), static_cast (4)); complex.add_edge_without_blockers(static_cast (3), static_cast (4)); // Print result - std::cout << "initial complex :\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex :\n" << complex.to_string(); + std::clog << std::endl << std::endl; Simplex simplex_123(static_cast (1), static_cast (2), static_cast (3)); complex.remove_star(simplex_123); - std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string(); + std::clog << std::endl << std::endl; // verification BOOST_CHECK(complex.contains_blocker(simplex_123)); @@ -142,13 +142,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse1) { build_complete(4, complex); complex.add_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3))); // Print result - std::cout << "initial complex :\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex :\n" << complex.to_string(); + std::clog << std::endl << std::endl; Simplex simplex_123(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)); complex.remove_star(simplex_123); - std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string(); + std::clog << std::endl << std::endl; // verification BOOST_CHECK(complex.contains_blocker(simplex_123)); @@ -164,13 +164,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse2) { complex.add_edge_without_blockers(Vertex_handle(3), Vertex_handle(4)); complex.add_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3), Vertex_handle(4))); // Print result - std::cout << "initial complex :\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex :\n" << complex.to_string(); + std::clog << std::endl << std::endl; Simplex sigma(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)); complex.remove_star(sigma); - std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string(); + std::clog << std::endl << std::endl; // verification BOOST_CHECK(!complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), @@ -187,11 +187,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse3) { complex.add_edge_without_blockers(Vertex_handle(3), Vertex_handle(4)); complex.add_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3), Vertex_handle(4))); // Print result - std::cout << "initial complex:\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex:\n" << complex.to_string(); + std::clog << std::endl << std::endl; complex.remove_star(static_cast (2)); - std::cout << "complex after remove star of 2:\n" << complex.to_string(); + std::clog << "complex 
after remove star of 2:\n" << complex.to_string(); BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(3), Vertex_handle(4)))); BOOST_CHECK(!complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), @@ -202,11 +202,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex) { Complex complex(4); build_complete(4, complex); complex.add_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3))); - std::cout << "initial complex:\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex:\n" << complex.to_string(); + std::clog << std::endl << std::endl; complex.add_simplex(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3))); - std::cout << "complex after add_simplex:\n" << complex.to_string(); + std::clog << "complex after add_simplex:\n" << complex.to_string(); BOOST_CHECK(complex.num_blockers() == 1); BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)))); @@ -216,8 +216,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex2) { Complex complex; build_complete(4, complex); // Print result - std::cout << "initial complex:\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex:\n" << complex.to_string(); + std::clog << std::endl << std::endl; Complex copy(complex.num_vertices()); @@ -232,7 +232,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex2) { copy.add_simplex(simplex); } - std::cout << "complex after add_simplex:\n" << copy.to_string(); + std::clog << "complex after add_simplex:\n" << copy.to_string(); BOOST_CHECK(complex.num_blockers() == copy.num_blockers()); BOOST_CHECK(complex.num_edges() == copy.num_edges()); @@ -246,11 +246,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex3) { Simplex sigma(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2)); complex.add_blocker(sigma); // Print result - std::cout << "initial complex:\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex:\n" << complex.to_string(); + std::clog << std::endl << std::endl; complex.add_simplex(sigma); //should create two blockers 0123 and 0124 - std::cout << "complex after adding simplex 012:\n" << complex.to_string(); + std::clog << "complex after adding simplex 012:\n" << complex.to_string(); BOOST_CHECK(complex.num_blockers() == 2); BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)))); @@ -292,11 +292,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_edge) { complex.add_edge(Vertex_handle(i), Vertex_handle((i + 1) % 4)); // Print result - std::cout << "initial complex:\n" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "initial complex:\n" << complex.to_string(); + std::clog << std::endl << std::endl; complex.add_edge(Vertex_handle(1), Vertex_handle(3)); //should create two blockers 013 and 012 - std::cout << "complex after adding edge 13:\n" << complex.to_string(); + std::clog << "complex after adding edge 13:\n" << complex.to_string(); BOOST_CHECK(complex.num_blockers() == 2); BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3)))); BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)))); @@ -313,12 +313,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_remove_popable_blockers) 
complex.add_blocker(sigma1); complex.add_blocker(sigma2); - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; - std::cout << "complex.RemovePopableBlockers();" << std::endl; + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; + std::clog << "complex.RemovePopableBlockers();" << std::endl; complex.remove_popable_blockers(); - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; BOOST_CHECK(complex.num_blockers() == 1); @@ -337,12 +337,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_remove_popable_blockers) complex.add_blocker(sigma1); complex.add_blocker(sigma2); - std::cout << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; - std::cout << "complex.RemovePopableBlockers();" << std::endl; + std::clog << "complex complex" << complex.to_string(); + std::clog << std::endl << std::endl; + std::clog << "complex.RemovePopableBlockers();" << std::endl; complex.remove_popable_blockers(); - std::cout << "complex complex" << complex.to_string(); + std::clog << "complex complex" << complex.to_string(); - std::cout << std::endl << std::endl; + std::clog << std::endl << std::endl; BOOST_CHECK(complex.num_blockers() == 0); } diff --git a/src/Spatial_searching/example/example_spatial_searching.cpp b/src/Spatial_searching/example/example_spatial_searching.cpp index 034ad24a..8f9151fc 100644 --- a/src/Spatial_searching/example/example_spatial_searching.cpp +++ b/src/Spatial_searching/example/example_spatial_searching.cpp @@ -23,38 +23,38 @@ int main(void) { Points_ds points_ds(points); // 10-nearest neighbor query - std::cout << "10 nearest neighbors from points[20]:\n"; + std::clog << "10 nearest neighbors from points[20]:\n"; auto knn_range = points_ds.k_nearest_neighbors(points[20], 10, true); for (auto const& nghb : knn_range) - std::cout << nghb.first << " (sq. dist. = " << nghb.second << ")\n"; + std::clog << nghb.first << " (sq. dist. = " << nghb.second << ")\n"; // Incremental nearest neighbor query - std::cout << "Incremental nearest neighbors:\n"; + std::clog << "Incremental nearest neighbors:\n"; auto inn_range = points_ds.incremental_nearest_neighbors(points[45]); // Get the neighbors in distance order until we hit the first point for (auto ins_iterator = inn_range.begin(); ins_iterator->first != 0; ++ins_iterator) - std::cout << ins_iterator->first << " (sq. dist. = " << ins_iterator->second << ")\n"; + std::clog << ins_iterator->first << " (sq. dist. = " << ins_iterator->second << ")\n"; // 10-furthest neighbor query - std::cout << "10 furthest neighbors from points[20]:\n"; + std::clog << "10 furthest neighbors from points[20]:\n"; auto kfn_range = points_ds.k_furthest_neighbors(points[20], 10, true); for (auto const& nghb : kfn_range) - std::cout << nghb.first << " (sq. dist. = " << nghb.second << ")\n"; + std::clog << nghb.first << " (sq. dist. = " << nghb.second << ")\n"; // Incremental furthest neighbor query - std::cout << "Incremental furthest neighbors:\n"; + std::clog << "Incremental furthest neighbors:\n"; auto ifn_range = points_ds.incremental_furthest_neighbors(points[45]); // Get the neighbors in distance reverse order until we hit the first point for (auto ifs_iterator = ifn_range.begin(); ifs_iterator->first != 0; ++ifs_iterator) - std::cout << ifs_iterator->first << " (sq. dist. 
= " << ifs_iterator->second << ")\n"; + std::clog << ifs_iterator->first << " (sq. dist. = " << ifs_iterator->second << ")\n"; // All-near-neighbors search - std::cout << "All-near-neighbors search:\n"; + std::clog << "All-near-neighbors search:\n"; std::vector rs_result; points_ds.all_near_neighbors(points[45], 0.5, std::back_inserter(rs_result)); K k; for (auto const& p_idx : rs_result) - std::cout << p_idx << " (sq. dist. = " << k.squared_distance_d_object()(points[p_idx], points[45]) << ")\n"; + std::clog << p_idx << " (sq. dist. = " << k.squared_distance_d_object()(points[p_idx], points[45]) << ")\n"; return 0; } diff --git a/src/Subsampling/example/example_choose_n_farthest_points.cpp b/src/Subsampling/example/example_choose_n_farthest_points.cpp index 5cfeb4d8..27cf5d4e 100644 --- a/src/Subsampling/example/example_choose_n_farthest_points.cpp +++ b/src/Subsampling/example/example_choose_n_farthest_points.cpp @@ -23,8 +23,8 @@ int main(void) { Gudhi::subsampling::choose_n_farthest_points(k, points, 100, Gudhi::subsampling::random_starting_point, std::back_inserter(results)); - std::cout << "Before sparsification: " << points.size() << " points.\n"; - std::cout << "After sparsification: " << results.size() << " points.\n"; + std::clog << "Before sparsification: " << points.size() << " points.\n"; + std::clog << "After sparsification: " << results.size() << " points.\n"; return 0; } diff --git a/src/Subsampling/example/example_custom_kernel.cpp b/src/Subsampling/example/example_custom_kernel.cpp index f1eb757b..535bf42a 100644 --- a/src/Subsampling/example/example_custom_kernel.cpp +++ b/src/Subsampling/example/example_custom_kernel.cpp @@ -55,9 +55,9 @@ int main(void) { Gudhi::subsampling::choose_n_farthest_points(k, points, 2, Gudhi::subsampling::random_starting_point, std::back_inserter(results)); - std::cout << "Before sparsification: " << points.size() << " points.\n"; - std::cout << "After sparsification: " << results.size() << " points.\n"; - std::cout << "Result table: {" << results[0] << "," << results[1] << "}\n"; + std::clog << "Before sparsification: " << points.size() << " points.\n"; + std::clog << "After sparsification: " << results.size() << " points.\n"; + std::clog << "Result table: {" << results[0] << "," << results[1] << "}\n"; return 0; } diff --git a/src/Subsampling/example/example_pick_n_random_points.cpp b/src/Subsampling/example/example_pick_n_random_points.cpp index 25266403..316feed1 100644 --- a/src/Subsampling/example/example_pick_n_random_points.cpp +++ b/src/Subsampling/example/example_pick_n_random_points.cpp @@ -21,8 +21,8 @@ int main(void) { K k; std::vector results; Gudhi::subsampling::pick_n_random_points(points, 100, std::back_inserter(results)); - std::cout << "Before sparsification: " << points.size() << " points.\n"; - std::cout << "After sparsification: " << results.size() << " points.\n"; + std::clog << "Before sparsification: " << points.size() << " points.\n"; + std::clog << "After sparsification: " << results.size() << " points.\n"; return 0; } diff --git a/src/Subsampling/example/example_sparsify_point_set.cpp b/src/Subsampling/example/example_sparsify_point_set.cpp index a8caa720..1e2c38c1 100644 --- a/src/Subsampling/example/example_sparsify_point_set.cpp +++ b/src/Subsampling/example/example_sparsify_point_set.cpp @@ -21,8 +21,8 @@ int main(void) { K k; std::vector results; Gudhi::subsampling::sparsify_point_set(k, points, 0.4, std::back_inserter(results)); - std::cout << "Before sparsification: " << points.size() << " points.\n"; - 
std::cout << "After sparsification: " << results.size() << " points.\n"; + std::clog << "Before sparsification: " << points.size() << " points.\n"; + std::clog << "After sparsification: " << results.size() << " points.\n"; return 0; } diff --git a/src/Subsampling/test/test_pick_n_random_points.cpp b/src/Subsampling/test/test_pick_n_random_points.cpp index 018fb8d2..fafae2af 100644 --- a/src/Subsampling/test/test_pick_n_random_points.cpp +++ b/src/Subsampling/test/test_pick_n_random_points.cpp @@ -49,9 +49,9 @@ BOOST_AUTO_TEST_CASE(test_pick_n_random_points) std::vector results; Gudhi::subsampling::pick_n_random_points(vect, 5, std::back_inserter(results)); - std::cout << "landmark vector contains: "; + std::clog << "landmark vector contains: "; for (auto l: results) - std::cout << l << "\n"; + std::clog << l << "\n"; BOOST_CHECK(results.size() == 5); } diff --git a/src/Subsampling/test/test_sparsify_point_set.cpp b/src/Subsampling/test/test_sparsify_point_set.cpp index 587ab3ad..cdcfbff5 100644 --- a/src/Subsampling/test/test_sparsify_point_set.cpp +++ b/src/Subsampling/test/test_sparsify_point_set.cpp @@ -34,10 +34,10 @@ BOOST_AUTO_TEST_CASE(test_sparsify_point_set) K k; std::vector results; Gudhi::subsampling::sparsify_point_set(k, points, 0.5, std::back_inserter(results)); - std::cout << "Before sparsification: " << points.size() << " points.\n"; - std::cout << "After sparsification: " << results.size() << " points.\n"; + std::clog << "Before sparsification: " << points.size() << " points.\n"; + std::clog << "After sparsification: " << results.size() << " points.\n"; //for (auto p : results) - // std::cout << p << "\n"; + // std::clog << p << "\n"; BOOST_CHECK(points.size() > results.size()); } diff --git a/src/Tangential_complex/test/test_tangential_complex.cpp b/src/Tangential_complex/test/test_tangential_complex.cpp index 46caec54..023c1e1a 100644 --- a/src/Tangential_complex/test/test_tangential_complex.cpp +++ b/src/Tangential_complex/test/test_tangential_complex.cpp @@ -76,14 +76,14 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) { points.push_back(Point(point.size(), point.begin(), point.end())); point = {1.0, 1.0}; points.push_back(Point(point.size(), point.begin(), point.end())); - std::cout << "points = " << points.size() << std::endl; + std::clog << "points = " << points.size() << std::endl; Kernel k; // Compute the TC TC tc(points, INTRINSIC_DIM, k); tc.compute_tangential_complex(); TC::Num_inconsistencies num_inc = tc.number_of_inconsistent_simplices(); - std::cout << "TC vertices = " << tc.number_of_vertices() << " - simplices = " << num_inc.num_simplices << + std::clog << "TC vertices = " << tc.number_of_vertices() << " - simplices = " << num_inc.num_simplices << " - inc simplices = " << num_inc.num_inconsistent_simplices << " - inc stars = " << num_inc.num_inconsistent_stars << std::endl; @@ -95,7 +95,7 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) { // Export the TC into a Simplex_tree Gudhi::Simplex_tree<> stree; tc.create_complex(stree); - std::cout << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl; + std::clog << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl; BOOST_CHECK(stree.num_vertices() == 4); BOOST_CHECK(stree.num_simplices() == 6); @@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) { // Export the TC into a Simplex_tree tc.create_complex(stree); - std::cout << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() 
<< std::endl; + std::clog << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl; BOOST_CHECK(stree.num_vertices() == 4); BOOST_CHECK(stree.num_simplices() == 6); @@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(test_basic_example_throw) { // Compute the TC TC tc(points, INTRINSIC_DIM, k); tc.set_max_squared_edge_length(0.01); - std::cout << "test_basic_example_throw - set_max_squared_edge_length(0.01) to make GUDHI_CHECK fail" << std::endl; + std::clog << "test_basic_example_throw - set_max_squared_edge_length(0.01) to make GUDHI_CHECK fail" << std::endl; BOOST_CHECK_THROW(tc.compute_tangential_complex(), std::invalid_argument); } diff --git a/src/Toplex_map/benchmark/benchmark_tm.cpp b/src/Toplex_map/benchmark/benchmark_tm.cpp index feb5d01c..d078fcf8 100644 --- a/src/Toplex_map/benchmark/benchmark_tm.cpp +++ b/src/Toplex_map/benchmark/benchmark_tm.cpp @@ -25,10 +25,10 @@ typedef std::pair::Simplex_handle, bool> typePairSimplexBool; class ST_wrapper { public: void insert_simplex(const Simplex& tau) { - /*std::cout << "insert_simplex - " << simplexTree.num_simplices() << " - "; + /*std::clog << "insert_simplex - " << simplexTree.num_simplices() << " - "; for (auto v : tau) - std::cout << v << ", "; - std::cout << std::endl; + std::clog << v << ", "; + std::clog << std::endl; */ simplexTree.insert_simplex_and_subfaces(tau); } @@ -104,22 +104,22 @@ void chrono(int n, int d) { auto c2 = std::chrono::duration_cast(end - start).count(); if (c3 > 0) - std::cout << c1 << "\t \t" << c2 << "\t \t" << c3 << "\t \t" << K.num_maximal_simplices() << std::endl; + std::clog << c1 << "\t \t" << c2 << "\t \t" << c3 << "\t \t" << K.num_maximal_simplices() << std::endl; else - std::cout << c1 << "\t \t" << c2 << "\t \tN/A\t \t" << K.num_maximal_simplices() << std::endl; + std::clog << c1 << "\t \t" << c2 << "\t \tN/A\t \t" << K.num_maximal_simplices() << std::endl; } int main() { for (int d = 5; d <= 40; d += 5) { - std::cout << "d=" << d << " \t Insertions \t Membership \t Contractions \t Size" << std::endl; - std::cout << "T Map \t \t"; + std::clog << "d=" << d << " \t Insertions \t Membership \t Contractions \t Size" << std::endl; + std::clog << "T Map \t \t"; chrono(n, d); - std::cout << "Lazy \t \t"; + std::clog << "Lazy \t \t"; chrono(n, d); if (d <= 15) { - std::cout << "ST \t \t"; + std::clog << "ST \t \t"; chrono(n, d); } - std::cout << std::endl; + std::clog << std::endl; } } diff --git a/src/Toplex_map/example/simple_toplex_map.cpp b/src/Toplex_map/example/simple_toplex_map.cpp index 7538c989..c432608e 100644 --- a/src/Toplex_map/example/simple_toplex_map.cpp +++ b/src/Toplex_map/example/simple_toplex_map.cpp @@ -31,72 +31,72 @@ int main(int argc, char* const argv[]) { /* o---o */ /* 1 3 */ - std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() + std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() << std::endl; // Browse maximal cofaces Simplex sigma3 = {2, 3}; - std::cout << "Maximal cofaces of {2, 3} are :" << std::endl; + std::clog << "Maximal cofaces of {2, 3} are :" << std::endl; for (auto simplex_ptr : tm.maximal_cofaces(sigma3, 2)) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : 
tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } Simplex sigma4 = {1, 3}; assert(tm.membership(sigma4)); Gudhi::Toplex_map::Vertex v = tm.contraction(1, 3); - std::cout << "After contraction(1, 3) - " << v << std::endl; + std::clog << "After contraction(1, 3) - " << v << std::endl; /* Simplex is: */ /* 2 4 */ /* o---o */ /* \5/ */ /* o */ /* 3 */ - std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() + std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() << std::endl; // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } Simplex sigma5 = {3, 4}; assert(tm.membership(sigma5)); v = tm.contraction(3, 4); - std::cout << "After contraction(3, 4) - " << v << std::endl; + std::clog << "After contraction(3, 4) - " << v << std::endl; /* Simplex is: */ /* 2 4 */ /* o---o */ /* \X/ */ /* o */ /* 5 */ - std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() + std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() << std::endl; // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } tm.insert_simplex(sigma1); @@ -109,44 +109,44 @@ int main(int argc, char* const argv[]) { /* 1 3 */ tm.remove_simplex(sigma1); - std::cout << "After remove_simplex(1, 2, 3)" << std::endl; + std::clog << "After remove_simplex(1, 2, 3)" << std::endl; /* Simplex is: */ /* 2 4 */ /* o---o */ /* / \5/ */ /* o---o */ /* 1 3 */ - std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() + std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() << std::endl; // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } tm.remove_vertex(1); - std::cout << "After remove_vertex(1)" << std::endl; + std::clog << "After remove_vertex(1)" << std::endl; /* Simplex is: */ /* 2 4 */ /* o---o */ /* \5/ */ /* o */ /* 3 */ - std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() + std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices() << std::endl; // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; } return 0; diff --git 
a/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp b/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp index 639bf35a..994cee8e 100644 --- a/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp +++ b/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp @@ -20,43 +20,43 @@ BOOST_AUTO_TEST_CASE(toplex_map) { using Vertex = Gudhi::Lazy_toplex_map::Vertex; Gudhi::Lazy_toplex_map tm; - std::cout << "insert_simplex {1, 2, 3, 4}" << std::endl; + std::clog << "insert_simplex {1, 2, 3, 4}" << std::endl; std::vector sigma1 = {1, 2, 3, 4}; tm.insert_simplex(sigma1); - std::cout << "insert_simplex {5, 2, 3, 6}" << std::endl; + std::clog << "insert_simplex {5, 2, 3, 6}" << std::endl; std::vector sigma2 = {5, 2, 3, 6}; tm.insert_simplex(sigma2); - std::cout << "insert_simplex {5}" << std::endl; + std::clog << "insert_simplex {5}" << std::endl; std::vector sigma3 = {5}; tm.insert_simplex(sigma3); - std::cout << "insert_simplex {4, 5, 3}" << std::endl; + std::clog << "insert_simplex {4, 5, 3}" << std::endl; std::vector sigma6 = {4, 5, 3}; tm.insert_simplex(sigma6); - std::cout << "insert_simplex {4, 5, 9}" << std::endl; + std::clog << "insert_simplex {4, 5, 9}" << std::endl; std::vector sigma7 = {4, 5, 9}; tm.insert_simplex(sigma7); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 5); std::vector sigma4 = {5, 2, 3}; std::vector sigma5 = {5, 2, 7}; BOOST_CHECK(tm.membership(sigma4)); BOOST_CHECK(!tm.membership(sigma5)); - std::cout << "insert_simplex {5, 2, 7}" << std::endl; + std::clog << "insert_simplex {5, 2, 7}" << std::endl; tm.insert_simplex(sigma5); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 6); BOOST_CHECK(tm.membership(sigma5)); - std::cout << "contraction(4,5)" << std::endl; + std::clog << "contraction(4,5)" << std::endl; auto r = tm.contraction(4, 5); - std::cout << "r=" << r << std::endl; + std::clog << "r=" << r << std::endl; BOOST_CHECK(r == 5); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 6); std::vector sigma8 = {1, 2, 3}; @@ -68,11 +68,11 @@ BOOST_AUTO_TEST_CASE(toplex_map) { BOOST_CHECK(tm.membership(sigma8)); BOOST_CHECK(tm.membership(sigma9)); - std::cout << "remove_simplex({2, 7, r = 5})" << std::endl; + std::clog << "remove_simplex({2, 7, r = 5})" << std::endl; tm.remove_simplex(sigma9); BOOST_CHECK(!tm.membership(sigma9)); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 8); // {2, 7, 5} is removed, but verify its edges are still there @@ -88,71 +88,71 @@ BOOST_AUTO_TEST_CASE(toplex_map_empty_toplex) { using Vertex = Gudhi::Lazy_toplex_map::Vertex; Gudhi::Lazy_toplex_map tm; - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 0); - std::cout << "num_vertices = " << tm.num_vertices() << std::endl; + std::clog << "num_vertices = " << tm.num_vertices() << std::endl; 
BOOST_CHECK(tm.num_vertices() == 0); - std::cout << "Check an empty simplex is a member." << std::endl; + std::clog << "Check an empty simplex is a member." << std::endl; std::vector empty_sigma = {}; BOOST_CHECK(tm.membership(empty_sigma)); - std::cout << "Check the edge 2,7 is not a member." << std::endl; + std::clog << "Check the edge 2,7 is not a member." << std::endl; std::vector edge = {2, 7}; BOOST_CHECK(!tm.membership(edge)); - std::cout << "Insert an empty simplex." << std::endl; + std::clog << "Insert an empty simplex." << std::endl; tm.insert_simplex(empty_sigma); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 0); - std::cout << "num_vertices = " << tm.num_vertices() << std::endl; + std::clog << "num_vertices = " << tm.num_vertices() << std::endl; BOOST_CHECK(tm.num_vertices() == 0); - std::cout << "Check an empty simplex is a member." << std::endl; + std::clog << "Check an empty simplex is a member." << std::endl; BOOST_CHECK(tm.membership(empty_sigma)); - std::cout << "Check the edge 2,7 is not a member." << std::endl; + std::clog << "Check the edge 2,7 is not a member." << std::endl; BOOST_CHECK(!tm.membership(edge)); - std::cout << "Insert edge 2,7." << std::endl; + std::clog << "Insert edge 2,7." << std::endl; tm.insert_simplex(edge); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 1); - std::cout << "num_vertices = " << tm.num_vertices() << std::endl; + std::clog << "num_vertices = " << tm.num_vertices() << std::endl; BOOST_CHECK(tm.num_vertices() == 2); - std::cout << "Check an empty simplex is a member." << std::endl; + std::clog << "Check an empty simplex is a member." << std::endl; BOOST_CHECK(tm.membership(empty_sigma)); - std::cout << "Check the edge 2,7 is a member." << std::endl; + std::clog << "Check the edge 2,7 is a member." << std::endl; BOOST_CHECK(tm.membership(edge)); - std::cout << "contraction(2,7)" << std::endl; + std::clog << "contraction(2,7)" << std::endl; auto r = tm.contraction(2, 7); - std::cout << "r=" << r << std::endl; + std::clog << "r=" << r << std::endl; BOOST_CHECK(r == 7); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 1); - std::cout << "num_vertices = " << tm.num_vertices() << std::endl; + std::clog << "num_vertices = " << tm.num_vertices() << std::endl; BOOST_CHECK(tm.num_vertices() == 1); - std::cout << "Check an empty simplex is a member." << std::endl; + std::clog << "Check an empty simplex is a member." << std::endl; BOOST_CHECK(tm.membership(empty_sigma)); - std::cout << "Check the edge 2,7 is not a member." << std::endl; + std::clog << "Check the edge 2,7 is not a member." << std::endl; BOOST_CHECK(!tm.membership(edge)); - std::cout << "Remove the vertex 7." << std::endl; + std::clog << "Remove the vertex 7." 
<< std::endl; std::vector vertex = {7}; tm.remove_simplex(vertex); - std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 0); - std::cout << "num_vertices = " << tm.num_vertices() << std::endl; + std::clog << "num_vertices = " << tm.num_vertices() << std::endl; BOOST_CHECK(tm.num_vertices() == 0); - std::cout << "Check an empty simplex is a member." << std::endl; + std::clog << "Check an empty simplex is a member." << std::endl; BOOST_CHECK(tm.membership(empty_sigma)); - std::cout << "Check the edge 2,7 is not a member." << std::endl; + std::clog << "Check the edge 2,7 is not a member." << std::endl; BOOST_CHECK(!tm.membership(edge)); } diff --git a/src/Toplex_map/test/toplex_map_unit_test.cpp b/src/Toplex_map/test/toplex_map_unit_test.cpp index 24ec679b..0d0751ff 100644 --- a/src/Toplex_map/test/toplex_map_unit_test.cpp +++ b/src/Toplex_map/test/toplex_map_unit_test.cpp @@ -20,31 +20,31 @@ BOOST_AUTO_TEST_CASE(toplex_map) { using Vertex = Gudhi::Toplex_map::Vertex; Gudhi::Toplex_map tm; - std::cout << "insert_simplex {1, 2, 3, 4}" << std::endl; + std::clog << "insert_simplex {1, 2, 3, 4}" << std::endl; std::vector sigma1 = {1, 2, 3, 4}; tm.insert_simplex(sigma1); - std::cout << "insert_simplex {5, 2, 3, 6}" << std::endl; + std::clog << "insert_simplex {5, 2, 3, 6}" << std::endl; std::vector sigma2 = {5, 2, 3, 6}; tm.insert_simplex(sigma2); - std::cout << "insert_simplex {5}" << std::endl; + std::clog << "insert_simplex {5}" << std::endl; std::vector sigma3 = {5}; tm.insert_simplex(sigma3); - std::cout << "insert_simplex {4, 5, 3}" << std::endl; + std::clog << "insert_simplex {4, 5, 3}" << std::endl; std::vector sigma6 = {4, 5, 3}; tm.insert_simplex(sigma6); - std::cout << "insert_simplex {4, 5, 9}" << std::endl; + std::clog << "insert_simplex {4, 5, 9}" << std::endl; std::vector sigma7 = {4, 5, 9}; tm.insert_simplex(sigma7); - std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 4); // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; BOOST_CHECK(tm.maximality(*simplex_ptr)); } @@ -58,37 +58,37 @@ BOOST_AUTO_TEST_CASE(toplex_map) { std::vector sigma5 = {5, 2, 7}; BOOST_CHECK(tm.membership(sigma4)); BOOST_CHECK(!tm.membership(sigma5)); - std::cout << "insert_simplex {5, 2, 7}" << std::endl; + std::clog << "insert_simplex {5, 2, 7}" << std::endl; tm.insert_simplex(sigma5); - std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 5); // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; BOOST_CHECK(tm.maximality(*simplex_ptr)); } BOOST_CHECK(tm.membership(sigma5)); - std::cout << "contraction(4,5)" << 
std::endl; + std::clog << "contraction(4,5)" << std::endl; auto r = tm.contraction(4, 5); - std::cout << "r=" << r << std::endl; + std::clog << "r=" << r << std::endl; BOOST_CHECK(r == 5); - std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 4); // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; BOOST_CHECK(tm.maximality(*simplex_ptr)); } @@ -101,19 +101,19 @@ BOOST_AUTO_TEST_CASE(toplex_map) { BOOST_CHECK(tm.membership(sigma8)); BOOST_CHECK(tm.membership(sigma9)); - std::cout << "remove_simplex({2, 7, r = 5})" << std::endl; + std::clog << "remove_simplex({2, 7, r = 5})" << std::endl; tm.remove_simplex(sigma9); BOOST_CHECK(!tm.membership(sigma9)); - std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; + std::clog << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl; BOOST_CHECK(tm.num_maximal_simplices() == 5); // Browse maximal simplices - std::cout << "Maximal simplices are :" << std::endl; + std::clog << "Maximal simplices are :" << std::endl; for (auto simplex_ptr : tm.maximal_simplices()) { for (auto v : *simplex_ptr) { - std::cout << v << ", "; + std::clog << v << ", "; } - std::cout << std::endl; + std::clog << std::endl; BOOST_CHECK(tm.maximality(*simplex_ptr)); } // {2, 7, 5} is removed, but verify its edges are still there diff --git a/src/Witness_complex/example/example_nearest_landmark_table.cpp b/src/Witness_complex/example/example_nearest_landmark_table.cpp index 441900c1..14101847 100644 --- a/src/Witness_complex/example/example_nearest_landmark_table.cpp +++ b/src/Witness_complex/example/example_nearest_landmark_table.cpp @@ -33,7 +33,7 @@ int main(int argc, char * const argv[]) { Witness_complex witness_complex(nlt); witness_complex.create_complex(simplex_tree, .41); - std::cout << "Number of simplices: " << simplex_tree.num_simplices() << std::endl; + std::clog << "Number of simplices: " << simplex_tree.num_simplices() << std::endl; Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Witness_complex/example/example_strong_witness_complex_off.cpp b/src/Witness_complex/example/example_strong_witness_complex_off.cpp index 19f73836..583a04ab 100644 --- a/src/Witness_complex/example/example_strong_witness_complex_off.cpp +++ b/src/Witness_complex/example/example_strong_witness_complex_off.cpp @@ -38,8 +38,8 @@ int main(int argc, char* const argv[]) { } point_vector = Point_vector(off_reader.get_point_cloud()); - std::cout << "Successfully read " << point_vector.size() << " points.\n"; - std::cout << "Ambient dimension is " << point_vector[0].dimension() << ".\n"; + std::clog << "Successfully read " << point_vector.size() << " points.\n"; + std::clog << "Ambient dimension is " << point_vector[0].dimension() << ".\n"; // Choose landmarks (decomment one of the following two lines) // Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks)); @@ -52,6 +52,6 @@ int main(int argc, char* const argv[]) { witness_complex.create_complex(simplex_tree, alpha2, lim_dim); end = clock(); - std::cout << "Strong witness complex took " << 
static_cast(end - start) / CLOCKS_PER_SEC << " s. \n"; - std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; + std::clog << "Strong witness complex took " << static_cast(end - start) / CLOCKS_PER_SEC << " s. \n"; + std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; } diff --git a/src/Witness_complex/example/example_witness_complex_off.cpp b/src/Witness_complex/example/example_witness_complex_off.cpp index be11c955..3635da78 100644 --- a/src/Witness_complex/example/example_witness_complex_off.cpp +++ b/src/Witness_complex/example/example_witness_complex_off.cpp @@ -42,8 +42,8 @@ int main(int argc, char * const argv[]) { } point_vector = Point_vector(off_reader.get_point_cloud()); - std::cout << "Successfully read " << point_vector.size() << " points.\n"; - std::cout << "Ambient dimension is " << point_vector[0].dimension() << ".\n"; + std::clog << "Successfully read " << point_vector.size() << " points.\n"; + std::clog << "Ambient dimension is " << point_vector[0].dimension() << ".\n"; // Choose landmarks (decomment one of the following two lines) // Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks)); @@ -56,7 +56,7 @@ int main(int argc, char * const argv[]) { witness_complex.create_complex(simplex_tree, alpha2, lim_dim); end = clock(); - std::cout << "Witness complex took " + std::clog << "Witness complex took " << static_cast(end - start) / CLOCKS_PER_SEC << " s. \n"; - std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; + std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; } diff --git a/src/Witness_complex/example/example_witness_complex_sphere.cpp b/src/Witness_complex/example/example_witness_complex_sphere.cpp index 9e3c972d..78d5db4f 100644 --- a/src/Witness_complex/example/example_witness_complex_sphere.cpp +++ b/src/Witness_complex/example/example_witness_complex_sphere.cpp @@ -47,8 +47,8 @@ int main(int argc, char* const argv[]) { Gudhi::Simplex_tree<> simplex_tree; Point_Vector point_vector, landmarks; generate_points_sphere(point_vector, nbP, 4); - std::cout << "Successfully generated " << point_vector.size() << " points.\n"; - std::cout << "Ambient dimension is " << point_vector[0].size() << ".\n"; + std::clog << "Successfully generated " << point_vector.size() << " points.\n"; + std::clog << "Ambient dimension is " << point_vector[0].size() << ".\n"; // Choose landmarks start = clock(); @@ -62,8 +62,8 @@ int main(int argc, char* const argv[]) { witness_complex.create_complex(simplex_tree, 0); end = clock(); double time = static_cast(end - start) / CLOCKS_PER_SEC; - std::cout << "Witness complex for " << number_of_landmarks << " landmarks took " << time << " s. \n"; - std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; + std::clog << "Witness complex for " << number_of_landmarks << " landmarks took " << time << " s. 
\n"; + std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n"; l_time.push_back(std::make_pair(nbP, time)); } write_data(l_time, "w_time.dat"); diff --git a/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp b/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp index 4f718203..9b19f6dc 100644 --- a/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp +++ b/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp @@ -82,12 +82,12 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { witnesses); eucl_witness_complex.create_complex(complex, 0); - std::cout << "complex.num_simplices() = " << complex.num_simplices() << std::endl; + std::clog << "complex.num_simplices() = " << complex.num_simplices() << std::endl; BOOST_CHECK(complex.num_simplices() == 24); eucl_witness_complex.create_complex(relaxed_complex, 8.01); - std::cout << "relaxed_complex.num_simplices() = " << relaxed_complex.num_simplices() << std::endl; + std::clog << "relaxed_complex.num_simplices() = " << relaxed_complex.num_simplices() << std::endl; BOOST_CHECK(relaxed_complex.num_simplices() == 239); // The corner simplex {0,2,5,7} and its cofaces are missing. @@ -95,12 +95,12 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { WitnessComplex witness_complex(nearest_landmark_table); witness_complex.create_complex(complex_ne, 0); - std::cout << "complex.num_simplices() = " << complex_ne.num_simplices() << std::endl; + std::clog << "complex.num_simplices() = " << complex_ne.num_simplices() << std::endl; BOOST_CHECK(complex_ne.num_simplices() == 24); witness_complex.create_complex(relaxed_complex_ne, 8.01); - std::cout << "relaxed_complex.num_simplices() = " << relaxed_complex_ne.num_simplices() << std::endl; + std::clog << "relaxed_complex.num_simplices() = " << relaxed_complex_ne.num_simplices() << std::endl; BOOST_CHECK(relaxed_complex_ne.num_simplices() == 239); @@ -111,10 +111,10 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { eucl_strong_witness_complex.create_complex(strong_relaxed_complex, 9.1); eucl_strong_witness_complex.create_complex(strong_relaxed_complex2, 9.1, 2); - std::cout << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex.num_simplices() << std::endl; + std::clog << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex.num_simplices() << std::endl; BOOST_CHECK(strong_relaxed_complex.num_simplices() == 239); - std::cout << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2.num_simplices() << std::endl; + std::clog << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2.num_simplices() << std::endl; BOOST_CHECK(strong_relaxed_complex2.num_simplices() == 92); @@ -124,10 +124,10 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { strong_witness_complex.create_complex(strong_relaxed_complex_ne, 9.1); strong_witness_complex.create_complex(strong_relaxed_complex2_ne, 9.1, 2); - std::cout << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex_ne.num_simplices() << std::endl; + std::clog << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex_ne.num_simplices() << std::endl; BOOST_CHECK(strong_relaxed_complex_ne.num_simplices() == 239); - std::cout << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2_ne.num_simplices() << std::endl; + std::clog << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2_ne.num_simplices() << std::endl; BOOST_CHECK(strong_relaxed_complex2_ne.num_simplices() == 92); diff 
--git a/src/Witness_complex/test/test_simple_witness_complex.cpp b/src/Witness_complex/test/test_simple_witness_complex.cpp index 9e3509d3..7c48cc54 100644 --- a/src/Witness_complex/test/test_simple_witness_complex.cpp +++ b/src/Witness_complex/test/test_simple_witness_complex.cpp @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { Witness_complex witness_complex(nlt); BOOST_CHECK(witness_complex.create_complex(stree, 4.1)); - std::cout << "Number of simplices: " << stree.num_simplices() << std::endl; + std::clog << "Number of simplices: " << stree.num_simplices() << std::endl; BOOST_CHECK(stree.num_simplices() == 31); // Check when complex not empty @@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) { BOOST_CHECK(!witness_complex.create_complex(stree2, -0.02)); witness_complex.create_complex(stree2, 4.1, 2); - std::cout << "Number of simplices: " << stree2.num_simplices() << std::endl; + std::clog << "Number of simplices: " << stree2.num_simplices() << std::endl; BOOST_CHECK(stree2.num_simplices() == 25); } diff --git a/src/Witness_complex/utilities/strong_witness_persistence.cpp b/src/Witness_complex/utilities/strong_witness_persistence.cpp index 75ba1f4b..1f61c77c 100644 --- a/src/Witness_complex/utilities/strong_witness_persistence.cpp +++ b/src/Witness_complex/utilities/strong_witness_persistence.cpp @@ -56,8 +56,8 @@ int main(int argc, char* argv[]) { exit(-1); // ----- >> } witnesses = Point_vector(off_reader.get_point_cloud()); - std::cout << "Successfully read " << witnesses.size() << " points.\n"; - std::cout << "Ambient dimension is " << witnesses[0].dimension() << ".\n"; + std::clog << "Successfully read " << witnesses.size() << " points.\n"; + std::clog << "Ambient dimension is " << witnesses[0].dimension() << ".\n"; // Choose landmarks (decomment one of the following two lines) // Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks)); @@ -69,8 +69,8 @@ int main(int argc, char* argv[]) { strong_witness_complex.create_complex(simplex_tree, max_squared_alpha, lim_d); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -107,7 +107,7 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s visible.add_options()("help,h", "produce help message")("landmarks,l", po::value(&nbL), "Number of landmarks to choose from the point cloud.")( "output-file,o", po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. 
Default print in std::clog")( "max-sq-alpha,a", po::value(&max_squared_alpha)->default_value(default_alpha), "Maximal squared relaxation parameter.")( "field-charac,p", po::value(&p)->default_value(11), @@ -128,17 +128,17 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Strong witness complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Strong witness complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Witness_complex/utilities/weak_witness_persistence.cpp b/src/Witness_complex/utilities/weak_witness_persistence.cpp index 0e5b9cc1..93050af5 100644 --- a/src/Witness_complex/utilities/weak_witness_persistence.cpp +++ b/src/Witness_complex/utilities/weak_witness_persistence.cpp @@ -56,8 +56,8 @@ int main(int argc, char* argv[]) { exit(-1); // ----- >> } witnesses = Point_vector(off_reader.get_point_cloud()); - std::cout << "Successfully read " << witnesses.size() << " points.\n"; - std::cout << "Ambient dimension is " << witnesses[0].dimension() << ".\n"; + std::clog << "Successfully read " << witnesses.size() << " points.\n"; + std::clog << "Ambient dimension is " << witnesses[0].dimension() << ".\n"; // Choose landmarks (decomment one of the following two lines) // Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks)); @@ -69,8 +69,8 @@ int main(int argc, char* argv[]) { witness_complex.create_complex(simplex_tree, max_squared_alpha, lim_d); - std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; - std::cout << " and has dimension " << simplex_tree.dimension() << " \n"; + std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; + std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; // Sort the simplices in the order of the filtration simplex_tree.initialize_filtration(); @@ -107,7 +107,7 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s visible.add_options()("help,h", "produce help message")("landmarks,l", po::value(&nbL), "Number of landmarks to choose from the point cloud.")( "output-file,o", 
po::value(&filediag)->default_value(std::string()), - "Name of file in which the persistence diagram is written. Default print in std::cout")( + "Name of file in which the persistence diagram is written. Default print in std::clog")( "max-sq-alpha,a", po::value(&max_squared_alpha)->default_value(default_alpha), "Maximal squared relaxation parameter.")( "field-charac,p", po::value(&p)->default_value(11), @@ -128,17 +128,17 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s po::notify(vm); if (vm.count("help") || !vm.count("input-file")) { - std::cout << std::endl; - std::cout << "Compute the persistent homology with coefficient field Z/pZ \n"; - std::cout << "of a Weak witness complex defined on a set of input points.\n \n"; - std::cout << "The output diagram contains one bar per line, written with the convention: \n"; - std::cout << " p dim b d \n"; - std::cout << "where dim is the dimension of the homological feature,\n"; - std::cout << "b and d are respectively the birth and death of the feature and \n"; - std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; - - std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; - std::cout << visible << std::endl; + std::clog << std::endl; + std::clog << "Compute the persistent homology with coefficient field Z/pZ \n"; + std::clog << "of a Weak witness complex defined on a set of input points.\n \n"; + std::clog << "The output diagram contains one bar per line, written with the convention: \n"; + std::clog << " p dim b d \n"; + std::clog << "where dim is the dimension of the homological feature,\n"; + std::clog << "b and d are respectively the birth and death of the feature and \n"; + std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl; + + std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl; + std::clog << visible << std::endl; exit(-1); } } diff --git a/src/Witness_complex/utilities/witnesscomplex.md b/src/Witness_complex/utilities/witnesscomplex.md index 7ea397b9..3a3a7d83 100644 --- a/src/Witness_complex/utilities/witnesscomplex.md +++ b/src/Witness_complex/utilities/witnesscomplex.md @@ -29,7 +29,7 @@ and `p` is the characteristic of the field *Z/pZ* used for homology coefficients * `-h [ --help ]` Produce help message * `-l [ --landmarks ]` Number of landmarks to choose from the point cloud. -* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::cout. +* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::clog. * `-a [ --max-sq-alpha ]` (default = inf) Maximal squared relaxation parameter. * `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology. * `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals. @@ -60,7 +60,7 @@ and `p` is the characteristic of the field *Z/pZ* used for homology coefficients * `-h [ --help ]` Produce help message * `-l [ --landmarks ]` Number of landmarks to choose from the point cloud. -* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::cout. +* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::clog. 
* `-a [ --max-sq-alpha ]` (default = inf) Maximal squared relaxation parameter. * `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology. * `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals. diff --git a/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp b/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp index 0fc145fd..6fe7a887 100644 --- a/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp +++ b/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp @@ -66,7 +66,7 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi Gudhi::Points_off_reader> off_reader(off_file_name); assert(off_reader.is_valid()); - std::cout << "+ " << msg << std::endl; + std::clog << "+ " << msg << std::endl; results_csv << "\"nb_points\";" << "\"nb_simplices\";" @@ -82,7 +82,7 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi Gudhi::Euclidean_distance()); // benchmark end pg_compute_proximity_graph.end(); - std::cout << pg_compute_proximity_graph; + std::clog << pg_compute_proximity_graph; Gudhi::Simplex_tree<> complex; Gudhi::Clock st_create_clock(" benchmark_proximity_graph - complex creation"); @@ -91,13 +91,13 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi complex.insert_graph(proximity_graph); // benchmark end st_create_clock.end(); - std::cout << st_create_clock; + std::clog << st_create_clock; results_csv << off_reader.get_point_cloud().size() << ";" << complex.num_simplices() << ";" << pg_compute_proximity_graph.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl; - std::cout << " benchmark_proximity_graph - nb simplices = " << complex.num_simplices() << std::endl; + std::clog << " benchmark_proximity_graph - nb simplices = " << complex.num_simplices() << std::endl; } int main(int argc, char * const argv[]) { diff --git a/src/common/example/example_CGAL_3D_points_off_reader.cpp b/src/common/example/example_CGAL_3D_points_off_reader.cpp index 4658d8d5..7f4343f0 100644 --- a/src/common/example/example_CGAL_3D_points_off_reader.cpp +++ b/src/common/example/example_CGAL_3D_points_off_reader.cpp @@ -35,7 +35,7 @@ int main(int argc, char **argv) { int n {}; for (auto point : point_cloud) { ++n; - std::cout << "Point[" << n << "] = (" << point[0] << ", " << point[1] << ", " << point[2] << ")\n"; + std::clog << "Point[" << n << "] = (" << point[0] << ", " << point[1] << ", " << point[2] << ")\n"; } return 0; } diff --git a/src/common/example/example_CGAL_points_off_reader.cpp b/src/common/example/example_CGAL_points_off_reader.cpp index f45683a5..b2bcdbcf 100644 --- a/src/common/example/example_CGAL_points_off_reader.cpp +++ b/src/common/example/example_CGAL_points_off_reader.cpp @@ -36,10 +36,10 @@ int main(int argc, char **argv) { int n {}; for (auto point : point_cloud) { - std::cout << "Point[" << n << "] = "; + std::clog << "Point[" << n << "] = "; for (std::size_t i {0}; i < point.size(); i++) - std::cout << point[i] << " "; - std::cout << "\n"; + std::clog << point[i] << " "; + std::clog << "\n"; ++n; } return 0; diff --git a/src/common/include/gudhi/Clock.h b/src/common/include/gudhi/Clock.h index 00ab2f27..6966aaaa 100644 --- a/src/common/include/gudhi/Clock.h +++ b/src/common/include/gudhi/Clock.h @@ -41,9 +41,9 @@ class Clock { return msg; } - // Print current value to std::cout + // Print current value 
to std::clog void print() const { - std::cout << *this << std::endl; + std::clog << *this << std::endl; } friend std::ostream& operator<<(std::ostream& stream, const Clock& clock) { diff --git a/src/common/include/gudhi/Debug_utils.h b/src/common/include/gudhi/Debug_utils.h index d4e66d8d..f8375b00 100644 --- a/src/common/include/gudhi/Debug_utils.h +++ b/src/common/include/gudhi/Debug_utils.h @@ -27,14 +27,14 @@ #define GUDHI_CHECK_code(CODE) #endif -#define PRINT(a) std::cout << #a << ": " << (a) << " (DISP)" << std::endl +#define PRINT(a) std::clog << #a << ": " << (a) << " (DISP)" << std::endl // #define DBG_VERBOSE #ifdef DBG_VERBOSE - #define DBG(a) std::cout << "DBG: " << (a) << std::endl - #define DBGMSG(a, b) std::cout << "DBG: " << a << b << std::endl - #define DBGVALUE(a) std::cout << "DBG: " << #a << ": " << a << std::endl - #define DBGCONT(a) std::cout << "DBG: container " << #a << " -> "; for (auto x : a) std::cout << x << ","; std::cout << std::endl + #define DBG(a) std::clog << "DBG: " << (a) << std::endl + #define DBGMSG(a, b) std::clog << "DBG: " << a << b << std::endl + #define DBGVALUE(a) std::clog << "DBG: " << #a << ": " << a << std::endl + #define DBGCONT(a) std::clog << "DBG: container " << #a << " -> "; for (auto x : a) std::clog << x << ","; std::clog << std::endl #else #define DBG(a) (void) 0 #define DBGMSG(a, b) (void) 0 diff --git a/src/common/include/gudhi/Points_3D_off_io.h b/src/common/include/gudhi/Points_3D_off_io.h index 2d110af3..39b79c96 100644 --- a/src/common/include/gudhi/Points_3D_off_io.h +++ b/src/common/include/gudhi/Points_3D_off_io.h @@ -41,7 +41,7 @@ class Points_3D_off_visitor_reader { */ void init(int dim, int num_vertices, int num_faces, int num_edges) { #ifdef DEBUG_TRACES - std::cout << "Points_3D_off_visitor_reader::init - dim=" << dim << " - num_vertices=" << + std::clog << "Points_3D_off_visitor_reader::init - dim=" << dim << " - num_vertices=" << num_vertices << " - num_faces=" << num_faces << " - num_edges=" << num_edges << std::endl; #endif // DEBUG_TRACES if (dim == 3) { @@ -74,11 +74,11 @@ class Points_3D_off_visitor_reader { void point(const std::vector& point) { if (valid_) { #ifdef DEBUG_TRACES - std::cout << "Points_3D_off_visitor_reader::point "; + std::clog << "Points_3D_off_visitor_reader::point "; for (auto coordinate : point) { - std::cout << coordinate << " | "; + std::clog << coordinate << " | "; } - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES // Fill the point cloud point_cloud_.push_back(Point_3(point[0], point[1], point[2])); diff --git a/src/common/include/gudhi/Points_off_io.h b/src/common/include/gudhi/Points_off_io.h index 99371d56..9dc40568 100644 --- a/src/common/include/gudhi/Points_off_io.h +++ b/src/common/include/gudhi/Points_off_io.h @@ -40,7 +40,7 @@ class Points_off_visitor_reader { */ void init(int dim, int num_vertices, int num_faces, int num_edges) { #ifdef DEBUG_TRACES - std::cout << "Points_off_visitor_reader::init - dim=" << dim << " - num_vertices=" << + std::clog << "Points_off_visitor_reader::init - dim=" << dim << " - num_vertices=" << num_vertices << " - num_faces=" << num_faces << " - num_edges=" << num_edges << std::endl; #endif // DEBUG_TRACES if (num_faces > 0) { @@ -66,11 +66,11 @@ class Points_off_visitor_reader { */ void point(const std::vector& point) { #ifdef DEBUG_TRACES - std::cout << "Points_off_visitor_reader::point "; + std::clog << "Points_off_visitor_reader::point "; for (auto coordinate : point) { - std::cout << coordinate << " | "; + std::clog << 
coordinate << " | "; } - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES // Fill the point cloud point_cloud.push_back(Point_d(point.begin(), point.end())); diff --git a/src/common/include/gudhi/Unitary_tests_utils.h b/src/common/include/gudhi/Unitary_tests_utils.h index 9b86460a..9f995d01 100644 --- a/src/common/include/gudhi/Unitary_tests_utils.h +++ b/src/common/include/gudhi/Unitary_tests_utils.h @@ -20,7 +20,7 @@ template void GUDHI_TEST_FLOAT_EQUALITY_CHECK(FloatingType a, FloatingType b, FloatingType epsilon = std::numeric_limits::epsilon()) { #ifdef DEBUG_TRACES - std::cout << "GUDHI_TEST_FLOAT_EQUALITY_CHECK - " << a << " versus " << b + std::clog << "GUDHI_TEST_FLOAT_EQUALITY_CHECK - " << a << " versus " << b << " | diff = " << std::fabs(a - b) << " - epsilon = " << epsilon << std::endl; #endif BOOST_CHECK(std::fabs(a - b) <= epsilon); @@ -32,7 +32,7 @@ template FloatingType GUDHI_PROTECT_FLOAT(FloatingType value) { volatile FloatingType protected_value = value; #ifdef DEBUG_TRACES - std::cout << "GUDHI_PROTECT_FLOAT - " << protected_value << std::endl; + std::clog << "GUDHI_PROTECT_FLOAT - " << protected_value << std::endl; #endif return protected_value; } diff --git a/src/common/include/gudhi/distance_functions.h b/src/common/include/gudhi/distance_functions.h index 94cf9ccc..9bbc62b7 100644 --- a/src/common/include/gudhi/distance_functions.h +++ b/src/common/include/gudhi/distance_functions.h @@ -97,7 +97,7 @@ class Minimal_enclosing_ball_radius { Min_sphere ms(boost::size(*point_cloud.begin()), point_cloud.begin(), point_cloud.end()); #ifdef DEBUG_TRACES - std::cout << "Minimal_enclosing_ball_radius = " << std::sqrt(ms.squared_radius()) << " | nb points = " + std::clog << "Minimal_enclosing_ball_radius = " << std::sqrt(ms.squared_radius()) << " | nb points = " << boost::size(point_cloud) << " | dimension = " << boost::size(*point_cloud.begin()) << std::endl; #endif // DEBUG_TRACES diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h index ac9e987b..0938f5c1 100644 --- a/src/common/include/gudhi/reader_utils.h +++ b/src/common/include/gudhi/reader_utils.h @@ -220,7 +220,7 @@ template std::vector> read_lower_triangular_matrix_from_csv_file(const std::string& filename, const char separator = ';') { #ifdef DEBUG_TRACES - std::cout << "Using procedure read_lower_triangular_matrix_from_csv_file \n"; + std::clog << "Using procedure read_lower_triangular_matrix_from_csv_file \n"; #endif // DEBUG_TRACES std::vector> result; std::ifstream in; @@ -272,12 +272,12 @@ std::vector> read_lower_triangular_matrix_from_csv in.close(); #ifdef DEBUG_TRACES - std::cout << "Here is the matrix we read : \n"; + std::clog << "Here is the matrix we read : \n"; for (size_t i = 0; i != result.size(); ++i) { for (size_t j = 0; j != result[i].size(); ++j) { - std::cout << result[i][j] << " "; + std::clog << result[i][j] << " "; } - std::cout << std::endl; + std::clog << std::endl; } #endif // DEBUG_TRACES @@ -294,7 +294,7 @@ Note: the function does not check that birth <= death. 
template void read_persistence_intervals_and_dimension(std::string const& filename, OutputIterator out) { #ifdef DEBUG_TRACES - std::cout << "read_persistence_intervals_and_dimension - " << filename << std::endl; + std::clog << "read_persistence_intervals_and_dimension - " << filename << std::endl; #endif // DEBUG_TRACES std::ifstream in(filename); if (!in.is_open()) { @@ -311,11 +311,11 @@ void read_persistence_intervals_and_dimension(std::string const& filename, Outpu double numbers[4]; int n = sscanf(line.c_str(), "%lf %lf %lf %lf", &numbers[0], &numbers[1], &numbers[2], &numbers[3]); #ifdef DEBUG_TRACES - std::cout << "[" << n << "] = "; + std::clog << "[" << n << "] = "; for (int i = 0; i < n; i++) { - std::cout << numbers[i] << ","; + std::clog << numbers[i] << ","; } - std::cout << std::endl; + std::clog << std::endl; #endif // DEBUG_TRACES if (n >= 2) { int dim = (n >= 3 ? static_cast(numbers[n - 3]) : -1); diff --git a/src/common/include/gudhi/writing_persistence_to_file.h b/src/common/include/gudhi/writing_persistence_to_file.h index 2e36b831..cdd8be0a 100644 --- a/src/common/include/gudhi/writing_persistence_to_file.h +++ b/src/common/include/gudhi/writing_persistence_to_file.h @@ -94,7 +94,7 @@ class Persistence_interval_common { **/ template void write_persistence_intervals_to_stream(const Persistence_interval_range& intervals, - std::ostream& out = std::cout) { + std::ostream& out = std::clog) { for (auto interval : intervals) { out << interval << "\n"; } diff --git a/src/common/test/test_distance_matrix_reader.cpp b/src/common/test/test_distance_matrix_reader.cpp index bb619a29..73be8104 100644 --- a/src/common/test/test_distance_matrix_reader.cpp +++ b/src/common/test/test_distance_matrix_reader.cpp @@ -28,15 +28,15 @@ BOOST_AUTO_TEST_CASE( lower_triangular_distance_matrix ) ','); for (auto& i : from_lower_triangular) { for (auto j : i) { - std::cout << j << " "; + std::clog << j << " "; } - std::cout << std::endl; + std::clog << std::endl; } - std::cout << "from_lower_triangular size = " << from_lower_triangular.size() << std::endl; + std::clog << "from_lower_triangular size = " << from_lower_triangular.size() << std::endl; BOOST_CHECK(from_lower_triangular.size() == 5); for (std::size_t i = 0; i < from_lower_triangular.size(); i++) { - std::cout << "from_lower_triangular[" << i << "] size = " << from_lower_triangular[i].size() << std::endl; + std::clog << "from_lower_triangular[" << i << "] size = " << from_lower_triangular[i].size() << std::endl; BOOST_CHECK(from_lower_triangular[i].size() == i); } std::vector expected = {1}; @@ -60,14 +60,14 @@ BOOST_AUTO_TEST_CASE( full_square_distance_matrix ) from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file("full_square_distance_matrix.csv"); for (auto& i : from_full_square) { for (auto j : i) { - std::cout << j << " "; + std::clog << j << " "; } - std::cout << std::endl; + std::clog << std::endl; } - std::cout << "from_full_square size = " << from_full_square.size() << std::endl; + std::clog << "from_full_square size = " << from_full_square.size() << std::endl; BOOST_CHECK(from_full_square.size() == 7); for (std::size_t i = 0; i < from_full_square.size(); i++) { - std::cout << "from_full_square[" << i << "] size = " << from_full_square[i].size() << std::endl; + std::clog << "from_full_square[" << i << "] size = " << from_full_square[i].size() << std::endl; BOOST_CHECK(from_full_square[i].size() == i); } } diff --git a/src/common/test/test_persistence_intervals_reader.cpp 
b/src/common/test/test_persistence_intervals_reader.cpp index 8fb4377d..ac8d0981 100644 --- a/src/common/test/test_persistence_intervals_reader.cpp +++ b/src/common/test/test_persistence_intervals_reader.cpp @@ -35,18 +35,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_without_dimension ) Persistence_intervals_by_dimension persistence_intervals_by_dimension = Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_without_dimension.pers"); - std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; for (auto map_iter : expected_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" << map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } - std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n"; for (auto map_iter : persistence_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" << map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension); @@ -60,13 +60,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_without_dimension ) Persistence_intervals persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_without_dimension.pers"); - std::cout << "\nread_persistence_intervals_in_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension - read\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -103,18 +103,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) Persistence_intervals_by_dimension persistence_intervals_by_dimension = Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_with_dimension.pers"); - std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; for (auto map_iter : expected_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" << map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } - std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n"; for (auto map_iter : persistence_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" 
<< map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension); @@ -128,13 +128,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) Persistence_intervals persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers"); - std::cout << "\nread_persistence_intervals_in_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -143,13 +143,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 0); - std::cout << "\nread_persistence_intervals_in_dimension 0 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 0 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 0 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 0 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -159,13 +159,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 1); - std::cout << "\nread_persistence_intervals_in_dimension 1 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 1 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 1 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 1 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -173,13 +173,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 2); - std::cout << 
"\nread_persistence_intervals_in_dimension 2 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 2 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 2 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 2 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -188,13 +188,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 3); - std::cout << "\nread_persistence_intervals_in_dimension 3 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 3 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 3 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 3 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -212,18 +212,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field ) Persistence_intervals_by_dimension persistence_intervals_by_dimension = Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_with_field.pers"); - std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n"; for (auto map_iter : expected_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" << map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } - std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n"; + std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n"; for (auto map_iter : persistence_intervals_by_dimension) { - std::cout << "key=" << map_iter.first; + std::clog << "key=" << map_iter.first; for (auto vec_iter : map_iter.second) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; } BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension); @@ -237,13 +237,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field ) Persistence_intervals persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers"); - std::cout << "\nread_persistence_intervals_in_dimension - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << 
vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -252,13 +252,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 0); - std::cout << "\nread_persistence_intervals_in_dimension 0 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 0 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 0 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 0 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -268,13 +268,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 1); - std::cout << "\nread_persistence_intervals_in_dimension 1 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 1 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 1 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 1 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -282,13 +282,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 2); - std::cout << "\nread_persistence_intervals_in_dimension 2 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 2 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 2 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 2 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); @@ -297,13 +297,13 @@ BOOST_AUTO_TEST_CASE( 
persistence_intervals_with_field ) persistence_intervals_in_dimension = Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 3); - std::cout << "\nread_persistence_intervals_in_dimension 3 - expected\n"; + std::clog << "\nread_persistence_intervals_in_dimension 3 - expected\n"; for (auto vec_iter : expected_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; - std::cout << "\nread_persistence_intervals_in_dimension 3 - read\n"; + std::clog << "\nread_persistence_intervals_in_dimension 3 - read\n"; for (auto vec_iter : persistence_intervals_in_dimension) - std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] "; + std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] "; BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension); -- cgit v1.2.3 From a064f5698fedbe13f6c343cb0b82e0f4d72caffb Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 27 Jan 2020 17:37:31 +0100 Subject: A first naive iterator implementation with yield --- src/Simplex_tree/example/simple_simplex_tree.cpp | 4 ++++ src/python/gudhi/simplex_tree.pxd | 8 ++++++- src/python/gudhi/simplex_tree.pyx | 18 +++++++-------- src/python/include/Simplex_tree_interface.h | 28 ++++++++++++++---------- 4 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp index 4353939f..92ab923b 100644 --- a/src/Simplex_tree/example/simple_simplex_tree.cpp +++ b/src/Simplex_tree/example/simple_simplex_tree.cpp @@ -165,6 +165,10 @@ int main(int argc, char* const argv[]) { // ++ GENERAL VARIABLE SET + //std::vector::const_iterator + std::vector::const_iterator begin = simplexTree.filtration_simplex_range().begin(); + auto end = simplexTree.filtration_simplex_range().end(); + std::cout << "********************************************************************\n"; // Display the Simplex_tree - Can not be done in the middle of 2 inserts std::cout << "* The complex contains " << simplexTree.num_simplices() << " simplices\n"; diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 96d14079..caf3c459 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -21,6 +21,9 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_options_full_featured: pass + cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface::Simplex_handle": + pass + cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface": Simplex_tree() double simplex_filtration(vector[int] simplex) @@ -34,7 +37,6 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": bool find_simplex(vector[int] simplex) bool insert_simplex_and_subfaces(vector[int] simplex, double filtration) - vector[pair[vector[int], double]] get_filtration() vector[pair[vector[int], double]] get_skeleton(int dimension) vector[pair[vector[int], double]] get_star(vector[int] simplex) vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, @@ -43,6 +45,10 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": void remove_maximal_simplex(vector[int] simplex) bool prune_above_filtration(double filtration) bool make_filtration_non_decreasing() + # Iterators over Simplex tree + pair[vector[int], double] 
get_simplex_filtration(Simplex_tree_simplex_handle f_simplex) + vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() + vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface>": diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index b18627c4..478139de 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -7,6 +7,7 @@ # Modification(s): # - YYYY/MM Author: Description of the modification +from cython.operator import dereference, preincrement from libc.stdint cimport intptr_t from numpy import array as np_array cimport simplex_tree @@ -214,15 +215,14 @@ cdef class SimplexTree: :returns: The simplices sorted by increasing filtration values. :rtype: list of tuples(simplex, filtration) """ - cdef vector[pair[vector[int], double]] filtration \ - = self.get_ptr().get_filtration() - ct = [] - for filtered_complex in filtration: - v = [] - for vertex in filtered_complex.first: - v.append(vertex) - ct.append((v, filtered_complex.second)) - return ct + cdef vector[Simplex_tree_simplex_handle].const_iterator it = self.get_ptr().get_filtration_iterator_begin() + cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end() + + while True: + yield(self.get_ptr().get_simplex_filtration(dereference(it))) + preincrement(it) + if it == end: + raise StopIteration def get_skeleton(self, dimension): """This function returns the (simplices of the) skeleton of a maximum diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 06f31341..843966cd 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -33,7 +33,8 @@ class Simplex_tree_interface : public Simplex_tree { using Simplex_handle = typename Base::Simplex_handle; using Insertion_result = typename std::pair; using Simplex = std::vector; - using Filtered_simplices = std::vector>; + using Filtered_simplex = std::pair; + using Filtered_simplices = std::vector; public: bool find_simplex(const Simplex& vh) { @@ -82,17 +83,12 @@ class Simplex_tree_interface : public Simplex_tree { Base::initialize_filtration(); } - Filtered_simplices get_filtration() { - Base::initialize_filtration(); - Filtered_simplices filtrations; - for (auto f_simplex : Base::filtration_simplex_range()) { - Simplex simplex; - for (auto vertex : Base::simplex_vertex_range(f_simplex)) { - simplex.insert(simplex.begin(), vertex); - } - filtrations.push_back(std::make_pair(simplex, Base::filtration(f_simplex))); + Filtered_simplex get_simplex_filtration(Simplex_handle f_simplex) { + Simplex simplex; + for (auto vertex : Base::simplex_vertex_range(f_simplex)) { + simplex.insert(simplex.begin(), vertex); } - return filtrations; + return std::make_pair(simplex, Base::filtration(f_simplex)); } Filtered_simplices get_skeleton(int dimension) { @@ -135,6 +131,16 @@ class Simplex_tree_interface : public Simplex_tree { Base::initialize_filtration(); pcoh = new Gudhi::Persistent_cohomology_interface(*this); } + + // Iterator over the simplex tree + typename std::vector::const_iterator get_filtration_iterator_begin() { + Base::initialize_filtration(); + return Base::filtration_simplex_range().begin(); + } + + typename std::vector::const_iterator get_filtration_iterator_end() { 
+ return Base::filtration_simplex_range().end(); + } }; } // namespace Gudhi -- cgit v1.2.3 From 0b77fdd5d9bd057103cb23020089a6628c1f14e6 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 27 Jan 2020 17:39:48 +0100 Subject: Rollback unnecessary --- src/Simplex_tree/example/simple_simplex_tree.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp index 92ab923b..4353939f 100644 --- a/src/Simplex_tree/example/simple_simplex_tree.cpp +++ b/src/Simplex_tree/example/simple_simplex_tree.cpp @@ -165,10 +165,6 @@ int main(int argc, char* const argv[]) { // ++ GENERAL VARIABLE SET - //std::vector::const_iterator - std::vector::const_iterator begin = simplexTree.filtration_simplex_range().begin(); - auto end = simplexTree.filtration_simplex_range().end(); - std::cout << "********************************************************************\n"; // Display the Simplex_tree - Can not be done in the middle of 2 inserts std::cout << "* The complex contains " << simplexTree.num_simplices() << " simplices\n"; -- cgit v1.2.3 From ef2c5b53e88321f07ad93496f00dde16dc20f018 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 28 Jan 2020 11:05:39 +0100 Subject: Code review: rename get_simplex_filtration to get_simplex_and_filtration. Stop raising StopIteration. Fix failing tests. Reword documentation --- .../example/alpha_complex_from_points_example.py | 5 +- .../example/rips_complex_from_points_example.py | 5 +- src/python/example/simplex_tree_example.py | 5 +- src/python/gudhi/simplex_tree.pxd | 2 +- src/python/gudhi/simplex_tree.pyx | 10 +-- src/python/include/Simplex_tree_interface.h | 6 +- src/python/test/test_alpha_complex.py | 50 ++++++------ src/python/test/test_euclidean_witness_complex.py | 46 ++++++----- src/python/test/test_rips_complex.py | 53 +++++++------ src/python/test/test_simplex_tree.py | 90 +++++++++++----------- src/python/test/test_tangential_complex.py | 19 +++-- 11 files changed, 161 insertions(+), 130 deletions(-) diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py index 844d7a82..465632eb 100755 --- a/src/python/example/alpha_complex_from_points_example.py +++ b/src/python/example/alpha_complex_from_points_example.py @@ -47,7 +47,10 @@ else: print("[4] Not found...") print("dimension=", simplex_tree.dimension()) -print("filtrations=", simplex_tree.get_filtration()) +print("filtrations=") +for simplex_with_filtration in simplex_tree.get_filtration(): + print("(%s, %.2f)" % tuple(simplex_with_filtration)) + print("star([0])=", simplex_tree.get_star([0])) print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1)) diff --git a/src/python/example/rips_complex_from_points_example.py b/src/python/example/rips_complex_from_points_example.py index 59d8a261..c05703c6 100755 --- a/src/python/example/rips_complex_from_points_example.py +++ b/src/python/example/rips_complex_from_points_example.py @@ -22,6 +22,9 @@ rips = gudhi.RipsComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]], max_edge_lengt simplex_tree = rips.create_simplex_tree(max_dimension=1) -print("filtrations=", simplex_tree.get_filtration()) +print("filtrations=") +for simplex_with_filtration in simplex_tree.get_filtration(): + print("(%s, %.2f)" % tuple(simplex_with_filtration)) + print("star([0])=", simplex_tree.get_star([0])) print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1)) diff --git a/src/python/example/simplex_tree_example.py
b/src/python/example/simplex_tree_example.py index 30de00da..7f20c389 100755 --- a/src/python/example/simplex_tree_example.py +++ b/src/python/example/simplex_tree_example.py @@ -39,7 +39,10 @@ else: print("dimension=", st.dimension()) st.initialize_filtration() -print("filtration=", st.get_filtration()) +print("filtration=") +for simplex_with_filtration in st.get_filtration(): + print("(%s, %.2f)" % tuple(simplex_with_filtration)) + print("filtration[1, 2]=", st.filtration([1, 2])) print("filtration[4, 2]=", st.filtration([4, 2])) diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index caf3c459..1b0dc881 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -46,7 +46,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": bool prune_above_filtration(double filtration) bool make_filtration_non_decreasing() # Iterators over Simplex tree - pair[vector[int], double] get_simplex_filtration(Simplex_tree_simplex_handle f_simplex) + pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 478139de..22978b6e 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -209,20 +209,18 @@ cdef class SimplexTree: filtration) def get_filtration(self): - """This function returns a list of all simplices with their given + """This function returns a generator with simplices and their given filtration values. :returns: The simplices sorted by increasing filtration values. - :rtype: list of tuples(simplex, filtration) + :rtype: generator with tuples(simplex, filtration) """ cdef vector[Simplex_tree_simplex_handle].const_iterator it = self.get_ptr().get_filtration_iterator_begin() cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end() - while True: - yield(self.get_ptr().get_simplex_filtration(dereference(it))) + while it != end: + yield(self.get_ptr().get_simplex_and_filtration(dereference(it))) preincrement(it) - if it == end: - raise StopIteration def get_skeleton(self, dimension): """This function returns the (simplices of the) skeleton of a maximum diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 843966cd..c0bbc3d9 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -33,8 +33,8 @@ class Simplex_tree_interface : public Simplex_tree { using Simplex_handle = typename Base::Simplex_handle; using Insertion_result = typename std::pair; using Simplex = std::vector; - using Filtered_simplex = std::pair; - using Filtered_simplices = std::vector; + using Simplex_and_filtration = std::pair; + using Filtered_simplices = std::vector; public: bool find_simplex(const Simplex& vh) { @@ -83,7 +83,7 @@ class Simplex_tree_interface : public Simplex_tree { Base::initialize_filtration(); } - Filtered_simplex get_simplex_filtration(Simplex_handle f_simplex) { + Simplex_and_filtration get_simplex_and_filtration(Simplex_handle f_simplex) { Simplex simplex; for (auto vertex : Base::simplex_vertex_range(f_simplex)) { simplex.insert(simplex.begin(), vertex); diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py index 3761fe16..ceead919 
100755 --- a/src/python/test/test_alpha_complex.py +++ b/src/python/test/test_alpha_complex.py @@ -40,19 +40,21 @@ def test_infinite_alpha(): assert simplex_tree.num_simplices() == 11 assert simplex_tree.num_vertices() == 4 - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([3], 0.0), - ([0, 1], 0.25), - ([0, 2], 0.25), - ([1, 3], 0.25), - ([2, 3], 0.25), - ([1, 2], 0.5), - ([0, 1, 2], 0.5), - ([1, 2, 3], 0.5), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([3], 0.0)) + assert(next(filtration_generator) == ([0, 1], 0.25)) + assert(next(filtration_generator) == ([0, 2], 0.25)) + assert(next(filtration_generator) == ([1, 3], 0.25)) + assert(next(filtration_generator) == ([2, 3], 0.25)) + assert(next(filtration_generator) == ([1, 2], 0.5)) + assert(next(filtration_generator) == ([0, 1, 2], 0.5)) + assert(next(filtration_generator) == ([1, 2, 3], 0.5)) + with pytest.raises(StopIteration): + next(filtration_generator) + assert simplex_tree.get_star([0]) == [ ([0], 0.0), ([0, 1], 0.25), @@ -105,16 +107,18 @@ def test_filtered_alpha(): else: assert False - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([3], 0.0), - ([0, 1], 0.25), - ([0, 2], 0.25), - ([1, 3], 0.25), - ([2, 3], 0.25), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([3], 0.0)) + assert(next(filtration_generator) == ([0, 1], 0.25)) + assert(next(filtration_generator) == ([0, 2], 0.25)) + assert(next(filtration_generator) == ([1, 3], 0.25)) + assert(next(filtration_generator) == ([2, 3], 0.25)) + with pytest.raises(StopIteration): + next(filtration_generator) + assert simplex_tree.get_star([0]) == [([0], 0.0), ([0, 1], 0.25), ([0, 2], 0.25)] assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)] diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py index c18d2484..16ff1ef4 100755 --- a/src/python/test/test_euclidean_witness_complex.py +++ b/src/python/test/test_euclidean_witness_complex.py @@ -9,6 +9,7 @@ """ import gudhi +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -40,15 +41,16 @@ def test_witness_complex(): assert landmarks[1] == euclidean_witness_complex.get_point(1) assert landmarks[2] == euclidean_witness_complex.get_point(2) - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([0, 1], 0.0), - ([2], 0.0), - ([0, 2], 0.0), - ([1, 2], 0.0), - ([0, 1, 2], 0.0), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([0, 1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([0, 2], 0.0)) + assert(next(filtration_generator) == ([1, 2], 0.0)) + assert(next(filtration_generator) == ([0, 1, 2], 0.0)) + with pytest.raises(StopIteration): + next(filtration_generator) def test_empty_euclidean_strong_witness_complex(): @@ -78,18 +80,24 @@ def test_strong_witness_complex(): assert landmarks[1] == 
euclidean_strong_witness_complex.get_point(1) assert landmarks[2] == euclidean_strong_witness_complex.get_point(2) - assert simplex_tree.get_filtration() == [([0], 0.0), ([1], 0.0), ([2], 0.0)] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + with pytest.raises(StopIteration): + next(filtration_generator) simplex_tree = euclidean_strong_witness_complex.create_simplex_tree( max_alpha_square=100.0 ) - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([1, 2], 15.0), - ([0, 2], 34.0), - ([0, 1], 37.0), - ([0, 1, 2], 37.0), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([1, 2], 15.0)) + assert(next(filtration_generator) == ([0, 2], 34.0)) + assert(next(filtration_generator) == ([0, 1], 37.0)) + assert(next(filtration_generator) == ([0, 1, 2], 37.0)) + with pytest.raises(StopIteration): + next(filtration_generator) diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py index b02a68e1..bd31c47c 100755 --- a/src/python/test/test_rips_complex.py +++ b/src/python/test/test_rips_complex.py @@ -10,6 +10,7 @@ from gudhi import RipsComplex from math import sqrt +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -32,18 +33,20 @@ def test_rips_from_points(): assert simplex_tree.num_simplices() == 10 assert simplex_tree.num_vertices() == 4 - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([3], 0.0), - ([0, 1], 1.0), - ([0, 2], 1.0), - ([1, 3], 1.0), - ([2, 3], 1.0), - ([1, 2], 1.4142135623730951), - ([0, 3], 1.4142135623730951), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([3], 0.0)) + assert(next(filtration_generator) == ([0, 1], 1.0)) + assert(next(filtration_generator) == ([0, 2], 1.0)) + assert(next(filtration_generator) == ([1, 3], 1.0)) + assert(next(filtration_generator) == ([2, 3], 1.0)) + assert(next(filtration_generator) == ([1, 2], 1.4142135623730951)) + assert(next(filtration_generator) == ([0, 3], 1.4142135623730951)) + with pytest.raises(StopIteration): + next(filtration_generator) + assert simplex_tree.get_star([0]) == [ ([0], 0.0), ([0, 1], 1.0), @@ -95,18 +98,20 @@ def test_rips_from_distance_matrix(): assert simplex_tree.num_simplices() == 10 assert simplex_tree.num_vertices() == 4 - assert simplex_tree.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([3], 0.0), - ([0, 1], 1.0), - ([0, 2], 1.0), - ([1, 3], 1.0), - ([2, 3], 1.0), - ([1, 2], 1.4142135623730951), - ([0, 3], 1.4142135623730951), - ] + filtration_generator = simplex_tree.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([3], 0.0)) + assert(next(filtration_generator) == ([0, 1], 1.0)) + assert(next(filtration_generator) == ([0, 2], 1.0)) + assert(next(filtration_generator) == ([1, 3], 1.0)) + assert(next(filtration_generator) == ([2, 3], 
1.0)) + assert(next(filtration_generator) == ([1, 2], 1.4142135623730951)) + assert(next(filtration_generator) == ([0, 3], 1.4142135623730951)) + with pytest.raises(StopIteration): + next(filtration_generator) + assert simplex_tree.get_star([0]) == [ ([0], 0.0), ([0, 1], 1.0), diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 1822c43b..0f3db7ac 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -9,6 +9,7 @@ """ from gudhi import SimplexTree +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -126,55 +127,58 @@ def test_expansion(): assert st.num_vertices() == 7 assert st.num_simplices() == 17 - assert st.get_filtration() == [ - ([2], 0.1), - ([3], 0.1), - ([2, 3], 0.1), - ([0], 0.2), - ([0, 2], 0.2), - ([1], 0.3), - ([0, 1], 0.3), - ([1, 3], 0.4), - ([1, 2], 0.5), - ([5], 0.6), - ([6], 0.6), - ([5, 6], 0.6), - ([4], 0.7), - ([2, 4], 0.7), - ([0, 3], 0.8), - ([4, 6], 0.9), - ([3, 6], 1.0), - ] + + filtration_generator = st.get_filtration() + assert(next(filtration_generator) == ([2], 0.1)) + assert(next(filtration_generator) == ([3], 0.1)) + assert(next(filtration_generator) == ([2, 3], 0.1)) + assert(next(filtration_generator) == ([0], 0.2)) + assert(next(filtration_generator) == ([0, 2], 0.2)) + assert(next(filtration_generator) == ([1], 0.3)) + assert(next(filtration_generator) == ([0, 1], 0.3)) + assert(next(filtration_generator) == ([1, 3], 0.4)) + assert(next(filtration_generator) == ([1, 2], 0.5)) + assert(next(filtration_generator) == ([5], 0.6)) + assert(next(filtration_generator) == ([6], 0.6)) + assert(next(filtration_generator) == ([5, 6], 0.6)) + assert(next(filtration_generator) == ([4], 0.7)) + assert(next(filtration_generator) == ([2, 4], 0.7)) + assert(next(filtration_generator) == ([0, 3], 0.8)) + assert(next(filtration_generator) == ([4, 6], 0.9)) + assert(next(filtration_generator) == ([3, 6], 1.0)) + with pytest.raises(StopIteration): + next(filtration_generator) st.expansion(3) assert st.num_vertices() == 7 assert st.num_simplices() == 22 st.initialize_filtration() - assert st.get_filtration() == [ - ([2], 0.1), - ([3], 0.1), - ([2, 3], 0.1), - ([0], 0.2), - ([0, 2], 0.2), - ([1], 0.3), - ([0, 1], 0.3), - ([1, 3], 0.4), - ([1, 2], 0.5), - ([0, 1, 2], 0.5), - ([1, 2, 3], 0.5), - ([5], 0.6), - ([6], 0.6), - ([5, 6], 0.6), - ([4], 0.7), - ([2, 4], 0.7), - ([0, 3], 0.8), - ([0, 1, 3], 0.8), - ([0, 2, 3], 0.8), - ([0, 1, 2, 3], 0.8), - ([4, 6], 0.9), - ([3, 6], 1.0), - ] + filtration_generator = st.get_filtration() + assert(next(filtration_generator) == ([2], 0.1)) + assert(next(filtration_generator) == ([3], 0.1)) + assert(next(filtration_generator) == ([2, 3], 0.1)) + assert(next(filtration_generator) == ([0], 0.2)) + assert(next(filtration_generator) == ([0, 2], 0.2)) + assert(next(filtration_generator) == ([1], 0.3)) + assert(next(filtration_generator) == ([0, 1], 0.3)) + assert(next(filtration_generator) == ([1, 3], 0.4)) + assert(next(filtration_generator) == ([1, 2], 0.5)) + assert(next(filtration_generator) == ([0, 1, 2], 0.5)) + assert(next(filtration_generator) == ([1, 2, 3], 0.5)) + assert(next(filtration_generator) == ([5], 0.6)) + assert(next(filtration_generator) == ([6], 0.6)) + assert(next(filtration_generator) == ([5, 6], 0.6)) + assert(next(filtration_generator) == ([4], 0.7)) + assert(next(filtration_generator) == ([2, 4], 0.7)) + assert(next(filtration_generator) == ([0, 3], 0.8)) + assert(next(filtration_generator) == ([0, 
1, 3], 0.8)) + assert(next(filtration_generator) == ([0, 2, 3], 0.8)) + assert(next(filtration_generator) == ([0, 1, 2, 3], 0.8)) + assert(next(filtration_generator) == ([4, 6], 0.9)) + assert(next(filtration_generator) == ([3, 6], 1.0)) + with pytest.raises(StopIteration): + next(filtration_generator) def test_automatic_dimension(): diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py index e650e99c..90e2c75b 100755 --- a/src/python/test/test_tangential_complex.py +++ b/src/python/test/test_tangential_complex.py @@ -9,6 +9,7 @@ """ from gudhi import TangentialComplex, SimplexTree +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -37,14 +38,16 @@ def test_tangential(): assert st.num_simplices() == 6 assert st.num_vertices() == 4 - assert st.get_filtration() == [ - ([0], 0.0), - ([1], 0.0), - ([2], 0.0), - ([0, 2], 0.0), - ([3], 0.0), - ([1, 3], 0.0), - ] + filtration_generator = st.get_filtration() + assert(next(filtration_generator) == ([0], 0.0)) + assert(next(filtration_generator) == ([1], 0.0)) + assert(next(filtration_generator) == ([2], 0.0)) + assert(next(filtration_generator) == ([0, 2], 0.0)) + assert(next(filtration_generator) == ([3], 0.0)) + assert(next(filtration_generator) == ([1, 3], 0.0)) + with pytest.raises(StopIteration): + next(filtration_generator) + assert st.get_cofaces([0], 1) == [([0, 2], 0.0)] assert point_list[0] == tc.get_point(0) -- cgit v1.2.3 From 48952ee2ad76e2f4e5ada7f038ff88dee496272a Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 30 Jan 2020 11:58:52 +0100 Subject: Allow use of preinstalled Hera --- src/cmake/modules/GUDHI_third_party_libraries.cmake | 3 +++ src/python/setup.py.in | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index cb9f9033..359d1c12 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -35,6 +35,9 @@ if(CGAL_FOUND) include( ${CGAL_USE_FILE} ) endif() +# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera. +set(HERA_WASSERSTEIN_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include CACHE PATH "Directory where one can find Hera's wasserstein.h") + option(WITH_GUDHI_USE_TBB "Build with Intel TBB parallelization" ON) # Find TBB package for parallel sort - not mandatory, just optional. 
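The test rewrites above all trace back to a single API change: `SimplexTree.get_filtration()` now returns a generator rather than a list, so comparing its result to a list with `==` no longer works. A minimal sketch of the two ways to consume it (the tiny complex built here is illustrative, not taken from the tests):

import gudhi

st = gudhi.SimplexTree()
st.insert([0, 1], filtration=0.25)  # also inserts the vertices [0] and [1] at 0.25

# Step through the generator explicitly, as the updated tests do...
gen = st.get_filtration()
assert next(gen) == ([0], 0.25)

# ...or materialize it once when the whole filtration is needed.
assert list(st.get_filtration())[-1] == ([0, 1], 0.25)

As for the CMake hunk just above: the cached `HERA_WASSERSTEIN_INCLUDE_DIR` can be overridden at configure time, e.g. `cmake -DHERA_WASSERSTEIN_INCLUDE_DIR=/opt/hera/geom_matching/wasserstein/include ..` (path illustrative), and the `setup.py.in` change below consumes the substituted value.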
diff --git a/src/python/setup.py.in b/src/python/setup.py.in index 08c46ced..851188bd 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -63,7 +63,7 @@ ext_modules.append(Extension( sources = [source_dir + 'hera.cc'], language = 'c++', include_dirs = include_dirs + - ['@CMAKE_SOURCE_DIR@/ext/hera/geom_matching/wasserstein/include', + ['@HERA_WASSERSTEIN_INCLUDE_DIR@', get_pybind_include(False), get_pybind_include(True)], extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], )) -- cgit v1.2.3 From 09cf8752c50f25acac0eb1a6369624399431b2ca Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 30 Jan 2020 12:14:20 +0100 Subject: Document dependency on pybind11 --- src/python/doc/installation.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 40f3f44b..f8456799 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -257,6 +257,14 @@ The :doc:`Wasserstein distance ` module requires `POT `_, a library that provides several solvers for optimization problems related to Optimal Transport. +Pybind11 +======== + +The :doc:`Wasserstein distance ` module requires +`pybind11 `_, a library that provides +interoperability between C++ and Python, for its interface to `Hera +`_. + Scikit-learn ============ -- cgit v1.2.3 From 68b6e3f3d641cd4a1e86f08bff96e417cc17ac59 Mon Sep 17 00:00:00 2001 From: takeshimeonerespect Date: Fri, 31 Jan 2020 08:08:43 +0100 Subject: timedelay added on fork --- src/python/CMakeLists.txt | 5 +++ src/python/doc/point_cloud.rst | 7 ++++ src/python/gudhi/point_cloud/timedelay.py | 56 +++++++++++++++++++++++++++++++ src/python/test/test_point_cloud.py | 35 +++++++++++++++++++ 4 files changed, 103 insertions(+) create mode 100644 src/python/gudhi/point_cloud/timedelay.py create mode 100755 src/python/test/test_point_cloud.py diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index b558d4c4..b23ec8a9 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -52,6 +52,7 @@ if(PYTHONINTERP_FOUND) # Modules that should not be auto-imported in __init__.py set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ") add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}") add_gudhi_debug_info("Cython version ${CYTHON_VERSION}") @@ -221,6 +222,7 @@ endif(CGAL_FOUND) file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") + file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") add_custom_command( OUTPUT gudhi.so @@ -399,6 +401,9 @@ endif(CGAL_FOUND) add_gudhi_py_test(test_representations) endif() + # Point cloud + add_gudhi_py_test(test_point_cloud) + # Documentation generation is available through sphinx - requires all modules if(SPHINX_PATH) if(MATPLOTLIB_FOUND) diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index d668428a..55c74ff3 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -20,3 +20,10 @@ Subsampling :members: :special-members: :show-inheritance: + +TimeDelayEmbedding +------------------ + +.. 
autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding
+   :members:
+
diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py
new file mode 100644
index 00000000..5c7ba542
--- /dev/null
+++ b/src/python/gudhi/point_cloud/timedelay.py
@@ -0,0 +1,56 @@
+import numpy as np
+
+class TimeDelayEmbedding:
+    """Point cloud transformation class.
+
+    Embeds time-series data in R^d according to Takens' Embedding Theorem
+    and obtains the coordinates of each point.
+
+    Parameters
+    ----------
+    dim : int, optional (default=3)
+        The dimension `d` of the embedding space R^d.
+
+    delay : int, optional (default=1)
+        Time delay between two consecutive coordinates of an embedded point.
+
+    skip : int, optional (default=1)
+        Stride between the start indices of two consecutive embedded points.
+
+    """
+    def __init__(self, dim=3, delay=1, skip=1):
+        self._dim = dim
+        self._delay = delay
+        self._skip = skip
+
+    def __call__(self, *args, **kwargs):
+        return self.transform(*args, **kwargs)
+
+    def _transform(self, ts):
+        """Guts of transform method."""
+        return ts[
+            np.add.outer(
+                np.arange(0, len(ts)-self._delay*(self._dim-1), self._skip),
+                np.arange(0, self._dim*self._delay, self._delay))
+        ]
+
+    def transform(self, ts):
+        """Transform method.
+
+        Parameters
+        ----------
+        ts : list[float] or list[list[float]]
+            A single time series, or a set of time series.
+
+        Returns
+        -------
+        point clouds : list[list[float, float, float]] or list[list[list[float, float, float]]]
+            One point cloud for each input time series.
+        """
+        ndts = np.array(ts)
+        if ndts.ndim == 1:
+            # single time series
+            return self._transform(ndts).tolist()
+        else:
+            # multiple time series
+            return np.apply_along_axis(self._transform, 1, ndts).tolist()
diff --git a/src/python/test/test_point_cloud.py b/src/python/test/test_point_cloud.py
new file mode 100755
index 00000000..2ee0c1fb
--- /dev/null
+++ b/src/python/test/test_point_cloud.py
@@ -0,0 +1,35 @@
+from gudhi.point_cloud.timedelay import TimeDelayEmbedding
+
+def test_normal():
+    # Sample array
+    ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+    # Normal case.
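A note on the indexing implemented by `_transform` above: the j-th embedded point is `[ts[j*skip], ts[j*skip + delay], ..., ts[j*skip + (dim-1)*delay]]`. A small sketch of that rule, checked against two of the cases asserted in the test below (the helper name is ours, not part of the module):

def expected_point(ts, j, dim=3, delay=1, skip=1):
    # j-th point of the time-delay embedding, per the np.add.outer indices above
    return [ts[j * skip + k * delay] for k in range(dim)]

ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert expected_point(ts, 0) == [1, 2, 3]                   # the "normal" case
assert expected_point(ts, 1, delay=2, skip=2) == [3, 5, 7]  # the delay=2 / skip=2 case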
+ prep = TimeDelayEmbedding() + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [2, 3, 4]) + assert (attractor[2] == [3, 4, 5]) + assert (attractor[3] == [4, 5, 6]) + assert (attractor[4] == [5, 6, 7]) + assert (attractor[5] == [6, 7, 8]) + assert (attractor[6] == [7, 8, 9]) + assert (attractor[7] == [8, 9, 10]) + # Delay = 3 + prep = TimeDelayEmbedding(delay=3) + attractor = prep(ts) + assert (attractor[0] == [1, 4, 7]) + assert (attractor[1] == [2, 5, 8]) + assert (attractor[2] == [3, 6, 9]) + assert (attractor[3] == [4, 7, 10]) + # Skip = 3 + prep = TimeDelayEmbedding(skip=3) + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [4, 5, 6]) + assert (attractor[2] == [7, 8, 9]) + # Delay = 2 / Skip = 2 + prep = TimeDelayEmbedding(delay=2, skip=2) + attractor = prep(ts) + assert (attractor[0] == [1, 3, 5]) + assert (attractor[1] == [3, 5, 7]) + assert (attractor[2] == [5, 7, 9]) -- cgit v1.2.3 From a145c7168fdb3f4205cb68870f06fc5cb8e08dea Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 31 Jan 2020 14:49:59 -0500 Subject: factorization of distance and kernel computations --- src/python/gudhi/representations/kernel_methods.py | 131 +++++++---- src/python/gudhi/representations/metrics.py | 247 +++++++++------------ 2 files changed, 193 insertions(+), 185 deletions(-) diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py index bfc83aff..bbbb7c31 100644 --- a/src/python/gudhi/representations/kernel_methods.py +++ b/src/python/gudhi/representations/kernel_methods.py @@ -9,13 +9,83 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin -from sklearn.metrics import pairwise_distances -from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance +from sklearn.metrics import pairwise_distances, pairwise_kernels +from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, sklearn_wrapper, pairwise_persistence_diagram_distances, sliced_wasserstein_distance, persistence_fisher_distance +from .preprocessing import Padding ############################################# # Kernel methods ############################ ############################################# +def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.): + """ + This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details. + :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + :param D2: (m x 2) numpy.array encoding the second diagram. + :param bandwidth: bandwidth of the Gaussian kernel with which persistence diagrams will be convolved + :param weight: weight function for the persistence diagram points. This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y]. + :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + :returns: the persistence weighted Gaussian kernel value between persistence diagrams. 
+ :rtype: float + """ + ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))]) + ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))]) + if kernel_approx is not None: + approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0) + approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0) + return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T) + else: + W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:]) + E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth)) + return np.sum(np.multiply(W, E)) + +def persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): + """ + This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details. + :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + :param D2: (m x 2) numpy.array encoding the second diagram. + :param bandwidth: bandwidth of the Gaussian kernel with which persistence diagrams will be convolved + :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + :returns: the persistence scale space kernel value between persistence diagrams. + :rtype: float + """ + DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0) + DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0) + weight_pss = lambda x: 1 if x[1] >= x[0] else -1 + return 0.5 * persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth) + +def pairwise_persistence_diagram_kernels(X, Y=None, metric="sliced_wasserstein", **kwargs): + """ + This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). + :param X: first list of persistence diagrams. + :param Y: second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only. + :param metric: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. 
+ :returns: kernel matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) + :rtype: float + """ + if Y is None: + YY = None + pX = Padding(use=True).fit_transform(X) + diag_len = len(pX[0]) + XX = np.reshape(np.vstack(pX), [-1, diag_len*3]) + else: + nX, nY = len(X), len(Y) + pD = Padding(use=True).fit_transform(X + Y) + diag_len = len(pD[0]) + XX = np.reshape(np.vstack(pD[:nX]), [-1, diag_len*3]) + YY = np.reshape(np.vstack(pD[nX:]), [-1, diag_len*3]) + + if metric == "sliced_wasserstein": + return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"]) + elif metric == "persistence_fisher": + return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"]) + elif metric == "persistence_scale_space": + return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_scale_space_kernel, **kwargs)) + elif metric == "persistence_weighted_gaussian": + return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_weighted_gaussian_kernel, **kwargs)) + else: + return pairwise_kernels(XX, YY, metric=sklearn_wrapper(metric, **kwargs)) + class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): """ This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details. @@ -29,7 +99,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10). """ self.bandwidth = bandwidth - self.sw_ = SlicedWassersteinDistance(num_directions=num_directions) + self.num_directions = num_directions def fit(self, X, y=None): """ @@ -39,7 +109,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): X (list of n x 2 numpy arrays): input persistence diagrams. y (n x 1 array): persistence diagram labels (unused). """ - self.sw_.fit(X, y) + self.diagrams_ = X return self def transform(self, X): @@ -52,7 +122,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values. """ - return np.exp(-self.sw_.transform(X)/self.bandwidth) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions) class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin): """ @@ -78,10 +148,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin): X (list of n x 2 numpy arrays): input persistence diagrams. y (n x 1 array): persistence diagram labels (unused). 
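With this refactoring, the Gaussian-type kernel matrices are derived directly from the matching distance matrices; for instance the sliced Wasserstein kernel is exp(-d_SW / bandwidth), per `pairwise_persistence_diagram_kernels` above. A hedged sketch checking that relation on arbitrary diagrams:

import numpy as np
from gudhi.representations import SlicedWassersteinKernel, SlicedWassersteinDistance

D1 = np.array([[0., 4.], [1., 2.]])
D2 = np.array([[3., 8.]])

k = SlicedWassersteinKernel(bandwidth=1., num_directions=10).fit([D1]).transform([D2])[0, 0]
d = SlicedWassersteinDistance(num_directions=10).fit([D1]).transform([D2])[0, 0]
assert np.isclose(k, np.exp(-d / 1.))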
""" - self.diagrams_ = list(X) - self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ] - if self.kernel_approx is not None: - self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))]) + self.diagrams_ = X return self def transform(self, X): @@ -94,31 +161,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values. """ - Xp = list(X) - Xfit = np.zeros((len(Xp), len(self.diagrams_))) - if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]): - if self.kernel_approx is not None: - Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T) - else: - for i in range(len(self.diagrams_)): - for j in range(i+1, len(self.diagrams_)): - W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:]) - E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth))) - Xfit[i,j] = np.sum(np.multiply(W, E)) - Xfit[j,i] = Xfit[i,j] - else: - ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ] - if self.kernel_approx is not None: - approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))]) - Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T) - else: - for i in range(len(Xp)): - for j in range(len(self.diagrams_)): - W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:]) - E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth))) - Xfit[i,j] = np.sum(np.multiply(W, E)) - - return Xfit + return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx) class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): """ @@ -132,7 +175,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.) kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). """ - self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx) + self.bandwidth, self.kernel_approx = bandwidth, kernel_approx def fit(self, X, y=None): """ @@ -142,11 +185,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): X (list of n x 2 numpy arrays): input persistence diagrams. y (n x 1 array): persistence diagram labels (unused). 
""" - self.diagrams_ = list(X) - for i in range(len(self.diagrams_)): - op_D = self.diagrams_[i][:,[1,0]] - self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0) - self.pwg_.fit(X) + self.diagrams_ = X return self def transform(self, X): @@ -159,11 +198,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values. """ - Xp = list(X) - for i in range(len(Xp)): - op_X = Xp[i][:,[1,0]] - Xp[i] = np.concatenate([Xp[i], op_X], axis=0) - return self.pwg_.transform(Xp) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) class PersistenceFisherKernel(BaseEstimator, TransformerMixin): """ @@ -179,7 +214,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin): kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). """ self.bandwidth = bandwidth - self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx) + self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx def fit(self, X, y=None): """ @@ -189,7 +224,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin): X (list of n x 2 numpy arrays): input persistence diagrams. y (n x 1 array): persistence diagram labels (unused). """ - self.pf_.fit(X, y) + self.diagrams_ = X return self def transform(self, X): @@ -202,5 +237,5 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values. """ - return np.exp(-self.pf_.transform(X)/self.bandwidth) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 290c1d07..cc788994 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -11,6 +11,8 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances from gudhi.wasserstein import wasserstein_distance +from .preprocessing import Padding + try: from .. import bottleneck_distance USE_GUDHI = True @@ -22,6 +24,108 @@ except ImportError: # Metrics ################################### ############################################# +def sliced_wasserstein_distance(D1, D2, num_directions): + """ + This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. + :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + :param D2: (m x 2) numpy.array encoding the second diagram. 
+ :param num_directions: number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. + :returns: the sliced Wasserstein distance between persistence diagrams. + :rtype: float + """ + thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] + lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) + approx1 = np.matmul(D1, lines) + diag_proj1 = (1./2) * np.ones((2,2)) + approx_diag1 = np.matmul(np.matmul(D1, diag_proj1), lines) + approx2 = np.matmul(D2, lines) + diag_proj2 = (1./2) * np.ones((2,2)) + approx_diag2 = np.matmul(np.matmul(D2, diag_proj2), lines) + A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0) + B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0) + L1 = np.sum(np.abs(A-B), axis=0) + return np.mean(L1) + +def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): + """ + This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. + :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + :param D2: (m x 2) numpy.array encoding the second diagram. + :param bandwidth: bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions. + :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + :returns: the persistence Fisher distance between persistence diagrams. + :rtype: float + """ + projection = (1./2) * np.ones((2,2)) + diagonal_projections1 = np.matmul(D1, projection) + diagonal_projections2 = np.matmul(D2, projection) + if kernel_approx is not None: + approx1 = kernel_approx.transform(D1) + approx_diagonal1 = kernel_approx.transform(diagonal_projections1) + approx2 = kernel_approx.transform(D2) + approx_diagonal2 = kernel_approx.transform(diagonal_projections2) + Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0) + U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0) + vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T)) + vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) + if vectori_sum != 0: + vectori = vectori/vectori_sum + if vectorj_sum != 0: + vectorj = vectorj/vectorj_sum + return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) 
) + else: + Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0) + U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0) + vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1) + vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1) + vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) + if vectori_sum != 0: + vectori = vectori/vectori_sum + if vectorj_sum != 0: + vectorj = vectorj/vectorj_sum + return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) + +def sklearn_wrapper(metric, **kwargs): + """ + This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments. It turns the metric into another that takes flattened and padded diagrams as inputs. + """ + def flat_metric(D1, D2): + DD1, DD2 = np.reshape(D1, [-1,3]), np.reshape(D2, [-1,3]) + return metric(DD1[DD1[:,2]==1,0:2], DD2[DD2[:,2]==1,0:2], **kwargs) + return flat_metric + +def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs): + """ + This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). + :param X: first list of persistence diagrams. + :param Y: second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. + :param metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + :returns: distance matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) + :rtype: float + """ + if Y is None: + YY = None + pX = Padding(use=True).fit_transform(X) + diag_len = len(pX[0]) + XX = np.reshape(np.vstack(pX), [-1, diag_len*3]) + else: + nX, nY = len(X), len(Y) + pD = Padding(use=True).fit_transform(X + Y) + diag_len = len(pD[0]) + XX = np.reshape(np.vstack(pD[:nX]), [-1, diag_len*3]) + YY = np.reshape(np.vstack(pD[nX:]), [-1, diag_len*3]) + + if metric == "bottleneck": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, **kwargs)) + elif metric == "wasserstein": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(wasserstein_distance, **kwargs)) + elif metric == "sliced_wasserstein": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, **kwargs)) + elif metric == "persistence_fisher": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(persistence_fisher_distance, **kwargs)) + else: + return pairwise_distances(XX, YY, metric=sklearn_wrapper(metric, **kwargs)) + class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. @@ -34,8 +138,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10). 
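Since `transform` now simply delegates to `pairwise_persistence_diagram_distances`, the new functional API and the scikit-learn style estimator should agree. A hedged sketch with arbitrary diagrams:

import numpy as np
from gudhi.representations.metrics import SlicedWassersteinDistance, sliced_wasserstein_distance

D1 = np.array([[0., 4.], [1., 2.]])
D2 = np.array([[0., 4.], [3., 8.]])

# Direct functional call introduced by this commit.
d = sliced_wasserstein_distance(D1, D2, num_directions=10)

# Same value through the estimator, which now delegates to the function.
sw = SlicedWassersteinDistance(num_directions=10).fit([D1])
assert np.isclose(sw.transform([D2])[0, 0], d)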
""" self.num_directions = num_directions - thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1] - self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) def fit(self, X, y=None): """ @@ -46,9 +148,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): y (n x 1 array): persistence diagram labels (unused). """ self.diagrams_ = X - self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))] - diag_proj = (1./2) * np.ones((2,2)) - self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))] return self def transform(self, X): @@ -61,27 +160,7 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances. """ - Xfit = np.zeros((len(X), len(self.approx_))) - if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): - for i in range(len(self.approx_)): - for j in range(i+1, len(self.approx_)): - A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0) - B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0) - L1 = np.sum(np.abs(A-B), axis=0) - Xfit[i,j] = np.mean(L1) - Xfit[j,i] = Xfit[i,j] - else: - diag_proj = (1./2) * np.ones((2,2)) - approx = [np.matmul(X[i], self.lines_) for i in range(len(X))] - approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))] - for i in range(len(approx)): - for j in range(len(self.approx_)): - A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0) - B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0) - L1 = np.sum(np.abs(A-B), axis=0) - Xfit[i,j] = np.mean(L1) - - return Xfit + return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions) class BottleneckDistance(BaseEstimator, TransformerMixin): """ @@ -117,33 +196,9 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances. 
""" - num_diag1 = len(X) - - #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): - if X is self.diagrams_: - matrix = np.zeros((num_diag1, num_diag1)) - - if USE_GUDHI: - for i in range(num_diag1): - for j in range(i+1, num_diag1): - matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon) - matrix[j,i] = matrix[i,j] - else: - print("Gudhi built without CGAL: returning a null matrix") - - else: - num_diag2 = len(self.diagrams_) - matrix = np.zeros((num_diag1, num_diag2)) - - if USE_GUDHI: - for i in range(num_diag1): - for j in range(num_diag2): - matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon) - else: - print("Gudhi built without CGAL: returning a null matrix") - - Xfit = matrix - + if not USE_GUDHI: + print("Gudhi built without CGAL: returning a null matrix") + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) if USE_GUDHI else np.zeros((len(X), len(self.diagrams_))) return Xfit class WassersteinDistance(BaseEstimator, TransformerMixin): @@ -181,28 +236,7 @@ class WassersteinDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances. """ - num_diag1 = len(X) - - #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): - if X is self.diagrams_: - matrix = np.zeros((num_diag1, num_diag1)) - - for i in range(num_diag1): - for j in range(i+1, num_diag1): - matrix[i,j] = wasserstein_distance(X[i], X[j], self.order, self.internal_p) - matrix[j,i] = matrix[i,j] - - else: - num_diag2 = len(self.diagrams_) - matrix = np.zeros((num_diag1, num_diag2)) - - for i in range(num_diag1): - for j in range(num_diag2): - matrix[i,j] = wasserstein_distance(X[i], self.diagrams_[j], self.order, self.internal_p) - - Xfit = matrix - - return Xfit + return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="wasserstein", order=self.order, internal_p=self.internal_p) class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ @@ -227,11 +261,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin): y (n x 1 array): persistence diagram labels (unused). """ self.diagrams_ = X - projection = (1./2) * np.ones((2,2)) - self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))] - if self.kernel_approx is not None: - self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))] - self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))] return self def transform(self, X): @@ -244,60 +273,4 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances. 
""" - Xfit = np.zeros((len(X), len(self.diagrams_))) - if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): - for i in range(len(self.diagrams_)): - for j in range(i+1, len(self.diagrams_)): - if self.kernel_approx is not None: - Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0) - U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0) - vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T)) - vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) - if vectori_sum != 0: - vectori = vectori/vectori_sum - if vectorj_sum != 0: - vectorj = vectorj/vectorj_sum - Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) - Xfit[j,i] = Xfit[i,j] - else: - Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0) - U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0) - vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1) - vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1) - vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) - if vectori_sum != 0: - vectori = vectori/vectori_sum - if vectorj_sum != 0: - vectorj = vectorj/vectorj_sum - Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) - Xfit[j,i] = Xfit[i,j] - else: - projection = (1./2) * np.ones((2,2)) - diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))] - if self.kernel_approx is not None: - approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))] - approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))] - for i in range(len(X)): - for j in range(len(self.diagrams_)): - if self.kernel_approx is not None: - Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0) - U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0) - vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T)) - vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) - if vectori_sum != 0: - vectori = vectori/vectori_sum - if vectorj_sum != 0: - vectorj = vectorj/vectorj_sum - Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) 
) - else: - Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0) - U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0) - vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1) - vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1) - vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj) - if vectori_sum != 0: - vectori = vectori/vectori_sum - if vectorj_sum != 0: - vectorj = vectorj/vectorj_sum - Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) - return Xfit + return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) -- cgit v1.2.3 From 1dd1c554a962db70809eadb470eb2eaa733970d4 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 31 Jan 2020 14:59:32 -0500 Subject: revert first commit --- .../diagram_vectorizations_distances_kernels.py | 7 +-- src/python/gudhi/representations/metrics.py | 59 ---------------------- 2 files changed, 1 insertion(+), 65 deletions(-) diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index 66c32cc2..119072eb 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -9,7 +9,7 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet TopologicalVector, DiagramScaler, BirthPersistenceTransform,\ PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\ - SlicedWassersteinKernel, BottleneckDistance, WassersteinDistance, PersistenceFisherKernel + SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]]) diags = [D] @@ -117,11 +117,6 @@ X = SW.fit(diags) Y = SW.transform(diags2) print("SW kernel is " + str(Y[0][0])) -W = WassersteinDistance(order=2, internal_p=2) -X = W.fit(diags) -Y = W.transform(diags2) -print("Wasserstein distance is " + str(Y[0][0])) - W = BottleneckDistance(epsilon=.001) X = W.fit(diags) Y = W.transform(diags2) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 290c1d07..5f9ec6ab 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -10,7 +10,6 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances -from gudhi.wasserstein import wasserstein_distance try: from .. import bottleneck_distance USE_GUDHI = True @@ -146,64 +145,6 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): return Xfit -class WassersteinDistance(BaseEstimator, TransformerMixin): - """ - This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams. - """ - def __init__(self, order=2, internal_p=2): - """ - Constructor for the WassersteinDistance class. - - Parameters: - order (int): exponent for Wasserstein, default value is 2., see :func:`gudhi.wasserstein.wasserstein_distance`. - internal_p (int): ground metric on the (upper-half) plane (i.e. 
norm l_p in R^2), default value is 2 (euclidean norm), see :func:`gudhi.wasserstein.wasserstein_distance`. - """ - self.order, self.internal_p = order, internal_p - - def fit(self, X, y=None): - """ - Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**. - - Parameters: - X (list of n x 2 numpy arrays): input persistence diagrams. - y (n x 1 array): persistence diagram labels (unused). - """ - self.diagrams_ = X - return self - - def transform(self, X): - """ - Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams. - - Parameters: - X (list of n x 2 numpy arrays): input persistence diagrams. - - Returns: - numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances. - """ - num_diag1 = len(X) - - #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]): - if X is self.diagrams_: - matrix = np.zeros((num_diag1, num_diag1)) - - for i in range(num_diag1): - for j in range(i+1, num_diag1): - matrix[i,j] = wasserstein_distance(X[i], X[j], self.order, self.internal_p) - matrix[j,i] = matrix[i,j] - - else: - num_diag2 = len(self.diagrams_) - matrix = np.zeros((num_diag1, num_diag2)) - - for i in range(num_diag1): - for j in range(num_diag2): - matrix[i,j] = wasserstein_distance(X[i], self.diagrams_[j], self.order, self.internal_p) - - Xfit = matrix - - return Xfit - class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. -- cgit v1.2.3 From f2020f6bb3a4d2bbd774aa630151ef1db53ac4f8 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Sun, 2 Feb 2020 15:23:03 -0500 Subject: fixed Marc's comments --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 4786b244..301f7aae 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1471,7 +1471,7 @@ class Simplex_tree { * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration and this->get_persistence. * @return A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. */ - std::vector>>> convert(const std::vector>>& dgm){ + std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ std::vector>>> new_dgm(4); double x, y; for(unsigned int i = 0; i < dgm.size(); i++){ int h = dgm[i].first; double px = dgm[i].second.first; double py = dgm[i].second.second; if(std::isinf(py)) continue; @@ -1487,7 +1487,7 @@ class Simplex_tree { return new_dgm; } - /** \brief Extend filtration for computing extended persistence. + /** \brief Extend filtration for computing extended persistence. 
This function only uses the filtration values at the 0-dimensional simplices, and computes the extended persistence diagram induced by the lower-star filtration computed with these values. Note that after calling this function, the filtration values are actually modified. The function compute_extended_persistence_subdiagrams retrieves the original values and separates the extended persistence diagram points w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after computing the persistent homology of the extended simplicial complex. */ void extend_filtration() { -- cgit v1.2.3 From d6afaa8300daa6204282a7d34df6bea33ea59fd2 Mon Sep 17 00:00:00 2001 From: takeshimeonerespect <58589594+takeshimeonerespect@users.noreply.github.com> Date: Mon, 3 Feb 2020 14:13:52 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index 5c7ba542..f283916d 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -1,3 +1,11 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# Author(s): Martin Royer, Yuichi Ike, Masatoshi Takenouchi +# +# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd. +# Modification(s): +# - YYYY/MM Author: Description of the modification + import numpy as np class TimeDelayEmbedding: -- cgit v1.2.3 From eded147ffffe5b7143cad19ecd134fb7a63991a3 Mon Sep 17 00:00:00 2001 From: takenouchi Date: Tue, 4 Feb 2020 14:08:19 +0900 Subject: change a file name --- src/python/test/test_time_delay.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100755 src/python/test/test_time_delay.py diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py new file mode 100755 index 00000000..2ee0c1fb --- /dev/null +++ b/src/python/test/test_time_delay.py @@ -0,0 +1,35 @@ +from gudhi.point_cloud.timedelay import TimeDelayEmbedding + +def test_normal(): + # Sample array + ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + # Normal case. 
+ prep = TimeDelayEmbedding() + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [2, 3, 4]) + assert (attractor[2] == [3, 4, 5]) + assert (attractor[3] == [4, 5, 6]) + assert (attractor[4] == [5, 6, 7]) + assert (attractor[5] == [6, 7, 8]) + assert (attractor[6] == [7, 8, 9]) + assert (attractor[7] == [8, 9, 10]) + # Delay = 3 + prep = TimeDelayEmbedding(delay=3) + attractor = prep(ts) + assert (attractor[0] == [1, 4, 7]) + assert (attractor[1] == [2, 5, 8]) + assert (attractor[2] == [3, 6, 9]) + assert (attractor[3] == [4, 7, 10]) + # Skip = 3 + prep = TimeDelayEmbedding(skip=3) + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [4, 5, 6]) + assert (attractor[2] == [7, 8, 9]) + # Delay = 2 / Skip = 2 + prep = TimeDelayEmbedding(delay=2, skip=2) + attractor = prep(ts) + assert (attractor[0] == [1, 3, 5]) + assert (attractor[1] == [3, 5, 7]) + assert (attractor[2] == [5, 7, 9]) -- cgit v1.2.3 From 5ddb724824798fe194a66285e29ea4c5cc2713e2 Mon Sep 17 00:00:00 2001 From: takeshimeonerespect Date: Tue, 4 Feb 2020 14:24:27 +0900 Subject: Delete test_point_cloud.py --- src/python/test/test_point_cloud.py | 35 ----------------------------------- 1 file changed, 35 deletions(-) delete mode 100755 src/python/test/test_point_cloud.py diff --git a/src/python/test/test_point_cloud.py b/src/python/test/test_point_cloud.py deleted file mode 100755 index 2ee0c1fb..00000000 --- a/src/python/test/test_point_cloud.py +++ /dev/null @@ -1,35 +0,0 @@ -from gudhi.point_cloud.timedelay import TimeDelayEmbedding - -def test_normal(): - # Sample array - ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - # Normal case. - prep = TimeDelayEmbedding() - attractor = prep(ts) - assert (attractor[0] == [1, 2, 3]) - assert (attractor[1] == [2, 3, 4]) - assert (attractor[2] == [3, 4, 5]) - assert (attractor[3] == [4, 5, 6]) - assert (attractor[4] == [5, 6, 7]) - assert (attractor[5] == [6, 7, 8]) - assert (attractor[6] == [7, 8, 9]) - assert (attractor[7] == [8, 9, 10]) - # Delay = 3 - prep = TimeDelayEmbedding(delay=3) - attractor = prep(ts) - assert (attractor[0] == [1, 4, 7]) - assert (attractor[1] == [2, 5, 8]) - assert (attractor[2] == [3, 6, 9]) - assert (attractor[3] == [4, 7, 10]) - # Skip = 3 - prep = TimeDelayEmbedding(skip=3) - attractor = prep(ts) - assert (attractor[0] == [1, 2, 3]) - assert (attractor[1] == [4, 5, 6]) - assert (attractor[2] == [7, 8, 9]) - # Delay = 2 / Skip = 2 - prep = TimeDelayEmbedding(delay=2, skip=2) - attractor = prep(ts) - assert (attractor[0] == [1, 3, 5]) - assert (attractor[1] == [3, 5, 7]) - assert (attractor[2] == [5, 7, 9]) -- cgit v1.2.3 From 360cc2cc31e9e81b99f5c21aa2b4e79b066baabf Mon Sep 17 00:00:00 2001 From: mathieu Date: Tue, 4 Feb 2020 19:44:52 -0500 Subject: fixed Vincent's comments --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 74 ++++++++++++++++++----- src/python/gudhi/simplex_tree.pxd | 2 +- src/python/gudhi/simplex_tree.pyx | 14 +++-- src/python/test/test_simplex_tree.py | 86 +++++++++++++++++++++++++-- 4 files changed, 150 insertions(+), 26 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 301f7aae..42cf4246 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1467,34 +1467,68 @@ class Simplex_tree { } } - /** \brief Retrieve good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. 
Need extend_filtration to be called first!
-   * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration and this->get_persistence.
-   * @return A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-.
+  /** \brief Retrieve good values for extended persistence, and separate the
+   * diagrams into the ordinary, relative, extended+ and extended- subdiagrams.
+   * Need extend_filtration to be called first!
+   * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration
+   * and this->get_persistence.
+   * @return A vector of four persistence diagrams. The first one is Ordinary, the
+   * second one is Relative, the third one is Extended+ and the fourth one is Extended-.
    */
  std::vector<std::vector<std::pair<int, std::pair<double, double>>>> compute_extended_persistence_subdiagrams(const std::vector<std::pair<int, std::pair<double, double>>>& dgm){
-    std::vector<std::vector<std::pair<int, std::pair<double, double>>>> new_dgm(4); double x, y;
-    for(unsigned int i = 0; i < dgm.size(); i++){  int h = dgm[i].first; double px = dgm[i].second.first; double py = dgm[i].second.second;
+    std::vector<std::vector<std::pair<int, std::pair<double, double>>>> new_dgm(4);
+    double x, y;
+    for(unsigned int i = 0; i < dgm.size(); i++){
+      int h = dgm[i].first;
+      double px = dgm[i].second.first;
+      double py = dgm[i].second.second;
       if(std::isinf(py)) continue;
       else{
-        if ((px <= -1) & (py <= -1)){x = minval_ + (maxval_-minval_)*(px + 2); y = minval_ + (maxval_-minval_)*(py + 2); new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y))); }
-        if ((px >= 1) & (py >= 1)){x = minval_ - (maxval_-minval_)*(px - 2); y = minval_ - (maxval_-minval_)*(py - 2); new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y))); }
-        if ((px <= -1) & (py >= 1)){x = minval_ + (maxval_-minval_)*(px + 2); y = minval_ - (maxval_-minval_)*(py - 2);
-          if (x <= y) new_dgm[2].push_back(std::make_pair(h, std::make_pair(x,y)));
-          else new_dgm[3].push_back(std::make_pair(h, std::make_pair(x,y)));
+        if ((px <= -1) & (py <= -1)){
+          x = minval_ + (maxval_-minval_)*(px + 2);
+          y = minval_ + (maxval_-minval_)*(py + 2);
+          new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y)));
+        }
+        if ((px >= 1) & (py >= 1)){
+          x = minval_ - (maxval_-minval_)*(px - 2);
+          y = minval_ - (maxval_-minval_)*(py - 2);
+          new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y)));
+        }
+        if ((px <= -1) & (py >= 1)){
+          x = minval_ + (maxval_-minval_)*(px + 2);
+          y = minval_ - (maxval_-minval_)*(py - 2);
+          if (x <= y){
+            new_dgm[2].push_back(std::make_pair(h, std::make_pair(x,y)));
+          }
+          else{
+            new_dgm[3].push_back(std::make_pair(h, std::make_pair(x,y)));
+          }
         }
       }
     }
     return new_dgm;
   }

-  /** \brief Extend filtration for computing extended persistence. This function only uses the filtration values at the 0-dimensional simplices, and computes the extended persistence diagram induced by the lower-star filtration computed with these values. Note that after calling this function, the filtration values are actually modified. The function compute_extended_persistence_subdiagrams retrieves the original values and separates the extended persistence diagram points w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after computing the persistent homology of the extended simplicial complex.
+  /** \brief Extend filtration for computing extended persistence.
+   * This function only uses the filtration values at the 0-dimensional simplices,
+   * and computes the extended persistence diagram induced by the lower-star filtration
+   * computed with these values. Note that after calling this function, the filtration
+   * values are actually modified. The function compute_extended_persistence_subdiagrams
+   * retrieves the original values and separates the extended persistence diagram points
+   * w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after
+   * computing the persistent homology of the extended simplicial complex.
    */
   void extend_filtration() {
     // Compute maximum and minimum of filtration values
-    int maxvert = -std::numeric_limits<int>::infinity();
+    int maxvert = -std::numeric_limits<int>::infinity();
     std::vector<double> filt;
-    for (auto sh : this->complex_simplex_range()) {if (this->dimension(sh) == 0){filt.push_back(this->filtration(sh)); maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert);}}
+    for (auto sh : this->complex_simplex_range()) {
+      if (this->dimension(sh) == 0){
+        filt.push_back(this->filtration(sh));
+        maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert);
+      }
+    }
     minval_ = *std::min_element(filt.begin(), filt.end());
     maxval_ = *std::max_element(filt.begin(), filt.end());
     maxvert += 1;
@@ -1502,13 +1536,20 @@ class Simplex_tree {
     // Compute vectors of integers corresponding to the Simplex handles
     std::vector<std::vector<int> > splxs;
     for (auto sh : this->complex_simplex_range()) {
-      std::vector<int> vr; for (auto vh : this->simplex_vertex_range(sh)){vr.push_back(vh);}
+      std::vector<int> vr;
+      for (auto vh : this->simplex_vertex_range(sh)){
+        vr.push_back(vh);
+      }
       splxs.push_back(vr);
     }

     // Add point for coning the simplicial complex
     int count = this->num_simplices();
-    std::vector<int> cone; cone.push_back(maxvert); auto ins = this->insert_simplex(cone, -3); this->assign_key(ins.first, count); count++;
+    std::vector<int> cone;
+    cone.push_back(maxvert);
+    auto ins = this->insert_simplex(cone, -3);
+    this->assign_key(ins.first, count);
+    count++;

     // For each simplex
     for (auto vr : splxs){
@@ -1531,7 +1572,8 @@ class Simplex_tree {
       count++;
     }

-    this->make_filtration_non_decreasing(); this->initialize_filtration();
+    this->make_filtration_non_decreasing();
+    this->initialize_filtration();
   }


diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 4393047f..7aa16926 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -44,7 +44,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
         bool prune_above_filtration(double filtration)
         bool make_filtration_non_decreasing()
         void extend_filtration()
-        vector[vector[pair[int, pair[double, double]]]] convert(vector[pair[int, pair[double, double]]])
+        vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]])

 cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
     cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index cfab14f4..e429e28a 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -387,17 +387,21 @@ cdef class SimplexTree:
         return self.get_ptr().make_filtration_non_decreasing()

     def extend_filtration(self):
-        """ This function extends filtration for computing extended persistence.
+        """ Extend filtration for computing extended persistence.
        This function only uses the filtration values at the 0-dimensional simplices,
        and computes the extended persistence diagram induced by the lower-star filtration
        computed with these values.
Note that after calling this function, the filtration values are actually modified. The function :func:`compute_extended_persistence_subdiagrams()` retrieves the original values and separates the extended persistence diagram points w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after computing the persistent homology of the extended simplicial complex. """ return self.get_ptr().extend_filtration() - def convert(self, dgm): - """This function retrieves good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. Need extend_filtration to be called first! + def compute_extended_persistence_subdiagrams(self, dgm): + """This function retrieves good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. - :param dgm: Persistence diagram obtained after calling this->extend_filtration and this->get_persistence. + :param dgm: Persistence diagram obtained after calling :func:`extend_filtration()` and :func:`persistence()`. :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. + + .. note:: + + This function should be called only after calling :func:`extend_filtration()` and :func:`persistence()`. """ - return self.get_ptr().convert(dgm) + return self.get_ptr().compute_extended_persistence_subdiagrams(dgm) def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 1822c43b..7e3d843e 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -244,7 +244,85 @@ def test_make_filtration_non_decreasing(): assert st.filtration([0, 1, 6]) == 1.0 assert st.filtration([0, 1]) == 1.0 assert st.filtration([0]) == 1.0 - assert st.filtration([1]) == 1.0 - assert st.filtration([3, 4, 5]) == 2.0 - assert st.filtration([3, 4]) == 2.0 - assert st.filtration([4, 5]) == 2.0 + +def test_extend_filtration(): + + # Inserted simplex: + # 5 4 + # o o + # / \ / + # o o + # /2\ /3 + # o o + # 1 0 + + st = SimplexTree() + st.insert([0,2]) + st.insert([1,2]) + st.insert([0,3]) + st.insert([2,5]) + st.insert([3,4]) + st.insert([3,5]) + st.assign_filtration([0], 1.) + st.assign_filtration([1], 2.) + st.assign_filtration([2], 3.) + st.assign_filtration([3], 4.) + st.assign_filtration([4], 5.) + st.assign_filtration([5], 6.) 
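+    # extend_filtration() is expected to rescale the original vertex values
+    # 1..6 into [-2., -1.] on the ascending part, to map the coned copies
+    # into [1., 2.], and to give the cone point [6] the value -3., as the
+    # assertions below check.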
+
+    assert st.get_filtration() == [
+        ([0, 2], 0.0),
+        ([1, 2], 0.0),
+        ([0, 3], 0.0),
+        ([3, 4], 0.0),
+        ([2, 5], 0.0),
+        ([3, 5], 0.0),
+        ([0], 1.0),
+        ([1], 2.0),
+        ([2], 3.0),
+        ([3], 4.0),
+        ([4], 5.0),
+        ([5], 6.0)
+    ]
+
+
+    st.extend_filtration()
+
+    assert st.get_filtration() == [
+        ([6], -3.0),
+        ([0], -2.0),
+        ([1], -1.8),
+        ([2], -1.6),
+        ([0, 2], -1.6),
+        ([1, 2], -1.6),
+        ([3], -1.4),
+        ([0, 3], -1.4),
+        ([4], -1.2),
+        ([3, 4], -1.2),
+        ([5], -1.0),
+        ([2, 5], -1.0),
+        ([3, 5], -1.0),
+        ([5, 6], 1.0),
+        ([4, 6], 1.2),
+        ([3, 6], 1.4),
+        ([3, 4, 6], 1.4),
+        ([3, 5, 6], 1.4),
+        ([2, 6], 1.6),
+        ([2, 5, 6], 1.6),
+        ([1, 6], 1.8),
+        ([1, 2, 6], 1.8),
+        ([0, 6], 2.0),
+        ([0, 2, 6], 2.0),
+        ([0, 3, 6], 2.0)
+    ]
+
+
+    dgm = st.persistence()
+    L = st.compute_extended_persistence_subdiagrams(dgm)
+    assert L == [
+        [(0, (1.9999999999999998, 2.9999999999999996))],
+        [(1, (5.0, 4.0))],
+        [(0, (1.0, 6.0))],
+        [(1, (6.0, 1.0))]
+    ]
+
-- 
cgit v1.2.3


From c6b5f941e94fdabb3649637d195d3d85c645796b Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Wed, 5 Feb 2020 10:34:18 +0100
Subject: tbb::mutex was still there

---
 src/Nerve_GIC/include/gudhi/GIC.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index 61db6508..348dcc85 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -895,7 +895,7 @@ class Cover_complex {
     // Compute the geodesic distances to subsamples with Dijkstra
 #ifdef GUDHI_USE_TBB
     if (verbose) std::clog << "Computing geodesic distances (parallelized)..." << std::endl;
-    std::mutex coverMutex; tbb::mutex mindistMutex;
+    std::mutex coverMutex; std::mutex mindistMutex;
     tbb::parallel_for(0, m, [&](int i){
       int seed = voronoi_subsamples[i];
       std::vector<double> dmap(n);
-- 
cgit v1.2.3


From 596355344e6205d02110e38a0cb7e0a94e8dbd27 Mon Sep 17 00:00:00 2001
From: takenouchi
Date: Thu, 6 Feb 2020 16:00:47 +0900
Subject: modify CMakeLists.txt

---
 src/python/CMakeLists.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index b23ec8a9..798e2907 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -401,8 +401,8 @@ endif(CGAL_FOUND)
         add_gudhi_py_test(test_representations)
       endif()

-      # Point cloud
-      add_gudhi_py_test(test_point_cloud)
+      # Time Delay
+      add_gudhi_py_test(test_time_delay)

       # Documentation generation is available through sphinx - requires all modules
       if(SPHINX_PATH)
-- 
cgit v1.2.3


From 24a76cc53c935dee93f2367f176143c015009e3f Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Thu, 6 Feb 2020 10:51:43 +0100
Subject: Use exceptions instead of error messages for non-existing files

---
 ...ex_diagram_persistence_from_off_file_example.py | 14 ++++++++-----
 .../alpha_rips_persistence_bottleneck_distance.py  | 24 +++++++++++++---------
 ...ex_diagram_persistence_from_off_file_example.py | 20 +++++++++++-------
 ...ex_diagram_persistence_from_off_file_example.py | 12 +++++++----
 ...arcode_persistence_from_perseus_file_example.py | 17 +++++++++------
 ...ex_diagram_persistence_from_off_file_example.py | 17 +++++++++------
 ...complex_plain_homology_from_off_file_example.py | 19 ++++++++++-------
 src/python/gudhi/alpha_complex.pyx                 | 10 +++++----
 src/python/gudhi/cubical_complex.pyx               | 11 ++++++----
 src/python/gudhi/off_reader.pyx                    | 12 ++++++-----
 10 files changed, 98 insertions(+), 58 deletions(-)

diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py index 6afaf533..727af4fa 100755 --- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py @@ -1,12 +1,15 @@ #!/usr/bin/env python import argparse +import errno +import os import matplotlib.pyplot as plot -import sys import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -42,7 +45,7 @@ args = parser.parse_args() with open(args.file, "r") as f: first_line = f.readline() if (first_line == "OFF\n") or (first_line == "nOFF\n"): - print("#####################################################################") + print("##############################################################") print("AlphaComplex creation from points read in a OFF file") message = "AlphaComplex with max_edge_length=" + repr(args.max_alpha_square) @@ -65,6 +68,7 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) f.close() diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py index 7b4aa3e7..f156826d 100755 --- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py +++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py @@ -3,10 +3,13 @@ import gudhi import argparse import math -import sys +import errno +import os -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. 
Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -37,7 +40,7 @@ with open(args.file, "r") as f: first_line = f.readline() if (first_line == "OFF\n") or (first_line == "nOFF\n"): point_cloud = gudhi.read_points_from_off_file(off_file=args.file) - print("#####################################################################") + print("##############################################################") print("RipsComplex creation from points read in a OFF file") message = "RipsComplex with max_edge_length=" + repr(args.threshold) @@ -47,14 +50,15 @@ with open(args.file, "r") as f: points=point_cloud, max_edge_length=args.threshold ) - rips_stree = rips_complex.create_simplex_tree(max_dimension=args.max_dimension) + rips_stree = rips_complex.create_simplex_tree( + max_dimension=args.max_dimension) message = "Number of simplices=" + repr(rips_stree.num_simplices()) print(message) rips_diag = rips_stree.persistence() - print("#####################################################################") + print("##############################################################") print("AlphaComplex creation from points read in a OFF file") message = "AlphaComplex with max_edge_length=" + repr(args.threshold) @@ -94,13 +98,13 @@ with open(args.file, "r") as f: print(message) max_b_distance = max(bottleneck_distance, max_b_distance) - print( - "================================================================================" - ) + print("==============================================================") message = "Bottleneck distance is " + repr(max_b_distance) print(message) else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) + f.close() diff --git a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py index f61d692b..e1e572df 100755 --- a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py @@ -1,12 +1,15 @@ #!/usr/bin/env python import argparse +import errno +import os import matplotlib.pyplot as plot -import sys import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. 
Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -45,8 +48,9 @@ args = parser.parse_args() with open(args.file, "r") as f: first_line = f.readline() if (first_line == "OFF\n") or (first_line == "nOFF\n"): - print("#####################################################################") - print("EuclideanStrongWitnessComplex creation from points read in a OFF file") + print("##############################################################") + print("EuclideanStrongWitnessComplex creation from points read "\ + "in a OFF file") witnesses = gudhi.read_points_from_off_file(off_file=args.file) landmarks = gudhi.pick_n_random_points( @@ -65,7 +69,8 @@ with open(args.file, "r") as f: witnesses=witnesses, landmarks=landmarks ) simplex_tree = witness_complex.create_simplex_tree( - max_alpha_square=args.max_alpha_square, limit_dimension=args.limit_dimension + max_alpha_square=args.max_alpha_square, + limit_dimension=args.limit_dimension ) message = "Number of simplices=" + repr(simplex_tree.num_simplices()) @@ -80,6 +85,7 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) f.close() diff --git a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py index aaa03dad..58cb2bb5 100755 --- a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py @@ -1,12 +1,15 @@ #!/usr/bin/env python import argparse +import errno +import os import matplotlib.pyplot as plot -import sys import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -79,6 +82,7 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) f.close() diff --git a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py index 97bfd49f..499171df 100755 --- a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py +++ b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py @@ -2,11 +2,14 @@ import argparse import matplotlib.pyplot as plot -import sys +import errno +import os import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. 
Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -58,9 +61,10 @@ parser.add_argument( args = parser.parse_args() if is_file_perseus(args.file): - print("#####################################################################") + print("##################################################################") print("PeriodicCubicalComplex creation") - periodic_cubical_complex = gudhi.PeriodicCubicalComplex(perseus_file=args.file) + periodic_cubical_complex = gudhi.PeriodicCubicalComplex( + perseus_file=args.file) print("persistence(homology_coeff_field=3, min_persistence=0)=") diag = periodic_cubical_complex.persistence( @@ -74,4 +78,5 @@ if is_file_perseus(args.file): gudhi.plot_persistence_barcode(diag) plot.show() else: - print(args.file, "is not a valid perseus style file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) diff --git a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py index 5d8f057b..6f992508 100755 --- a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py +++ b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py @@ -1,12 +1,15 @@ #!/usr/bin/env python import argparse +import errno +import os import matplotlib.pyplot as plot -import sys import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -43,10 +46,11 @@ args = parser.parse_args() with open(args.file, "r") as f: first_line = f.readline() if (first_line == "OFF\n") or (first_line == "nOFF\n"): - print("#####################################################################") + print("##############################################################") print("RipsComplex creation from points read in a OFF file") - message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length) + message = "RipsComplex with max_edge_length=" + \ + repr(args.max_edge_length) print(message) point_cloud = gudhi.read_points_from_off_file(off_file=args.file) @@ -69,6 +73,7 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) f.close() diff --git a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py index 77ac2ea7..85bade4a 100755 --- a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py +++ b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py @@ -1,12 +1,15 @@ #!/usr/bin/env python import argparse +import errno +import os import matplotlib.pyplot as plot -import sys import gudhi -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - + which is released under MIT. 
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full + license details. Author(s): Vincent Rouvreau Copyright (C) 2016 Inria @@ -20,7 +23,7 @@ __copyright__ = "Copyright (C) 2016 Inria" __license__ = "MIT" parser = argparse.ArgumentParser( - description="TangentialComplex creation from " "points read in a OFF file.", + description="TangentialComplex creation from points read in a OFF file.", epilog="Example: " "example/tangential_complex_plain_homology_from_off_file_example.py " "-f ../data/points/tore3D_300.off -i 3" @@ -42,10 +45,11 @@ args = parser.parse_args() with open(args.file, "r") as f: first_line = f.readline() if (first_line == "OFF\n") or (first_line == "nOFF\n"): - print("#####################################################################") + print("##############################################################") print("TangentialComplex creation from points read in a OFF file") - tc = gudhi.TangentialComplex(intrisic_dim=args.intrisic_dim, off_file=args.file) + tc = gudhi.TangentialComplex(intrisic_dim=args.intrisic_dim, + off_file=args.file) tc.compute_tangential_complex() st = tc.create_simplex_tree() @@ -61,6 +65,7 @@ with open(args.file, "r") as f: gudhi.plot_persistence_diagram(diag, band=args.band) plot.show() else: - print(args.file, "is not a valid OFF file", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + args.file) f.close() diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx index dab4b56f..e04dc652 100644 --- a/src/python/gudhi/alpha_complex.pyx +++ b/src/python/gudhi/alpha_complex.pyx @@ -1,5 +1,7 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - +# which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full +# license details. # Author(s): Vincent Rouvreau # # Copyright (C) 2016 Inria @@ -14,7 +16,6 @@ from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool from libc.stdint cimport intptr_t -import sys import os from gudhi.simplex_tree cimport * @@ -71,7 +72,8 @@ cdef class AlphaComplex: def __cinit__(self, points = None, off_file = ''): if off_file: if os.path.isfile(off_file): - self.thisptr = new Alpha_complex_interface(off_file.encode('utf-8'), True) + self.thisptr = new Alpha_complex_interface( + off_file.encode('utf-8'), True) else: print("file " + off_file + " not found.") else: diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 1dd30b4e..463bd4ee 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -1,5 +1,7 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - +# which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full +# license details. 
 # Author(s): Vincent Rouvreau
 #
 # Copyright (C) 2016 Inria
@@ -13,7 +15,7 @@ from libcpp.vector cimport vector
 from libcpp.utility cimport pair
 from libcpp.string cimport string
 from libcpp cimport bool
-import sys
+import errno
 import os

 import numpy as np
@@ -89,7 +91,8 @@ cdef class CubicalComplex:
             if os.path.isfile(perseus_file):
                 self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
             else:
-                print("file " + perseus_file + " not found.", file=sys.stderr)
+                raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+                                        perseus_file)
         else:
             print("CubicalComplex can be constructed from dimensions and "
                 "top_dimensional_cells or from a Perseus-style file name.",
diff --git a/src/python/gudhi/off_reader.pyx b/src/python/gudhi/off_reader.pyx
index 0a828b83..a3200704 100644
--- a/src/python/gudhi/off_reader.pyx
+++ b/src/python/gudhi/off_reader.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
 # Author(s): Vincent Rouvreau
 #
 # Copyright (C) 2016 Inria
@@ -11,7 +13,7 @@ from __future__ import print_function
 from cython cimport numeric
 from libcpp.vector cimport vector
 from libcpp.string cimport string
-import sys
+import errno
 import os

 __author__ = "Vincent Rouvreau"
@@ -34,6 +36,6 @@ def read_points_from_off_file(off_file=''):
     if os.path.isfile(off_file):
         return read_points_from_OFF_file(off_file.encode('utf-8'))
     else:
-        print("file " + off_file + " not found.", file=sys.stderr)
-        return []
+        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+                                off_file)

-- 
cgit v1.2.3


From 26ef6e922c358f68d2bbee3aba20a1722c5150a1 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Thu, 6 Feb 2020 11:01:57 +0100
Subject: Use exceptions instead of error messages for non-existing files

---
 src/python/gudhi/cubical_complex.pyx |  4 ++--
 src/python/gudhi/nerve_gic.pyx       | 37 ++++++++++++++++++++++--------------
 2 files changed, 25 insertions(+), 16 deletions(-)

diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index 463bd4ee..31287d15 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -95,8 +95,8 @@ cdef class CubicalComplex:
                 perseus_file)
         else:
             print("CubicalComplex can be constructed from dimensions and "
-                "top_dimensional_cells or from a Perseus-style file name.",
-                file=sys.stderr)
+                  "top_dimensional_cells or from a Perseus-style file name.",
+                  file=sys.stderr)

     def __dealloc__(self):
         if self.thisptr != NULL:
diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx
index 022466c5..e291579b 100644
--- a/src/python/gudhi/nerve_gic.pyx
+++ b/src/python/gudhi/nerve_gic.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau # # Copyright (C) 2018 Inria @@ -13,7 +15,7 @@ from libcpp.vector cimport vector from libcpp.utility cimport pair from libcpp.string cimport string from libcpp cimport bool -import sys +import errno import os from libc.stdint cimport intptr_t @@ -98,7 +100,8 @@ cdef class CoverComplex: return self.thisptr != NULL def set_point_cloud_from_range(self, cloud): - """ Reads and stores the input point cloud from a vector stored in memory. + """ Reads and stores the input point cloud from a vector stored in + memory. :param cloud: Input vector containing the point cloud. :type cloud: vector[vector[double]] @@ -106,7 +109,8 @@ cdef class CoverComplex: return self.thisptr.set_point_cloud_from_range(cloud) def set_distances_from_range(self, distance_matrix): - """ Reads and stores the input distance matrix from a vector stored in memory. + """ Reads and stores the input distance matrix from a vector stored in + memory. :param distance_matrix: Input vector containing the distance matrix. :type distance_matrix: vector[vector[double]] @@ -165,7 +169,8 @@ cdef class CoverComplex: """ stree = SimplexTree() cdef intptr_t stree_int_ptr=stree.thisptr - self.thisptr.create_simplex_tree(stree_int_ptr) + self.thisptr.create_simplex_tree( + stree_int_ptr) return stree def find_simplices(self): @@ -184,8 +189,8 @@ cdef class CoverComplex: if os.path.isfile(off_file): return self.thisptr.read_point_cloud(off_file.encode('utf-8')) else: - print("file " + off_file + " not found.", file=sys.stderr) - return False + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + off_file) def set_automatic_resolution(self): """Computes the optimal length of intervals (i.e. the smallest interval @@ -216,7 +221,8 @@ cdef class CoverComplex: if os.path.isfile(color_file_name): self.thisptr.set_color_from_file(color_file_name.encode('utf-8')) else: - print("file " + color_file_name + " not found.", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + color_file_name) def set_color_from_range(self, color): """Computes the function used to color the nodes of the simplicial @@ -237,7 +243,8 @@ cdef class CoverComplex: if os.path.isfile(cover_file_name): self.thisptr.set_cover_from_file(cover_file_name.encode('utf-8')) else: - print("file " + cover_file_name + " not found.", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + cover_file_name) def set_cover_from_function(self): """Creates a cover C from the preimages of the function f. @@ -270,7 +277,8 @@ cdef class CoverComplex: if os.path.isfile(func_file_name): self.thisptr.set_function_from_file(func_file_name.encode('utf-8')) else: - print("file " + func_file_name + " not found.", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + func_file_name) def set_function_from_range(self, function): """Creates the function f from a vector stored in memory. @@ -304,14 +312,15 @@ cdef class CoverComplex: """Creates a graph G from a file containing the edges. :param graph_file_name: Name of the input graph file. The graph file - contains one edge per line, each edge being represented by the IDs of - its two nodes. + contains one edge per line, each edge being represented by the IDs + of its two nodes. 
:type graph_file_name: string """ if os.path.isfile(graph_file_name): self.thisptr.set_graph_from_file(graph_file_name.encode('utf-8')) else: - print("file " + graph_file_name + " not found.", file=sys.stderr) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), + graph_file_name) def set_graph_from_OFF(self): """Creates a graph G from the triangulation given by the input OFF -- cgit v1.2.3 From 00c46d21df80c51a0c83e412230f4583a5803fc9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 6 Feb 2020 19:27:36 +0100 Subject: Print pybind11 version, protect test --- src/python/CMakeLists.txt | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index edb1ba02..090a7446 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -32,6 +32,10 @@ function( add_gudhi_debug_info DEBUG_INFO ) endfunction( add_gudhi_debug_info ) if(PYTHONINTERP_FOUND) + if(PYBIND11_FOUND) + add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") + endif() if(CYTHON_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'simplex_tree', ") @@ -391,9 +395,9 @@ endif(CGAL_FOUND) add_gudhi_py_test(test_reader_utils) # Wasserstein - if(OT_FOUND) + if(OT_FOUND AND PYBIND11_FOUND) add_gudhi_py_test(test_wasserstein_distance) - endif(OT_FOUND) + endif() # Representations if(SKLEARN_FOUND AND MATPLOTLIB_FOUND) -- cgit v1.2.3 From 08b82e8a606a7fcd1219e7074cc2f15340090e59 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 6 Feb 2020 21:20:17 +0100 Subject: Make pybind11 mandatory and simplify The use of install_requires and setup_requires looks strange, I would expect cython in setup_requires, not install_requires. But setup_requires doesn't seem to work so well anyway. --- src/python/doc/installation.rst | 13 +++---------- src/python/setup.py.in | 19 +++---------------- 2 files changed, 6 insertions(+), 26 deletions(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index f8456799..d459145b 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -14,10 +14,11 @@ Compiling ********* The library uses c++14 and requires `Boost `_ ≥ 1.56.0, `CMake `_ ≥ 3.1 to generate makefiles, -`NumPy `_ and `Cython `_ to compile +`NumPy `_, `Cython `_ and +`pybind11 `_ to compile the GUDHI Python module. It is a multi-platform library and compiles on Linux, Mac OSX and Visual -Studio 2015. +Studio 2017. On `Windows `_ , only Python ≥ 3.5 are available because of the required Visual Studio version. @@ -257,14 +258,6 @@ The :doc:`Wasserstein distance ` module requires `POT `_, a library that provides several solvers for optimization problems related to Optimal Transport. -Pybind11 -======== - -The :doc:`Wasserstein distance ` module requires -`pybind11 `_, a library that provides -interoperability between C++ and Python, for its interface to `Hera -`_. 
- Scikit-learn ============ diff --git a/src/python/setup.py.in b/src/python/setup.py.in index 851188bd..d05e4675 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -12,6 +12,7 @@ from setuptools import setup, Extension from Cython.Build import cythonize from numpy import get_include as numpy_get_include import sys +import pybind11 __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -27,20 +28,6 @@ library_dirs=[@GUDHI_PYTHON_LIBRARY_DIRS@] include_dirs = [numpy_get_include(), '@CMAKE_CURRENT_SOURCE_DIR@/gudhi/', @GUDHI_PYTHON_INCLUDE_DIRS@] runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@] -# Copied from https://github.com/pybind/python_example/blob/master/setup.py -class get_pybind_include(object): - """Helper class to determine the pybind11 include path - The purpose of this class is to postpone importing pybind11 - until it is actually installed, so that the ``get_include()`` - method can be invoked. """ - - def __init__(self, user=False): - self.user = user - - def __str__(self): - import pybind11 - return pybind11.get_include(self.user) - # Create ext_modules list from module list ext_modules = [] for module in modules: @@ -64,7 +51,7 @@ ext_modules.append(Extension( language = 'c++', include_dirs = include_dirs + ['@HERA_WASSERSTEIN_INCLUDE_DIR@', - get_pybind_include(False), get_pybind_include(True)], + pybind11.get_include(False), pybind11.get_include(True)], extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], )) @@ -76,6 +63,6 @@ setup( version='@GUDHI_VERSION@', url='http://gudhi.gforge.inria.fr/', ext_modules = ext_modules, - install_requires = ['cython','numpy >= 1.9','pybind11',], + install_requires = ['cython','numpy >= 1.9',], setup_requires = ['numpy >= 1.9','pybind11',], ) -- cgit v1.2.3 From 518c619d578dc6f168b6369417f15872e3cd0056 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 6 Feb 2020 21:54:44 +0100 Subject: use bibtex --- biblio/bibliography.bib | 12 ++++++++++++ src/python/doc/wasserstein_distance_user.rst | 10 +++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index a1b951e0..3bbe7960 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -1180,3 +1180,15 @@ language={English} booktitle = {In Neural Information Processing Systems}, year = {2007} } +@inproceedings{10.5555/3327546.3327645, +author = {Lacombe, Th\'{e}o and Cuturi, Marco and Oudot, Steve}, +title = {Large Scale Computation of Means and Clusters for Persistence Diagrams Using Optimal Transport}, +year = {2018}, +publisher = {Curran Associates Inc.}, +address = {Red Hook, NY, USA}, +booktitle = {Proceedings of the 32nd International Conference on Neural Information Processing Systems}, +pages = {9792–9802}, +numpages = {11}, +location = {Montr\'{e}al, Canada}, +series = {NIPS’18} +} diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 648cc568..99445b99 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -13,15 +13,15 @@ Functions --------- This implementation uses the Python Optimal Transport library and is based on ideas from "Large Scale Computation of Means and Cluster for Persistence -Diagrams via Optimal Transport". +Diagrams via Optimal Transport" :cite:`10.5555/3327546.3327645`. .. 
autofunction:: gudhi.wasserstein.wasserstein_distance This other implementation comes from `Hera -`_ (BSD-3-Clause) and is -based on `"Geometry Helps to Compare Persistence Diagrams." -`_ by Michael Kerber, Dmitriy -Morozov, and Arnur Nigmetov, at ALENEX 2016. +`_ (BSD-3-Clause) which is +based on "Geometry Helps to Compare Persistence Diagrams" +:cite:`Kerber:2017:GHC:3047249.3064175` by Michael Kerber, Dmitriy +Morozov, and Arnur Nigmetov. .. autofunction:: gudhi.hera.wasserstein_distance -- cgit v1.2.3 From e8c908469cb4ac547d4fd46ad8daf5ee21739f58 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 6 Feb 2020 22:14:08 +0100 Subject: pytest.approx --- src/python/test/test_wasserstein_distance.py | 34 ++++++++++++++-------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 46a7079f..6a14c50e 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -11,6 +11,7 @@ from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np +import pytest __author__ = "Theo Lacombe" __copyright__ = "Copyright (C) 2019 Inria" @@ -24,32 +25,31 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True): emptydiag = np.array([]) # We just need to handle positive numbers here - def approx(a, b): - f = 1 + delta - return a <= b*f and b <= a*f + def approx(x): + return pytest.approx(x, rel=delta) assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=1.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=1.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=2.) == 0. assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=2.) == 0. - assert approx(wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.), 2.) - assert approx(wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.), 4.) + assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == approx(2.) + assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == approx(4.) - assert approx(wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.), 5.) # thank you Pythagorician triplets - assert approx(wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.), 2.5) - assert approx(wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.), 3.5355339059327378) + assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == approx(5.) # thank you Pythagorician triplets + assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == approx(2.5) + assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == approx(3.5355339059327378) - assert approx(wasserstein_distance(diag1, diag2, internal_p=2., order=1.) , 1.4453593023967701) - assert approx(wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74), 0.9772734057168739) + assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) 
== approx(1.4453593023967701) + assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == approx(0.9772734057168739) - assert approx(wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863), 3.141592214572228) + assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == approx(3.141592214572228) - assert approx(wasserstein_distance(diag3, diag4, internal_p=1., order=1.), 3.) - assert approx(wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.), 3.) # no diag matching here - assert approx(wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.), np.sqrt(5)) - assert approx(wasserstein_distance(diag3, diag4, internal_p=1., order=2.), np.sqrt(5)) - assert approx(wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.), np.sqrt(5)) + assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == approx(3.) + assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == approx(3.) # no diag matching here + assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == approx(np.sqrt(5)) + assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == approx(np.sqrt(5)) + assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == approx(np.sqrt(5)) if(not test_infinity): return @@ -58,7 +58,7 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True): diag6 = np.array([[7, 8], [4, 6], [3, np.inf]]) assert wasserstein_distance(diag4, diag5) == np.inf - assert approx(wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf), 4.) + assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.) def hera_wrap(delta): def fun(*kargs,**kwargs): -- cgit v1.2.3 From 5c037fb06250e93ad04bb45bdbceb937701e03fa Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 7 Feb 2020 16:32:33 +0100 Subject: Bad link for last version --- src/common/doc/header.html | 2 +- src/python/doc/_templates/layout.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/doc/header.html b/src/common/doc/header.html index 9fdb2321..99ab6bb7 100644 --- a/src/common/doc/header.html +++ b/src/common/doc/header.html @@ -56,7 +56,7 @@ $extrastylesheet Download diff --git a/src/python/doc/_templates/layout.html b/src/python/doc/_templates/layout.html index 2f2d9c72..a672a281 100644 --- a/src/python/doc/_templates/layout.html +++ b/src/python/doc/_templates/layout.html @@ -201,7 +201,7 @@ Download -- cgit v1.2.3 From 7be3cfef278917dc0c1905588ae88314273909d4 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 7 Feb 2020 19:38:27 +0100 Subject: More uniform notations between the 2 wassersteins --- src/python/gudhi/wasserstein.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index db5ddff2..b1cfd588 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -27,8 +27,8 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): ''' :param X: (n x 2) numpy.array encoding the (points of the) first diagram. :param Y: (m x 2) numpy.array encoding the second diagram. - :param internal_p: Ground metric (i.e. norm l_p). :param order: exponent for the Wasserstein metric. + :param internal_p: Ground metric (i.e. norm L^p). :returns: (n+1) x (m+1) np.array encoding the cost matrix C. For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. 
C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal. note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal). @@ -54,8 +54,8 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): def _perstot(X, order, internal_p): ''' :param X: (n x 2) numpy.array (points of a given diagram). - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (Euclidean norm). :param order: exponent for Wasserstein. Default value is 2. + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). ''' Xdiag = _proj_on_diag(X) @@ -66,8 +66,8 @@ def wasserstein_distance(X, Y, order=2., internal_p=2.): ''' :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (euclidean norm). :param order: exponent for Wasserstein; Default value is 2. + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (euclidean norm). :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. :rtype: float ''' -- cgit v1.2.3 From 458ee3e95c752f09058d933349851c8a3a730cad Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 7 Feb 2020 19:41:38 +0100 Subject: Name argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Théo Lacombe --- src/python/test/test_wasserstein_distance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 6a14c50e..4bc7114e 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -66,7 +66,7 @@ def hera_wrap(delta): return fun def test_wasserstein_distance_pot(): - _basic_wasserstein(pot, 1e-15, False) + _basic_wasserstein(pot, 1e-15, test_infinity=False) def test_wasserstein_distance_hera(): _basic_wasserstein(hera_wrap(1e-12), 1e-12) -- cgit v1.2.3 From 29e81d5038116aef0ec505e4d21d29f1c5920e34 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 7 Feb 2020 21:00:17 -0500 Subject: added sklearn trick --- src/python/gudhi/representations/kernel_methods.py | 20 +++--------- src/python/gudhi/representations/metrics.py | 37 +++++++++------------- 2 files changed, 20 insertions(+), 37 deletions(-) diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py index bbbb7c31..d89f69ab 100644 --- a/src/python/gudhi/representations/kernel_methods.py +++ b/src/python/gudhi/representations/kernel_methods.py @@ -62,27 +62,17 @@ def pairwise_persistence_diagram_kernels(X, Y=None, metric="sliced_wasserstein", :param metric: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. 
:returns: kernel matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) :rtype: float - """ - if Y is None: - YY = None - pX = Padding(use=True).fit_transform(X) - diag_len = len(pX[0]) - XX = np.reshape(np.vstack(pX), [-1, diag_len*3]) - else: - nX, nY = len(X), len(Y) - pD = Padding(use=True).fit_transform(X + Y) - diag_len = len(pD[0]) - XX = np.reshape(np.vstack(pD[:nX]), [-1, diag_len*3]) - YY = np.reshape(np.vstack(pD[nX:]), [-1, diag_len*3]) - + """ + XX = np.reshape(np.arange(len(X)), [-1,1]) + YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) if metric == "sliced_wasserstein": return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"]) elif metric == "persistence_fisher": return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"]) elif metric == "persistence_scale_space": - return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_scale_space_kernel, **kwargs)) + return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_scale_space_kernel, X, Y, **kwargs)) elif metric == "persistence_weighted_gaussian": - return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_weighted_gaussian_kernel, **kwargs)) + return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_weighted_gaussian_kernel, X, Y, **kwargs)) else: return pairwise_kernels(XX, YY, metric=sklearn_wrapper(metric, **kwargs)) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index cc788994..fead8aa0 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -85,13 +85,16 @@ def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): vectorj = vectorj/vectorj_sum return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) -def sklearn_wrapper(metric, **kwargs): +def sklearn_wrapper(metric, X, Y, **kwargs): """ - This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments. It turns the metric into another that takes flattened and padded diagrams as inputs. + This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments. 
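+    (The diagrams themselves are captured by closure: the wrapped function
+    receives one-element index arrays, as built by
+    pairwise_persistence_diagram_distances / pairwise_persistence_diagram_kernels,
+    and looks the corresponding diagrams up in X and Y, which lets sklearn's
+    pairwise machinery run on collections of diagrams of varying sizes.)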
""" - def flat_metric(D1, D2): - DD1, DD2 = np.reshape(D1, [-1,3]), np.reshape(D2, [-1,3]) - return metric(DD1[DD1[:,2]==1,0:2], DD2[DD2[:,2]==1,0:2], **kwargs) + if Y is None: + def flat_metric(a, b): + return metric(X[int(a[0])], X[int(b[0])], **kwargs) + else: + def flat_metric(a, b): + return metric(X[int(a[0])], Y[int(b[0])], **kwargs) return flat_metric def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs): @@ -103,28 +106,18 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa :returns: distance matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) :rtype: float """ - if Y is None: - YY = None - pX = Padding(use=True).fit_transform(X) - diag_len = len(pX[0]) - XX = np.reshape(np.vstack(pX), [-1, diag_len*3]) - else: - nX, nY = len(X), len(Y) - pD = Padding(use=True).fit_transform(X + Y) - diag_len = len(pD[0]) - XX = np.reshape(np.vstack(pD[:nX]), [-1, diag_len*3]) - YY = np.reshape(np.vstack(pD[nX:]), [-1, diag_len*3]) - + XX = np.reshape(np.arange(len(X)), [-1,1]) + YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) if metric == "bottleneck": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, **kwargs)) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) elif metric == "wasserstein": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(wasserstein_distance, **kwargs)) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(wasserstein_distance, X, Y, **kwargs)) elif metric == "sliced_wasserstein": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, **kwargs)) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, X, Y, **kwargs)) elif metric == "persistence_fisher": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(persistence_fisher_distance, **kwargs)) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(persistence_fisher_distance, X, Y, **kwargs)) else: - return pairwise_distances(XX, YY, metric=sklearn_wrapper(metric, **kwargs)) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(metric, X, Y, **kwargs)) class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): """ -- cgit v1.2.3 From b75123eeda446e7f778d4939da67a78e4c8c6abc Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 8 Feb 2020 17:39:05 +0100 Subject: Euclidean with a capital E --- src/Bottleneck_distance/include/gudhi/Persistence_graph.h | 2 +- src/python/doc/wasserstein_distance_user.rst | 2 +- src/python/gudhi/wasserstein.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Bottleneck_distance/include/gudhi/Persistence_graph.h b/src/Bottleneck_distance/include/gudhi/Persistence_graph.h index f791e37c..e1e3522e 100644 --- a/src/Bottleneck_distance/include/gudhi/Persistence_graph.h +++ b/src/Bottleneck_distance/include/gudhi/Persistence_graph.h @@ -25,7 +25,7 @@ namespace Gudhi { namespace persistence_diagram { -/** \internal \brief Structure representing an euclidean bipartite graph containing +/** \internal \brief Structure representing a Euclidean bipartite graph containing * the points from the two persistence diagrams (including the projections). 
* * \ingroup bottleneck_distance diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 99445b99..94b454e2 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -28,7 +28,7 @@ Morozov, and Arnur Nigmetov. Basic example ------------- -This example computes the 1-Wasserstein distance from 2 persistence diagrams with euclidean ground metric. +This example computes the 1-Wasserstein distance from 2 persistence diagrams with Euclidean ground metric. Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values. .. testcode:: diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index b1cfd588..13102094 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -67,7 +67,7 @@ def wasserstein_distance(X, Y, order=2., internal_p=2.): :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. :param order: exponent for Wasserstein; Default value is 2. - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (euclidean norm). + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. :rtype: float ''' -- cgit v1.2.3 From 486fc4b560c61e936e6aae83ce90994f318517df Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 8 Feb 2020 23:23:50 +0100 Subject: Add tensorflow to the circleci docker image Needed for perslay --- Dockerfile_for_circleci_image | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index b7d0dcca..ebd2f366 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -58,7 +58,8 @@ RUN pip3 install \ scikit-learn \ sphinx \ sphinx-paramlinks \ - sphinxcontrib-bibtex + sphinxcontrib-bibtex \ + tensorflow # apt clean up RUN apt autoremove && rm -rf /var/lib/apt/lists/* -- cgit v1.2.3 From f2c85ed1fd87f9ca50b1ed80135b6eea21d08c33 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 10 Feb 2020 11:05:40 +0100 Subject: Move next_release in for_dev directory. 
Add a for_dev/for_maintainer directory to explain how to create a GUDHI release --- CMakeLists.txt | 2 + code_conventions.md | 26 ------ for_dev/code_conventions.md | 26 ++++++ .../for_maintainers/new_gudhi_version_creation.md | 97 ++++++++++++++++++++++ for_dev/for_maintainers/next_release_template.md | 28 +++++++ for_dev/next_release.md | 28 +++++++ next_release.md | 14 ---- 7 files changed, 181 insertions(+), 40 deletions(-) delete mode 100644 code_conventions.md create mode 100644 for_dev/code_conventions.md create mode 100644 for_dev/for_maintainers/new_gudhi_version_creation.md create mode 100644 for_dev/for_maintainers/next_release_template.md create mode 100644 for_dev/next_release.md delete mode 100644 next_release.md diff --git a/CMakeLists.txt b/CMakeLists.txt index 5dcc6803..d9244dc0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,5 +65,7 @@ include(GUDHI_user_version_target) # For "make doxygen" - Requires GUDHI_USER_VERSION_DIR to be set - Done in GUDHI_user_version_target for dev version include(GUDHI_doxygen_target) +configure_file(${CMAKE_SOURCE_DIR}/for_dev/for_maintainers/new_gudhi_version_creation.md "${CMAKE_CURRENT_BINARY_DIR}/" @ONLY) + message("++ GUDHI_MODULES list is:\"${GUDHI_MODULES}\"") message("++ GUDHI_MISSING_MODULES list is:\"${GUDHI_MISSING_MODULES}\"") diff --git a/code_conventions.md b/code_conventions.md deleted file mode 100644 index 5882f78e..00000000 --- a/code_conventions.md +++ /dev/null @@ -1,26 +0,0 @@ -# Naming conventions - -## C++ - -### In the code: -* The classes and functions of a package should be in a sub-namespace of the `Gudhi` namespace. The sub-namespace names are in lowercase and use underscore separators. E.g. `Gudhi::package_name::` -* Concepts are named with camel case starting with uppercase. E.g. `PersistentHomology` for the concept of Persitence homology. -* Classes start with an uppercase letter and use underscore separators. E.g. `Skeleton_blocker_contractor`. -* Member functions and free functions are in lowercase and use underscore separators. E.g. `int num_vertices()`. -* Constants and macros are in uppercase. -* Macros should begin with the prefix `GUDHI_`. - -### File names: -* All headers are named *.h and all sources are named *.cpp. -* If a single class or function is provided in a file, its name (with the same letter case) should be used for the file name. -* If a file does not contain a single class, its name should not begin with a capital letter. -* Test files should be called `test_[what_is_tested].cpp`. E.g. `test_sparsify_point_set.cpp` -* Example files should be called `example_[what_it_is].cpp`. E.g. `example_sparsify_point_set.cpp` - -### In CMakeLists.txt files: -* The name of the "project" should be in this form: `Package_[tests|examples|…]`. E.g. `project(Simplex_tree_examples)`. -* The name if each "target" (first parameter of add_executable) should be in this form: `Package_{name of the cpp file without extension}`. E.g `add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)`. - -## Python - -In progress... \ No newline at end of file diff --git a/for_dev/code_conventions.md b/for_dev/code_conventions.md new file mode 100644 index 00000000..5882f78e --- /dev/null +++ b/for_dev/code_conventions.md @@ -0,0 +1,26 @@ +# Naming conventions + +## C++ + +### In the code: +* The classes and functions of a package should be in a sub-namespace of the `Gudhi` namespace. The sub-namespace names are in lowercase and use underscore separators. E.g. 
`Gudhi::package_name::` +* Concepts are named with camel case starting with uppercase. E.g. `PersistentHomology` for the concept of Persistence homology. +* Classes start with an uppercase letter and use underscore separators. E.g. `Skeleton_blocker_contractor`. +* Member functions and free functions are in lowercase and use underscore separators. E.g. `int num_vertices()`. +* Constants and macros are in uppercase. +* Macros should begin with the prefix `GUDHI_`. + +### File names: +* All headers are named *.h and all sources are named *.cpp. +* If a single class or function is provided in a file, its name (with the same letter case) should be used for the file name. +* If a file does not contain a single class, its name should not begin with a capital letter. +* Test files should be called `test_[what_is_tested].cpp`. E.g. `test_sparsify_point_set.cpp` +* Example files should be called `example_[what_it_is].cpp`. E.g. `example_sparsify_point_set.cpp` + +### In CMakeLists.txt files: +* The name of the "project" should be in this form: `Package_[tests|examples|…]`. E.g. `project(Simplex_tree_examples)`. +* The name of each "target" (first parameter of add_executable) should be in this form: `Package_{name of the cpp file without extension}`. E.g. `add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)`. + +## Python + +In progress... \ No newline at end of file diff --git a/for_dev/for_maintainers/new_gudhi_version_creation.md b/for_dev/for_maintainers/new_gudhi_version_creation.md new file mode 100644 index 00000000..74d818f3 --- /dev/null +++ b/for_dev/for_maintainers/new_gudhi_version_creation.md @@ -0,0 +1,97 @@ +# Create a new GUDHI version + +We will consider that all operations will be performed in a brand new clone of the main project: +```bash +git clone https://github.com/GUDHI/gudhi-devel.git +cd gudhi-devel +``` + +## Version file modification + +**Edit the file CMakeGUDHIVersion.txt**, and increment the major, minor, or patch version number, depending on the nature of the new delivery. +```bash +# cf. .gitignore - ignore this if it is a fresh clone version +rm -rf data/points/COIL_database/lucky_cat.off_dist data/points/COIL_database/lucky_cat.off_sc.dot data/points/KleinBottle5D.off_dist data/points/KleinBottle5D.off_sc.dot data/points/human.off_dist data/points/human.off_sc.off data/points/human.off_sc.txt +``` + +Check in the modifications, then build and test the version: +```bash +mkdir build +cd build +cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 .. +make user_version +date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt +tar -czvf gudhi.@GUDHI_VERSION@.tar.gz gudhi.@GUDHI_VERSION@ +md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt +sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt +sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt + +make -j all test +``` + +***[Check there are no errors]*** + +## Create the documentation +```bash +mkdir gudhi.doc.@GUDHI_VERSION@ +make doxygen 2>&1 | tee dox.log && grep warning dox.log +``` + +***[Check there are no errors, and review the warnings]*** + +```bash +cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp +cd gudhi.@GUDHI_VERSION@ +rm -rf build; mkdir build; cd build; cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 .. +export LC_ALL=en_US.UTF-8 # cf.
bug +make sphinx +``` + +***[Check there are no errors]*** + +```bash +cp -R python/sphinx ../../gudhi.doc.@GUDHI_VERSION@/python +cd ../.. +tar -czvf gudhi.doc.@GUDHI_VERSION@.tar.gz gudhi.doc.@GUDHI_VERSION@ + +cd gudhi.@GUDHI_VERSION@/build +make all test +``` + +***[Check there are no errors]*** + +## Upload the documentation + +Upload by FTP the content of the directory gudhi.doc.@GUDHI_VERSION@/cpp to a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/doc/@GUDHI_VERSION@ + +Upload by FTP the content of the directory gudhi.doc.@GUDHI_VERSION@/python to a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/python/@GUDHI_VERSION@ + +Through SSH, make the **latest** link point to your new version of the documentation: +```bash +ssh ForgeLogin@scm.gforge.inria.fr +cd /home/groups/gudhi/htdocs/doc +rm latest +ln -s @GUDHI_VERSION@ latest +cd /home/groups/gudhi/htdocs/python +rm latest +ln -s @GUDHI_VERSION@ latest +``` + +## Put a version label on files + +* Go to the page https://github.com/GUDHI/gudhi-devel/releases/new +* Name the tag: tags/gudhi-release-@GUDHI_VERSION@ +* Name the release GUDHI @GUDHI_VERSION@ +* Write the release note +* Drag and drop the *gudhi.@GUDHI_VERSION@.tar.gz*, *md5sum.txt*, *sha256sum.txt*, *sha512sum.txt* files +* Tick the *This is a pre-release* checkbox if this is a release candidate (untick if this is an official version) +* Click the *Publish the release* button + +***[Where X, Y and Z correspond respectively to the major, minor, and patch version numbers]*** + + +===Mail sending=== +Send version mail to the following lists : +gudhi-devel@lists.gforge.inria.fr +gudhi-users@lists.gforge.inria.fr (not for release candidate) + diff --git a/for_dev/for_maintainers/next_release_template.md b/for_dev/for_maintainers/next_release_template.md new file mode 100644 index 00000000..a2805a55 --- /dev/null +++ b/for_dev/for_maintainers/next_release_template.md @@ -0,0 +1,28 @@ +We are pleased to announce the release 3.X.X of the GUDHI library. + +As a major new feature, the GUDHI library now offers ... + +We are now using GitHub to develop the GUDHI library; do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz). + +Below is a list of changes made since GUDHI 3.X-1.X-1: + +- [Module](link) + - ... + +- [Module](link) + - ... + +- Miscellaneous + - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. + +All modules are distributed under the terms of the MIT license. +However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. + +We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. + +We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. + +Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks.
+ +For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). + diff --git a/for_dev/next_release.md b/for_dev/next_release.md new file mode 100644 index 00000000..a2805a55 --- /dev/null +++ b/for_dev/next_release.md @@ -0,0 +1,28 @@ +We are pleased to announce the release 3.X.X of the GUDHI library. + +As a major new feature, the GUDHI library now offers ... + +We are now using GitHub to develop the GUDHI library; do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz). + +Below is a list of changes made since GUDHI 3.X-1.X-1: + +- [Module](link) + - ... + +- [Module](link) + - ... + +- Miscellaneous + - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. + +All modules are distributed under the terms of the MIT license. +However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. + +We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. + +We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. + +Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks.
- -For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/3.1.1/installation.html) or [Python](https://gudhi.inria.fr/python/3.1.1/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). -- cgit v1.2.3 From f7317664c051dd1f49861c2e22c3bf3ca471052c Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 10 Feb 2020 13:21:01 +0100 Subject: Gudhi version 3.1.1 --- Dockerfile_gudhi_installation | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation index d5d86338..33864d11 100644 --- a/Dockerfile_gudhi_installation +++ b/Dockerfile_gudhi_installation @@ -57,11 +57,11 @@ RUN pip3 install \ # apt clean up RUN apt autoremove && rm -rf /var/lib/apt/lists/* -RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.1.0/gudhi.3.1.0.tar.gz" \ -&& tar xf gudhi.3.1.0.tar.gz \ -&& cd gudhi.3.1.0 \ +RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.1.1/gudhi.3.1.1.tar.gz" \ +&& tar xf gudhi.3.1.1.tar.gz \ +&& cd gudhi.3.1.1 \ && mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_PYTHON=OFF -DPython_ADDITIONAL_VERSIONS=3 .. \ && make all test install \ && cmake -DWITH_GUDHI_PYTHON=ON . \ && cd python \ -&& python3 setup.py install \ No newline at end of file +&& python3 setup.py install -- cgit v1.2.3 From a3b15cf6c7bcdcc815c3c9a4a1d6876a5b29873f Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 10 Feb 2020 14:14:47 +0100 Subject: Markdown was not correct --- for_dev/for_maintainers/new_gudhi_version_creation.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/for_dev/for_maintainers/new_gudhi_version_creation.md b/for_dev/for_maintainers/new_gudhi_version_creation.md index 74d818f3..4a40f373 100644 --- a/for_dev/for_maintainers/new_gudhi_version_creation.md +++ b/for_dev/for_maintainers/new_gudhi_version_creation.md @@ -90,8 +90,8 @@ ln -s @GUDHI_VERSION@ latest ***[Where X, Y and Z correspond respectively to the major, minor, and patch version numbers]*** -===Mail sending=== +## Mail sending Send version mail to the following lists : -gudhi-devel@lists.gforge.inria.fr -gudhi-users@lists.gforge.inria.fr (not for release candidate) +* gudhi-devel@lists.gforge.inria.fr +* gudhi-users@lists.gforge.inria.fr (not for release candidate) -- cgit v1.2.3 From ee0f12f1df406c81c6ad860c494eed908021fad9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 8 Feb 2020 19:54:46 +0100 Subject: Use setuptools.find_packages --- src/python/setup.py.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/setup.py.in b/src/python/setup.py.in index f993165c..bd7fb180 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -8,7 +8,7 @@ - YYYY/MM Author: Description of the modification """ -from setuptools import setup, Extension +from setuptools import setup, Extension, find_packages from Cython.Build import cythonize from numpy import get_include as numpy_get_include import sys @@ -44,7 +44,7 @@ for module in modules: setup( name = 'gudhi', - packages=["gudhi","gudhi.representations"], + packages=find_packages(), # find_namespace_packages(include=["gudhi*"]) author='GUDHI Editorial Board', author_email='gudhi-contact@lists.gforge.inria.fr', version='@GUDHI_VERSION@', -- cgit v1.2.3 From d6f3165831d20bf3a91f1ff7e9734a574eaa567a Mon Sep 17 00:00:00 2001
From: Marc Glisse Date: Tue, 11 Feb 2020 13:06:48 +0100 Subject: License and author --- src/python/gudhi/hera.cc | 13 +++++++++++-- src/python/test/test_wasserstein_distance.py | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 61f0da10..0d562b4c 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -1,9 +1,19 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Marc Glisse + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + #include #include #include -#include +#include // Hera #include @@ -41,7 +51,6 @@ double wasserstein_distance( PYBIND11_MODULE(hera, m) { m.def("wasserstein_distance", &wasserstein_distance, py::arg("X"), py::arg("Y"), - // Should we name those q, p and d instead? py::arg("order") = 1, py::arg("internal_p") = std::numeric_limits::infinity(), py::arg("delta") = .01, diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 4bc7114e..6a6b217b 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -1,6 +1,6 @@ """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. - Author(s): Theo Lacombe + Author(s): Theo Lacombe, Marc Glisse Copyright (C) 2019 Inria -- cgit v1.2.3 From 2eb23726256af164282830b21561b11db9bdde39 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 13:40:24 +0100 Subject: Code review: roll back to cout for output utilities --- src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 2 +- src/common/include/gudhi/writing_persistence_to_file.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index f556a064..0f1876d0 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -561,7 +561,7 @@ class Persistent_cohomology { * p1*...*pr is the product of prime numbers pi such that the homology * feature exists in homology with Z/piZ coefficients. 
*/ - void output_diagram(std::ostream& ostream = std::clog) { + void output_diagram(std::ostream& ostream = std::cout) { cmp_intervals_by_length cmp(cpx_); std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); bool has_infinity = std::numeric_limits::has_infinity; diff --git a/src/common/include/gudhi/writing_persistence_to_file.h b/src/common/include/gudhi/writing_persistence_to_file.h index cdd8be0a..2e36b831 100644 --- a/src/common/include/gudhi/writing_persistence_to_file.h +++ b/src/common/include/gudhi/writing_persistence_to_file.h @@ -94,7 +94,7 @@ class Persistence_interval_common { **/ template void write_persistence_intervals_to_stream(const Persistence_interval_range& intervals, - std::ostream& out = std::clog) { + std::ostream& out = std::cout) { for (auto interval : intervals) { out << interval << "\n"; } -- cgit v1.2.3 From 91223f6158607dfbb94e38b69cc1ec9599a9cf19 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 14:31:48 +0100 Subject: Add resources: default copyright files (Python and C++ versions) and a github guide for GUDHI contributors --- for_dev/copyright_template.h | 14 +++ for_dev/copyright_template.py | 10 ++ .../how_to_use_github_to_contribute_to_gudhi.md | 102 +++++++++++++++++++++ 3 files changed, 126 insertions(+) create mode 100644 for_dev/copyright_template.h create mode 100644 for_dev/copyright_template.py create mode 100644 for_dev/how_to_use_github_to_contribute_to_gudhi.md diff --git a/for_dev/copyright_template.h b/for_dev/copyright_template.h new file mode 100644 index 00000000..30034f1b --- /dev/null +++ b/for_dev/copyright_template.h @@ -0,0 +1,14 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): [AUTHOR NAME] + * + * Copyright (C) [YEAR] [COPYRIGHT] + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#ifndef [FILE_NAME]_H_ +#define [FILE_NAME]_H_ + +#endif // [FILE_NAME]_H_ \ No newline at end of file diff --git a/for_dev/copyright_template.py b/for_dev/copyright_template.py new file mode 100644 index 00000000..19de05e2 --- /dev/null +++ b/for_dev/copyright_template.py @@ -0,0 +1,10 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - +# which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license +# details. +# Author(s): [AUTHOR NAME] +# +# Copyright (C) [YEAR] [COPYRIGHT] +# +# Modification(s): +# - YYYY/MM Author: Description of the modification diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md new file mode 100644 index 00000000..86506602 --- /dev/null +++ b/for_dev/how_to_use_github_to_contribute_to_gudhi.md @@ -0,0 +1,102 @@ +# How to use github to contribute to gudhi + +Similar information is available in many places: +https://jarv.is/notes/how-to-pull-request-fork-github/ (this one is using `upstream/master` when creating a new branch) +https://help.github.com/en/github/getting-started-with-github/fork-a-repo +https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ +https://gist.github.com/Chaser324/ce0505fbed06b947d962 +etc + +## Get a github account +I assume the account is called "LOGIN", please replace as appropriate below. Log in to github.com using this account.
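Before the first commit, it also helps to make sure git knows who you are, so that your work is attributed to the LOGIN account. A minimal sketch, assuming a standard git installation; the name and email below are placeholders to replace with your own:

```bash
# identify yourself to git; use the email registered with your github account
git config --global user.name "LOGIN"
git config --global user.email "login@example.com"
```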
+ +## Fork GUDHI/gudhi-devel project +Go to https://github.com/GUDHI/gudhi-devel and click on "fork" (top right). Feel free to also click on the star next to it to show you like the project! +You can see your fork at https://github.com/LOGIN/gudhi-devel + +## Create a local clone on your computer +```bash +git clone https://github.com/LOGIN/gudhi-devel.git +``` + +This creates a directory gudhi-devel, which you are free to move around or rename. For the following, change to that directory: +```bash +cd gudhi-devel +``` + +## Configuring a remote for a fork +```bash +git remote add upstream https://github.com/GUDHI/gudhi-devel.git +``` + +because you want to see the real gudhi, not just your clone. +(It is perfectly possible to do things in the reverse order, clone from GUDHI and add the one in LOGIN as extra remote, but the names of the remotes may not match the rest of this document. You can change the name of a remote with `git remote rename oldname newname`) + +## Optional remotes +Optional, if you are interested in one of the old branches +git remote add oldies https://github.com/GUDHI/branches.git + +Or if you want to spy on someone's work. I assume the someone's account is called "SOMEONE" +git remote add someone https://github.com/SOMEONE/gudhi-devel.git + +## Download +```bash +git fetch -p --all +``` +This is a command you can run quite regularly. +It tells git to check all that happened on github. +It is safe, it will not mess with your files. + +## Create a branch, based on the current master +git checkout -b some-fancy-name --no-track upstream/master +Your local branch "master" and the one on your github clone are useless and often outdated, but for technical reasons there has to exist at least one branch at all times, it might as well be that one. upstream/master is the real deal, that's what you want to base your new branch on. + +## The real coding is here! +Edit files, test, etc. + +## Commit your changes (locally) +The basic command is just `git commit`, but it will do nothing by default. +You need `git add my_new_file` for every new file you want to commit. +And usually you'll want to use `git commit -a` so that all files that git already knows about and that have been modified get committed. + +## Push your changes (remotely) +```bash +git push -u origin some-fancy-name +``` +This puts a copy of your branch on your online clone of gudhi-devel. +Because of `-u`, it will remember where you like to push this branch, and next time you can just use `git push`. + +## Play again! +Possibly iterate a few times, add more commits and push them. + +## Your pull request is ready +Get your web browser to https://github.com/LOGIN/gudhi-devel, click on the button that says **Branch: some-name** (below the number of commits, above the list of files) and select the branch you are so proud of. +Click on **New pull request** next to it. + +## Follow the instructions ;-) +Note that if your branch is not quite ready, you can make a **draft pull request** (see the arrow next to the confirmation button), and later you will have access to a button to say that the branch is ready for reviews now. +Draft pull requests can be a way to advertise that you are working on something, and possibly ask others for comments or help. + +## Code review +Make sure you follow the discussion on your pull request, answer questions, take comments into account. +You can keep pushing new commits on your branch to your fork of gudhi-devel, the pull request will automatically notice the new commits there. 
+There is no need to create a new pull request. +Once the branch is under review, fixing issues is good, but please refrain from adding extra features, that just makes the reviewers' job harder and thus slower. +You may want to look at https://github.com/settings/notifications (and other settings nearby) if you don't receive emails when people comment on your pull request. +Some bold reviewer might make changes to your branch. You will then need `git pull` for your local branch to reflect those. + +## Your work is merged! +Once your pull request has been closed (your branch merged), you can remove your branch, both locally +```bash +git checkout master # or any other branch, but you cannot remove the branch you are currently in +git branch -d some-fancy-name # local branch delete +git push origin --delete some-fancy-name # remote branch delete +``` +If you add @VincentRouvreau or @mglisse as collaborator (https://github.com/LOGIN/gudhi-devel/settings/collaboration), they may remove the branch on your clone at the same time as they merge the branch, so you only have the local one to remove (or keep if you are nostalgic). + +## Keep in touch +Create a new branch and keep contributing! +Do not try to reuse an old branch that has already been merged. +Make sure you run the fetch command just before creating any new branch, so you don't base it on some outdated version of master. +You can also work on several branches at the same time, using `git checkout some-fancy-name` and `git checkout name-of-other-branch` to switch between them (commit before switching or things may get complicated). + -- cgit v1.2.3 From 9a182406ff9a419931d7dc20d900515fda2c0ef0 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 14:37:36 +0100 Subject: Fix some typos --- .../how_to_use_github_to_contribute_to_gudhi.md | 23 ++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md index 86506602..b9a7f8f7 100644 --- a/for_dev/how_to_use_github_to_contribute_to_gudhi.md +++ b/for_dev/how_to_use_github_to_contribute_to_gudhi.md @@ -1,17 +1,18 @@ # How to use github to contribute to gudhi Similar information is available in many places: -https://jarv.is/notes/how-to-pull-request-fork-github/ (this one is using `upstream/master` when creating a new branch) -https://help.github.com/en/github/getting-started-with-github/fork-a-repo -https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ -https://gist.github.com/Chaser324/ce0505fbed06b947d962 -etc +* https://jarv.is/notes/how-to-pull-request-fork-github/ (this one is using `upstream/master` when creating a new branch) +* https://help.github.com/en/github/getting-started-with-github/fork-a-repo +* https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ +* https://gist.github.com/Chaser324/ce0505fbed06b947d962 +* etc ## Get a github account -I assume the account is called "LOGIN", please replace as appropriate below. Log in to github.com using this account. +I assume the account is called **LOGIN**, please replace as appropriate below. Log in to github.com using this account. ## Fork GUDHI/gudhi-devel project -Go to https://github.com/GUDHI/gudhi-devel and click on "fork" (top right). Feel free to also click on the star next to it to show you like the project! +Go to https://github.com/GUDHI/gudhi-devel and click on **fork** (top right). 
+Feel free to also click on the star next to it to show you like the project! You can see your fork at https://github.com/LOGIN/gudhi-devel ## Create a local clone on your computer @@ -36,10 +37,10 @@ because you want to see the real gudhi, not just your clone. Optional, if you are interested in one of the old branches git remote add oldies https://github.com/GUDHI/branches.git -Or if you want to spy on someone's work. I assume the someone's account is called "SOMEONE" +Or if you want to spy on someone's work. I assume the someone's account is called **SOMEONE** git remote add someone https://github.com/SOMEONE/gudhi-devel.git -## Download +## Stay up-to-date ```bash git fetch -p --all ``` @@ -48,8 +49,10 @@ It tells git to check all that happened on github. It is safe, it will not mess with your files. ## Create a branch, based on the current master +```bash git checkout -b some-fancy-name --no-track upstream/master -Your local branch "master" and the one on your github clone are useless and often outdated, but for technical reasons there has to exist at least one branch at all times, it might as well be that one. upstream/master is the real deal, that's what you want to base your new branch on. +``` +Your local branch `master` and the one on your github clone are useless and often outdated, but for technical reasons there has to exist at least one branch at all times, it might as well be that one. upstream/master is the real deal, that's what you want to base your new branch on. ## The real coding is here! Edit files, test, etc. -- cgit v1.2.3 From acdd28ebf3103c133c5a985219972bec2c7a3460 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 14:41:35 +0100 Subject: Fix some typos --- for_dev/how_to_use_github_to_contribute_to_gudhi.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md index b9a7f8f7..1ca40386 100644 --- a/for_dev/how_to_use_github_to_contribute_to_gudhi.md +++ b/for_dev/how_to_use_github_to_contribute_to_gudhi.md @@ -35,10 +35,14 @@ because you want to see the real gudhi, not just your clone. ## Optional remotes Optional, if you are interested in one of the old branches +```bash git remote add oldies https://github.com/GUDHI/branches.git +``` Or if you want to spy on someone's work. I assume the someone's account is called **SOMEONE** +```bash git remote add someone https://github.com/SOMEONE/gudhi-devel.git +``` ## Stay up-to-date ```bash @@ -89,7 +93,7 @@ You may want to look at https://github.com/settings/notifications (and other set Some bold reviewer might make changes to your branch. You will then need `git pull` for your local branch to reflect those. ## Your work is merged! -Once your pull request has been closed (your branch merged), you can remove your branch, both locally +Once your pull request has been closed (your branch merged), you can remove your branch, both locally and also the branch on your github fork: ```bash git checkout master # or any other branch, but you cannot remove the branch you are currently in git branch -d some-fancy-name # local branch delete @@ -99,7 +103,7 @@ If you add @VincentRouvreau or @mglisse as collaborator (https://github.com/LOGI ## Keep in touch Create a new branch and keep contributing! + Do not try to reuse an old branch that has already been merged. Make sure you run the fetch command just before creating any new branch, so you don't base it on some outdated version of master. 
You can also work on several branches at the same time, using `git checkout some-fancy-name` and `git checkout name-of-other-branch` to switch between them (commit before switching or things may get complicated). - -- cgit v1.2.3 From 3253abd27129595f7fcd2be4c2285a93aea98690 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Tue, 11 Feb 2020 17:05:08 +0100 Subject: Update src/python/gudhi/simplex_tree.pyx Co-Authored-By: Marc Glisse --- src/python/gudhi/simplex_tree.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 22978b6e..308b3d2d 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -219,7 +219,7 @@ cdef class SimplexTree: cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end() while it != end: - yield(self.get_ptr().get_simplex_and_filtration(dereference(it))) + yield self.get_ptr().get_simplex_and_filtration(dereference(it)) preincrement(it) def get_skeleton(self, dimension): -- cgit v1.2.3 From 3ea44646f04648d1a456a0fb9526035101fc17ea Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 17:20:24 +0100 Subject: Code review: non-optimal way to test filtration generator --- src/python/test/test_alpha_complex.py | 49 ++++++------- src/python/test/test_euclidean_witness_complex.py | 45 +++++------- src/python/test/test_rips_complex.py | 50 +++++++------ src/python/test/test_simplex_tree.py | 88 +++++++++++------------ src/python/test/test_tangential_complex.py | 17 +++-- 5 files changed, 117 insertions(+), 132 deletions(-) diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py index ceead919..77121302 100755 --- a/src/python/test/test_alpha_complex.py +++ b/src/python/test/test_alpha_complex.py @@ -40,20 +40,19 @@ def test_infinite_alpha(): assert simplex_tree.num_simplices() == 11 assert simplex_tree.num_vertices() == 4 - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([3], 0.0)) - assert(next(filtration_generator) == ([0, 1], 0.25)) - assert(next(filtration_generator) == ([0, 2], 0.25)) - assert(next(filtration_generator) == ([1, 3], 0.25)) - assert(next(filtration_generator) == ([2, 3], 0.25)) - assert(next(filtration_generator) == ([1, 2], 0.5)) - assert(next(filtration_generator) == ([0, 1, 2], 0.5)) - assert(next(filtration_generator) == ([1, 2, 3], 0.5)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([3], 0.0), + ([0, 1], 0.25), + ([0, 2], 0.25), + ([1, 3], 0.25), + ([2, 3], 0.25), + ([1, 2], 0.5), + ([0, 1, 2], 0.5), + ([1, 2, 3], 0.5), + ] assert simplex_tree.get_star([0]) == [ ([0], 0.0), @@ -107,18 +106,16 @@ def test_filtered_alpha(): else: assert False - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([3], 0.0)) - assert(next(filtration_generator) == ([0, 1], 0.25)) - assert(next(filtration_generator) == ([0, 2], 0.25)) - assert(next(filtration_generator) == ([1, 3], 0.25)) - 
assert(next(filtration_generator) == ([2, 3], 0.25)) - with pytest.raises(StopIteration): - next(filtration_generator) - + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([3], 0.0), + ([0, 1], 0.25), + ([0, 2], 0.25), + ([1, 3], 0.25), + ([2, 3], 0.25), + ] assert simplex_tree.get_star([0]) == [([0], 0.0), ([0, 1], 0.25), ([0, 2], 0.25)] assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)] diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py index 16ff1ef4..47196a2a 100755 --- a/src/python/test/test_euclidean_witness_complex.py +++ b/src/python/test/test_euclidean_witness_complex.py @@ -41,16 +41,15 @@ def test_witness_complex(): assert landmarks[1] == euclidean_witness_complex.get_point(1) assert landmarks[2] == euclidean_witness_complex.get_point(2) - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([0, 1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([0, 2], 0.0)) - assert(next(filtration_generator) == ([1, 2], 0.0)) - assert(next(filtration_generator) == ([0, 1, 2], 0.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([0, 1], 0.0), + ([2], 0.0), + ([0, 2], 0.0), + ([1, 2], 0.0), + ([0, 1, 2], 0.0), + ] def test_empty_euclidean_strong_witness_complex(): @@ -80,24 +79,18 @@ def test_strong_witness_complex(): assert landmarks[1] == euclidean_strong_witness_complex.get_point(1) assert landmarks[2] == euclidean_strong_witness_complex.get_point(2) - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [([0], 0.0), ([1], 0.0), ([2], 0.0)] simplex_tree = euclidean_strong_witness_complex.create_simplex_tree( max_alpha_square=100.0 ) - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([1, 2], 15.0)) - assert(next(filtration_generator) == ([0, 2], 34.0)) - assert(next(filtration_generator) == ([0, 1], 37.0)) - assert(next(filtration_generator) == ([0, 1, 2], 37.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([1, 2], 15.0), + ([0, 2], 34.0), + ([0, 1], 37.0), + ([0, 1, 2], 37.0), + ] diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py index bd31c47c..f5c086cb 100755 --- a/src/python/test/test_rips_complex.py +++ b/src/python/test/test_rips_complex.py @@ -33,19 +33,18 @@ def test_rips_from_points(): assert simplex_tree.num_simplices() == 10 assert simplex_tree.num_vertices() == 4 - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([3], 0.0)) - assert(next(filtration_generator) == 
([0, 1], 1.0)) - assert(next(filtration_generator) == ([0, 2], 1.0)) - assert(next(filtration_generator) == ([1, 3], 1.0)) - assert(next(filtration_generator) == ([2, 3], 1.0)) - assert(next(filtration_generator) == ([1, 2], 1.4142135623730951)) - assert(next(filtration_generator) == ([0, 3], 1.4142135623730951)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([3], 0.0), + ([0, 1], 1.0), + ([0, 2], 1.0), + ([1, 3], 1.0), + ([2, 3], 1.0), + ([1, 2], 1.4142135623730951), + ([0, 3], 1.4142135623730951), + ] assert simplex_tree.get_star([0]) == [ ([0], 0.0), @@ -98,19 +97,18 @@ def test_rips_from_distance_matrix(): assert simplex_tree.num_simplices() == 10 assert simplex_tree.num_vertices() == 4 - filtration_generator = simplex_tree.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([3], 0.0)) - assert(next(filtration_generator) == ([0, 1], 1.0)) - assert(next(filtration_generator) == ([0, 2], 1.0)) - assert(next(filtration_generator) == ([1, 3], 1.0)) - assert(next(filtration_generator) == ([2, 3], 1.0)) - assert(next(filtration_generator) == ([1, 2], 1.4142135623730951)) - assert(next(filtration_generator) == ([0, 3], 1.4142135623730951)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(simplex_tree.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([3], 0.0), + ([0, 1], 1.0), + ([0, 2], 1.0), + ([1, 3], 1.0), + ([2, 3], 1.0), + ([1, 2], 1.4142135623730951), + ([0, 3], 1.4142135623730951), + ] assert simplex_tree.get_star([0]) == [ ([0], 0.0), diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 0f3db7ac..fa42f2ac 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -128,57 +128,55 @@ def test_expansion(): assert st.num_vertices() == 7 assert st.num_simplices() == 17 - filtration_generator = st.get_filtration() - assert(next(filtration_generator) == ([2], 0.1)) - assert(next(filtration_generator) == ([3], 0.1)) - assert(next(filtration_generator) == ([2, 3], 0.1)) - assert(next(filtration_generator) == ([0], 0.2)) - assert(next(filtration_generator) == ([0, 2], 0.2)) - assert(next(filtration_generator) == ([1], 0.3)) - assert(next(filtration_generator) == ([0, 1], 0.3)) - assert(next(filtration_generator) == ([1, 3], 0.4)) - assert(next(filtration_generator) == ([1, 2], 0.5)) - assert(next(filtration_generator) == ([5], 0.6)) - assert(next(filtration_generator) == ([6], 0.6)) - assert(next(filtration_generator) == ([5, 6], 0.6)) - assert(next(filtration_generator) == ([4], 0.7)) - assert(next(filtration_generator) == ([2, 4], 0.7)) - assert(next(filtration_generator) == ([0, 3], 0.8)) - assert(next(filtration_generator) == ([4, 6], 0.9)) - assert(next(filtration_generator) == ([3, 6], 1.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(st.get_filtration()) == [ + ([2], 0.1), + ([3], 0.1), + ([2, 3], 0.1), + ([0], 0.2), + ([0, 2], 0.2), + ([1], 0.3), + ([0, 1], 0.3), + ([1, 3], 0.4), + ([1, 2], 0.5), + ([5], 0.6), + ([6], 0.6), + ([5, 6], 0.6), + ([4], 0.7), + ([2, 4], 0.7), + ([0, 3], 0.8), + ([4, 6], 0.9), + ([3, 6], 1.0), + ] st.expansion(3) assert st.num_vertices() == 7 assert st.num_simplices() == 22 st.initialize_filtration() - filtration_generator = 
st.get_filtration() - assert(next(filtration_generator) == ([2], 0.1)) - assert(next(filtration_generator) == ([3], 0.1)) - assert(next(filtration_generator) == ([2, 3], 0.1)) - assert(next(filtration_generator) == ([0], 0.2)) - assert(next(filtration_generator) == ([0, 2], 0.2)) - assert(next(filtration_generator) == ([1], 0.3)) - assert(next(filtration_generator) == ([0, 1], 0.3)) - assert(next(filtration_generator) == ([1, 3], 0.4)) - assert(next(filtration_generator) == ([1, 2], 0.5)) - assert(next(filtration_generator) == ([0, 1, 2], 0.5)) - assert(next(filtration_generator) == ([1, 2, 3], 0.5)) - assert(next(filtration_generator) == ([5], 0.6)) - assert(next(filtration_generator) == ([6], 0.6)) - assert(next(filtration_generator) == ([5, 6], 0.6)) - assert(next(filtration_generator) == ([4], 0.7)) - assert(next(filtration_generator) == ([2, 4], 0.7)) - assert(next(filtration_generator) == ([0, 3], 0.8)) - assert(next(filtration_generator) == ([0, 1, 3], 0.8)) - assert(next(filtration_generator) == ([0, 2, 3], 0.8)) - assert(next(filtration_generator) == ([0, 1, 2, 3], 0.8)) - assert(next(filtration_generator) == ([4, 6], 0.9)) - assert(next(filtration_generator) == ([3, 6], 1.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(st.get_filtration()) == [ + ([2], 0.1), + ([3], 0.1), + ([2, 3], 0.1), + ([0], 0.2), + ([0, 2], 0.2), + ([1], 0.3), + ([0, 1], 0.3), + ([1, 3], 0.4), + ([1, 2], 0.5), + ([0, 1, 2], 0.5), + ([1, 2, 3], 0.5), + ([5], 0.6), + ([6], 0.6), + ([5, 6], 0.6), + ([4], 0.7), + ([2, 4], 0.7), + ([0, 3], 0.8), + ([0, 1, 3], 0.8), + ([0, 2, 3], 0.8), + ([0, 1, 2, 3], 0.8), + ([4, 6], 0.9), + ([3, 6], 1.0), + ] def test_automatic_dimension(): diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py index 90e2c75b..fc500c45 100755 --- a/src/python/test/test_tangential_complex.py +++ b/src/python/test/test_tangential_complex.py @@ -38,15 +38,14 @@ def test_tangential(): assert st.num_simplices() == 6 assert st.num_vertices() == 4 - filtration_generator = st.get_filtration() - assert(next(filtration_generator) == ([0], 0.0)) - assert(next(filtration_generator) == ([1], 0.0)) - assert(next(filtration_generator) == ([2], 0.0)) - assert(next(filtration_generator) == ([0, 2], 0.0)) - assert(next(filtration_generator) == ([3], 0.0)) - assert(next(filtration_generator) == ([1, 3], 0.0)) - with pytest.raises(StopIteration): - next(filtration_generator) + assert list(st.get_filtration()) == [ + ([0], 0.0), + ([1], 0.0), + ([2], 0.0), + ([0, 2], 0.0), + ([3], 0.0), + ([1, 3], 0.0), + ] assert st.get_cofaces([0], 1) == [([0, 2], 0.0)] -- cgit v1.2.3 From b3bd147a06b013efca688ef5dafdfa732a036346 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 11 Feb 2020 17:50:50 +0100 Subject: Add code style tools and modify python copyright with 120 characters [skip ci] --- for_dev/code_conventions.md | 17 ++++++++++++++++- for_dev/copyright_template.py | 6 ++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/for_dev/code_conventions.md b/for_dev/code_conventions.md index 5882f78e..7f9f7471 100644 --- a/for_dev/code_conventions.md +++ b/for_dev/code_conventions.md @@ -21,6 +21,21 @@ * The name of the "project" should be in this form: `Package_[tests|examples|…]`. E.g. `project(Simplex_tree_examples)`. * The name of each "target" (first parameter of add_executable) should be in this form: `Package_{name of the cpp file without extension}`.
E.g. `add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)`. +### Code style +We are using [Google C++ style guide](https://google.github.io/styleguide/cppguide.html) recommendations with 120 characters per line of code. +[clang-format](https://clang.llvm.org/docs/ClangFormat.html) can be used to automatically format your code: +```bash +cd src # there is a .clang-format file with these specifications +clang-format -style=file -i Simplex_tree/include/gudhi/Simplex_tree.h # -i means in place, your file will be modified +``` + ## Python -In progress... \ No newline at end of file +In progress... + +### Code style +We are using [PEP8 Python style guide](https://www.python.org/dev/peps/pep-0008/) recommendations with 120 characters per line of code. +[black](https://black.readthedocs.io/en/stable/) can be used to automatically format your code: +```bash +black -l 120 src/python/example/bottleneck_basic_example.py +``` diff --git a/for_dev/copyright_template.py b/for_dev/copyright_template.py index 19de05e2..667f985d 100644 --- a/for_dev/copyright_template.py +++ b/for_dev/copyright_template.py @@ -1,7 +1,5 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - -# which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license -# details. +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. # Author(s): [AUTHOR NAME] # # Copyright (C) [YEAR] [COPYRIGHT] -- cgit v1.2.3 From 79de1437cb2fa0ab69465a2f2feabe09a12056eb Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Tue, 11 Feb 2020 17:51:40 +0100 Subject: Update src/python/include/Simplex_tree_interface.h Co-Authored-By: Marc Glisse --- src/python/include/Simplex_tree_interface.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index c0bbc3d9..878919cc 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -88,7 +88,7 @@ class Simplex_tree_interface : public Simplex_tree { for (auto vertex : Base::simplex_vertex_range(f_simplex)) { simplex.insert(simplex.begin(), vertex); } - return std::make_pair(simplex, Base::filtration(f_simplex)); + return std::make_pair(std::move(simplex), Base::filtration(f_simplex)); } Filtered_simplices get_skeleton(int dimension) { -- cgit v1.2.3 From ab018a79b1d71b9db17056303785517934cd9157 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 12 Feb 2020 10:08:06 +0100 Subject: Fix code coverage generation --- src/cmake/modules/GUDHI_boost_test.cmake | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmake/modules/GUDHI_boost_test.cmake b/src/cmake/modules/GUDHI_boost_test.cmake index 3b9da78f..4a13404b 100644 --- a/src/cmake/modules/GUDHI_boost_test.cmake +++ b/src/cmake/modules/GUDHI_boost_test.cmake @@ -9,7 +9,6 @@ if (WITH_GUDHI_BOOST_TEST_COVERAGE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg") endif() set(GUDHI_UT_LOG_FORMAT "--log_format=XML") - set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml") set(GUDHI_UT_LOG_LEVEL "--log_level=test_suite") set(GUDHI_UT_REPORT_LEVEL "--report_level=no") else (WITH_GUDHI_BOOST_TEST_COVERAGE) @@ -19,6 +18,10 @@ else (WITH_GUDHI_BOOST_TEST_COVERAGE) endif(WITH_GUDHI_BOOST_TEST_COVERAGE)
function(gudhi_add_boost_test unitary_test) + if (WITH_GUDHI_BOOST_TEST_COVERAGE) + set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml") + endif(WITH_GUDHI_BOOST_TEST_COVERAGE) + target_link_libraries(${unitary_test} Boost::unit_test_framework) add_test(NAME ${unitary_test} COMMAND $ ${GUDHI_UT_LOG_FORMAT} ${GUDHI_UT_LOG_SINK} -- cgit v1.2.3 From 89911f674b79c930a6f936a092748e4070d99e46 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 12 Feb 2020 10:08:45 +0100 Subject: Make boost available even if cmake is < 3.5 --- CMakeLists.txt | 2 +- src/CMakeLists.txt | 2 +- .../modules/GUDHI_third_party_libraries.cmake | 35 ++++++++++++++++++++-- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 298e71ca..d9244dc0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.5) +cmake_minimum_required(VERSION 3.1) project(GUDHIdev) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0e799a3a..561aa049 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.5) +cmake_minimum_required(VERSION 3.1) project(GUDHI) diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 10b2b56a..6f01dc85 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -6,6 +6,38 @@ if(NOT Boost_FOUND) message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.") endif(NOT Boost_FOUND) +# cf. https://cliutils.gitlab.io/modern-cmake/chapters/packages/Boost.html +# This is needed if your Boost version is newer than your CMake version +# or if you have an old version of CMake (<3.5) +if(NOT TARGET Boost::program_options) + add_library(Boost::program_options IMPORTED INTERFACE) + set_property(TARGET Boost::program_options PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIR}) + set_property(TARGET Boost::program_options PROPERTY + INTERFACE_LINK_LIBRARIES ${Boost_LIBRARIES}) +endif() +if(NOT TARGET Boost::filesystem) + add_library(Boost::filesystem IMPORTED INTERFACE) + set_property(TARGET Boost::filesystem PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIR}) + set_property(TARGET Boost::filesystem PROPERTY + INTERFACE_LINK_LIBRARIES ${Boost_LIBRARIES}) +endif() +if(NOT TARGET Boost::unit_test_framework) + add_library(Boost::unit_test_framework IMPORTED INTERFACE) + set_property(TARGET Boost::unit_test_framework PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIR}) + set_property(TARGET Boost::unit_test_framework PROPERTY + INTERFACE_LINK_LIBRARIES ${Boost_LIBRARIES}) +endif() +if(NOT TARGET Boost::system) + add_library(Boost::system IMPORTED INTERFACE) + set_property(TARGET Boost::system PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIR}) + set_property(TARGET Boost::system PROPERTY + INTERFACE_LINK_LIBRARIES ${Boost_LIBRARIES}) +endif() + find_package(GMP) if(GMP_FOUND) INCLUDE_DIRECTORIES(${GMP_INCLUDE_DIR}) @@ -82,9 +114,6 @@ add_definitions( -DBOOST_ALL_DYN_LINK ) # problem on Mac with boost_system and boost_thread add_definitions( -DBOOST_SYSTEM_NO_DEPRECATED ) -#INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS}) -#LINK_DIRECTORIES(${Boost_LIBRARY_DIRS}) - message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS}) message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS}) -- cgit v1.2.3 From 73ad191a7dee054a58e9823c84dce9f1e71995f4 Mon Sep 17 00:00:00 2001 
From: ROUVREAU Vincent Date: Wed, 12 Feb 2020 10:24:25 +0100 Subject: Fix tests according to exception management --- src/python/gudhi/cubical_complex.pyx | 1 + src/python/test/test_cover_complex.py | 4 +++- src/python/test/test_cubical_complex.py | 6 +++--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 31287d15..d5ad1266 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -17,6 +17,7 @@ from libcpp.string cimport string from libcpp cimport bool import errno import os +import sys import numpy as np diff --git a/src/python/test/test_cover_complex.py b/src/python/test/test_cover_complex.py index 32bc5a26..260f6a5c 100755 --- a/src/python/test/test_cover_complex.py +++ b/src/python/test/test_cover_complex.py @@ -9,6 +9,7 @@ """ from gudhi import CoverComplex +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2018 Inria" @@ -24,7 +25,8 @@ def test_empty_constructor(): def test_non_existing_file_read(): # Try to open a non existing file cover = CoverComplex() - assert cover.read_point_cloud("pouetpouettralala.toubiloubabdou") == False + with pytest.raises(FileNotFoundError): + cover.read_point_cloud("pouetpouettralala.toubiloubabdou") def test_files_creation(): diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py index 8c1b2600..fce4875c 100755 --- a/src/python/test/test_cubical_complex.py +++ b/src/python/test/test_cubical_complex.py @@ -10,6 +10,7 @@ from gudhi import CubicalComplex, PeriodicCubicalComplex import numpy as np +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -25,9 +26,8 @@ def test_empty_constructor(): def test_non_existing_perseus_file_constructor(): # Try to open a non existing file - cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou") - assert cub.__is_defined() == False - assert cub.__is_persistence_defined() == False + with pytest.raises(FileNotFoundError): + cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou") def test_dimension_or_perseus_file_constructor(): -- cgit v1.2.3 From 1edb818b38ace05b230319227e60838b796ddfc5 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 11:08:44 +0100 Subject: simplex tree skeleton iterator --- src/python/gudhi/simplex_tree.pxd | 10 +++++++++- src/python/gudhi/simplex_tree.pyx | 15 ++++++--------- src/python/include/Simplex_tree_interface.h | 23 ++++++++++------------- src/python/test/test_simplex_tree.py | 8 ++++---- 4 files changed, 29 insertions(+), 27 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 1b0dc881..66c173a6 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -24,6 +24,13 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface::Simplex_handle": pass + cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface::Skeleton_simplex_iterator": + Simplex_tree_skeleton_iterator() + Simplex_tree_simplex_handle& operator*() + Simplex_tree_skeleton_iterator operator++() + bint operator!=(Simplex_tree_skeleton_iterator) + + cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface": Simplex_tree() double simplex_filtration(vector[int] simplex) @@ -37,7 +44,6 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": 
bool find_simplex(vector[int] simplex) bool insert_simplex_and_subfaces(vector[int] simplex, double filtration) - vector[pair[vector[int], double]] get_skeleton(int dimension) vector[pair[vector[int], double]] get_star(vector[int] simplex) vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) @@ -49,6 +55,8 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() + Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) + Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface>": diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 308b3d2d..efac2d80 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -231,15 +231,12 @@ cdef class SimplexTree: :returns: The (simplices of the) skeleton of a maximum dimension. :rtype: list of tuples(simplex, filtration) """ - cdef vector[pair[vector[int], double]] skeleton \ - = self.get_ptr().get_skeleton(dimension) - ct = [] - for filtered_simplex in skeleton: - v = [] - for vertex in filtered_simplex.first: - v.append(vertex) - ct.append((v, filtered_simplex.second)) - return ct + cdef Simplex_tree_skeleton_iterator it = self.get_ptr().get_skeleton_iterator_begin(dimension) + cdef Simplex_tree_skeleton_iterator end = self.get_ptr().get_skeleton_iterator_end(dimension) + + while it != end: + yield self.get_ptr().get_simplex_and_filtration(dereference(it)) + preincrement(it) def get_star(self, simplex): """This function returns the star of a given N-simplex. 
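With this change, `get_skeleton` yields each `(simplex, filtration)` pair lazily instead of building the whole list on the C++ side. A minimal usage sketch, assuming a gudhi build that includes this branch; the inserted simplex and printed values are illustrative only:

```python
import gudhi

st = gudhi.SimplexTree()
st.insert([0, 1, 2], filtration=4.0)

# the generator can be consumed lazily...
for simplex, filtration in st.get_skeleton(1):
    print(simplex, filtration)

# ...or materialized up front, as the updated tests now do
skeleton = list(st.get_skeleton(1))
```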
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 878919cc..55d5af97 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -35,6 +35,7 @@ class Simplex_tree_interface : public Simplex_tree { using Simplex = std::vector; using Simplex_and_filtration = std::pair; using Filtered_simplices = std::vector; + using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator; public: bool find_simplex(const Simplex& vh) { @@ -91,18 +92,6 @@ class Simplex_tree_interface : public Simplex_tree { return std::make_pair(std::move(simplex), Base::filtration(f_simplex)); } - Filtered_simplices get_skeleton(int dimension) { - Filtered_simplices skeletons; - for (auto f_simplex : Base::skeleton_simplex_range(dimension)) { - Simplex simplex; - for (auto vertex : Base::simplex_vertex_range(f_simplex)) { - simplex.insert(simplex.begin(), vertex); - } - skeletons.push_back(std::make_pair(simplex, Base::filtration(f_simplex))); - } - return skeletons; - } - Filtered_simplices get_star(const Simplex& simplex) { Filtered_simplices star; for (auto f_simplex : Base::star_simplex_range(Base::find(simplex))) { @@ -134,13 +123,21 @@ class Simplex_tree_interface : public Simplex_tree { // Iterator over the simplex tree typename std::vector::const_iterator get_filtration_iterator_begin() { - Base::initialize_filtration(); + // Base::initialize_filtration(); already performed in filtration_simplex_range return Base::filtration_simplex_range().begin(); } typename std::vector::const_iterator get_filtration_iterator_end() { return Base::filtration_simplex_range().end(); } + + Skeleton_simplex_iterator get_skeleton_iterator_begin(int dimension) { + return Base::skeleton_simplex_range(dimension).begin(); + } + + Skeleton_simplex_iterator get_skeleton_iterator_end(int dimension) { + return Base::skeleton_simplex_range(dimension).end(); + } }; } // namespace Gudhi diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index fa42f2ac..eca3807b 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -56,7 +56,7 @@ def test_insertion(): assert st.filtration([1]) == 0.0 # skeleton test - assert st.get_skeleton(2) == [ + assert list(st.get_skeleton(2)) == [ ([0, 1, 2], 4.0), ([0, 1], 0.0), ([0, 2], 4.0), @@ -65,7 +65,7 @@ def test_insertion(): ([1], 0.0), ([2], 4.0), ] - assert st.get_skeleton(1) == [ + assert list(st.get_skeleton(1)) == [ ([0, 1], 0.0), ([0, 2], 4.0), ([0], 0.0), @@ -73,12 +73,12 @@ def test_insertion(): ([1], 0.0), ([2], 4.0), ] - assert st.get_skeleton(0) == [([0], 0.0), ([1], 0.0), ([2], 4.0)] + assert list(st.get_skeleton(0)) == [([0], 0.0), ([1], 0.0), ([2], 4.0)] # remove_maximal_simplex test assert st.get_cofaces([0, 1, 2], 1) == [] st.remove_maximal_simplex([0, 1, 2]) - assert st.get_skeleton(2) == [ + assert list(st.get_skeleton(2)) == [ ([0, 1], 0.0), ([0, 2], 4.0), ([0], 0.0), -- cgit v1.2.3 From 939b2a8bc88d9ed45fd2f01727498042ef137e04 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 16:45:19 +0100 Subject: [skip ci] link to templates --- for_dev/code_conventions.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/for_dev/code_conventions.md b/for_dev/code_conventions.md index 7f9f7471..9724f722 100644 --- a/for_dev/code_conventions.md +++ b/for_dev/code_conventions.md @@ -29,6 +29,9 @@ cd src # there is a .clang-format file with these specifications clang-format -style=file -i 
Simplex_tree/include/gudhi/Simplex_tree.h # -i means in place, your file will be modified ``` +### Template +Please use the file [following template](copyright_template.h). + ## Python In progress... @@ -39,3 +42,6 @@ We are using [PEP8 Python style guide](https://www.python.org/dev/peps/pep-0008/ ```bash black -l 120 src/python/example/bottleneck_basic_example.py ``` + +### Template +Please use the file [following template](copyright_template.py). -- cgit v1.2.3 From 8b0a7fdfb917147d7263a89cdfa1bc785f62c139 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 16:49:10 +0100 Subject: [skip ci] add a section for submodule --- for_dev/how_to_use_github_to_contribute_to_gudhi.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md index 1ca40386..358825f3 100644 --- a/for_dev/how_to_use_github_to_contribute_to_gudhi.md +++ b/for_dev/how_to_use_github_to_contribute_to_gudhi.md @@ -25,6 +25,13 @@ This creates a directory gudhi-devel, which you are free to move around or renam cd gudhi-devel ``` +## Submodule +An interface to Hera for Wasserstein distanceis available on an external git repository. +Everytime you checkout master or merge from master, afterwards, you will need to run the command: +```bash +git submodule update --init +``` + ## Configuring a remote for a fork ```bash git remote add upstream https://github.com/GUDHI/gudhi-devel.git -- cgit v1.2.3 From 9e97c2b0cfeb5defc51b1358949d8a29adec8767 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 16:49:29 +0100 Subject: [skip ci] typo --- for_dev/how_to_use_github_to_contribute_to_gudhi.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md index 358825f3..0e7d42ef 100644 --- a/for_dev/how_to_use_github_to_contribute_to_gudhi.md +++ b/for_dev/how_to_use_github_to_contribute_to_gudhi.md @@ -26,7 +26,7 @@ cd gudhi-devel ``` ## Submodule -An interface to Hera for Wasserstein distanceis available on an external git repository. +An interface to Hera for Wasserstein distance is available on an external git repository. 
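For a fresh checkout, a one-step alternative is possible; this is standard git behavior, suggested here as an aside rather than something the patch above prescribes:
```bash
# Clone the repository and initialize all submodules in a single command
git clone --recurse-submodules https://github.com/LOGIN/gudhi-devel.git
cd gudhi-devel
```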
Everytime you checkout master or merge from master, afterwards, you will need to run the command: ```bash git submodule update --init -- cgit v1.2.3 From 4b0c4bdf4ec3b8ddc7803eff3e08b7a792a9003d Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 16:50:56 +0100 Subject: [skip ci] Add .github folder --- .github/CONTRIBUTING.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .github/CONTRIBUTING.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000..e69de29b -- cgit v1.2.3 From 7afba92e26f9e3e78ce0c27b8b9ef29b2f9a8121 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 16:52:58 +0100 Subject: [skip ci] Move all in .github folder --- .github/code_conventions.md | 47 +++++++++ .github/copyright_template.h | 14 +++ .github/copyright_template.py | 8 ++ .../for_maintainers/new_gudhi_version_creation.md | 97 +++++++++++++++++ .github/for_maintainers/next_release_template.md | 28 +++++ .../how_to_use_github_to_contribute_to_gudhi.md | 116 +++++++++++++++++++++ .github/next_release.md | 28 +++++ for_dev/code_conventions.md | 47 --------- for_dev/copyright_template.h | 14 --- for_dev/copyright_template.py | 8 -- .../for_maintainers/new_gudhi_version_creation.md | 97 ----------------- for_dev/for_maintainers/next_release_template.md | 28 ----- .../how_to_use_github_to_contribute_to_gudhi.md | 116 --------------------- for_dev/next_release.md | 28 ----- 14 files changed, 338 insertions(+), 338 deletions(-) create mode 100644 .github/code_conventions.md create mode 100644 .github/copyright_template.h create mode 100644 .github/copyright_template.py create mode 100644 .github/for_maintainers/new_gudhi_version_creation.md create mode 100644 .github/for_maintainers/next_release_template.md create mode 100644 .github/how_to_use_github_to_contribute_to_gudhi.md create mode 100644 .github/next_release.md delete mode 100644 for_dev/code_conventions.md delete mode 100644 for_dev/copyright_template.h delete mode 100644 for_dev/copyright_template.py delete mode 100644 for_dev/for_maintainers/new_gudhi_version_creation.md delete mode 100644 for_dev/for_maintainers/next_release_template.md delete mode 100644 for_dev/how_to_use_github_to_contribute_to_gudhi.md delete mode 100644 for_dev/next_release.md diff --git a/.github/code_conventions.md b/.github/code_conventions.md new file mode 100644 index 00000000..9724f722 --- /dev/null +++ b/.github/code_conventions.md @@ -0,0 +1,47 @@ +# Naming conventions + +## C++ + +### In the code: +* The classes and functions of a package should be in a sub-namespace of the `Gudhi` namespace. The sub-namespace names are in lowercase and use underscore separators. E.g. `Gudhi::package_name::` +* Concepts are named with camel case starting with uppercase. E.g. `PersistentHomology` for the concept of Persistence homology. +* Classes start with an uppercase letter and use underscore separators. E.g. `Skeleton_blocker_contractor`. +* Member functions and free functions are in lowercase and use underscore separators. E.g. `int num_vertices()`. +* Constants and macros are in uppercase. +* Macros should begin with the prefix `GUDHI_`. + +### File names: +* All headers are named *.h and all sources are named *.cpp. +* If a single class or function is provided in a file, its name (with the same letter case) should be used for the file name. +* If a file does not contain a single class, its name should not begin with a capital letter.
+* Test files should be called `test_[what_is_tested].cpp`. E.g. `test_sparsify_point_set.cpp` +* Example files should be called `example_[what_it_is].cpp`. E.g. `example_sparsify_point_set.cpp` + +### In CMakeLists.txt files: +* The name of the "project" should be in this form: `Package_[tests|examples|…]`. E.g. `project(Simplex_tree_examples)`. +* The name of each "target" (first parameter of add_executable) should be in this form: `Package_{name of the cpp file without extension}`. E.g. `add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)`. + +### Code style +We are using [google c++ style guide](https://google.github.io/styleguide/cppguide.html) recommendations with 120 characters per line of code. +[clang-format](https://clang.llvm.org/docs/ClangFormat.html) can be used to automatically format your code: +```bash +cd src # there is a .clang-format file with these specifications +clang-format -style=file -i Simplex_tree/include/gudhi/Simplex_tree.h # -i means in place, your file will be modified +``` + +### Template +Please use the [following template](copyright_template.h). + +## Python + +In progress... + +### Code style +We are using [PEP8 Python style guide](https://www.python.org/dev/peps/pep-0008/) recommendations with 120 characters per line of code. +[black](https://black.readthedocs.io/en/stable/) can be used to automatically format your code: +```bash +black -l 120 src/python/example/bottleneck_basic_example.py +``` + +### Template +Please use the [following template](copyright_template.py). diff --git a/.github/copyright_template.h b/.github/copyright_template.h new file mode 100644 index 00000000..30034f1b --- /dev/null +++ b/.github/copyright_template.h @@ -0,0 +1,14 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): [AUTHOR NAME] + * + * Copyright (C) [YEAR] [COPYRIGHT] + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#ifndef [FILE_NAME]_H_ +#define [FILE_NAME]_H_ + +#endif // [FILE_NAME]_H_ \ No newline at end of file diff --git a/.github/copyright_template.py b/.github/copyright_template.py new file mode 100644 index 00000000..667f985d --- /dev/null +++ b/.github/copyright_template.py @@ -0,0 +1,8 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# Author(s): [AUTHOR NAME] +# +# Copyright (C) [YEAR] [COPYRIGHT] +# +# Modification(s): +# - YYYY/MM Author: Description of the modification diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md new file mode 100644 index 00000000..4a40f373 --- /dev/null +++ b/.github/for_maintainers/new_gudhi_version_creation.md @@ -0,0 +1,97 @@ +# Create a new GUDHI version + +We will consider that all operations will be performed in a brand new clone of the main project: +```bash +git clone https://github.com/GUDHI/gudhi-devel.git +cd gudhi-devel +``` + +## Version file modification + +**Edit the file CMakeGUDHIVersion.txt**, and increment the major, minor, or patch version number, depending on the content of the new release. +```bash +# cf. 
.gitignore - ignore this if it is a fresh clone version rm -rf data/points/COIL_database/lucky_cat.off_dist data/points/COIL_database/lucky_cat.off_sc.dot data/points/KleinBottle5D.off_dist data/points/KleinBottle5D.off_sc.dot data/points/human.off_dist data/points/human.off_sc.off data/points/human.off_sc.txt +``` + +Check in the modifications, then build and test the version: +```bash +mkdir build +cd build +cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 .. +make user_version +date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt +tar -czvf gudhi.@GUDHI_VERSION@.tar.gz gudhi.@GUDHI_VERSION@ +md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt +sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt +sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt + +make -j all test +``` + +***[Check that there are no errors]*** + +## Create the documentation +```bash +mkdir gudhi.doc.@GUDHI_VERSION@ +make doxygen 2>&1 | tee dox.log && grep warning dox.log +``` + +***[Check that there are no errors, and review the warnings]*** + +```bash +cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp +cd gudhi.@GUDHI_VERSION@ +rm -rf build; mkdir build; cd build; cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 .. +export LC_ALL=en_US.UTF-8 # cf. bug +make sphinx +``` + +***[Check that there are no errors]*** + +```bash +cp -R python/sphinx ../../gudhi.doc.@GUDHI_VERSION@/python +cd ../.. +tar -czvf gudhi.doc.@GUDHI_VERSION@.tar.gz gudhi.doc.@GUDHI_VERSION@ + +cd gudhi.@GUDHI_VERSION@/build +make all test +``` + +***[Check that there are no errors]*** + +## Upload the documentation + +Upload by FTP the content of the directory gudhi.doc.@GUDHI_VERSION@/cpp to a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/doc/@GUDHI_VERSION@ + +Upload by FTP the content of the directory gudhi.doc.@GUDHI_VERSION@/python to a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/python/@GUDHI_VERSION@ + +Through SSH, make the **latest** link point to your new version of the documentation: +```bash +ssh ForgeLogin@scm.gforge.inria.fr +cd /home/groups/gudhi/htdocs/doc +rm latest +ln -s @GUDHI_VERSION@ latest +cd /home/groups/gudhi/htdocs/python +rm latest +ln -s @GUDHI_VERSION@ latest +``` + +## Put a version label on files + +* Go to the page https://github.com/GUDHI/gudhi-devel/releases/new +* Name the tag: tags/gudhi-release-@GUDHI_VERSION@ +* Name the release GUDHI @GUDHI_VERSION@ +* Write the release note +* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *md5sum.txt*, *sha256sum.txt*, *sha512sum.txt* files +* Tick the *This is a pre-release* check button if this is a release candidate (untick if this is an official version) +* Click the *Publish the release* button + +***[Where X, Y and Z correspond respectively to the major, minor, and patch version numbers]*** + + +## Mail sending +Send the version announcement mail to the following lists: +* gudhi-devel@lists.gforge.inria.fr +* gudhi-users@lists.gforge.inria.fr (not for release candidate) + diff --git a/.github/for_maintainers/next_release_template.md b/.github/for_maintainers/next_release_template.md new file mode 100644 index 00000000..a2805a55 --- /dev/null +++ b/.github/for_maintainers/next_release_template.md @@ -0,0 +1,28 @@ +We are pleased to announce the release 3.X.X of the GUDHI library. + +As a major new feature, the GUDHI library now offers ... 
+ +We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz). + +Below is a list of changes made since GUDHI 3.X-1.X-1: + +- [Module](link) + - ... + +- [Module](link) + - ... + +- Miscellaneous + - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. + +All modules are distributed under the terms of the MIT license. +However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. + +We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. + +We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. + +Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks. + +For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). + diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md new file mode 100644 index 00000000..0e7d42ef --- /dev/null +++ b/.github/how_to_use_github_to_contribute_to_gudhi.md @@ -0,0 +1,116 @@ +# How to use github to contribute to gudhi + +Similar information is available in many places: +* https://jarv.is/notes/how-to-pull-request-fork-github/ (this one is using `upstream/master` when creating a new branch) +* https://help.github.com/en/github/getting-started-with-github/fork-a-repo +* https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ +* https://gist.github.com/Chaser324/ce0505fbed06b947d962 +* etc + +## Get a github account +I assume the account is called **LOGIN**, please replace as appropriate below. Log in to github.com using this account. + +## Fork GUDHI/gudhi-devel project +Go to https://github.com/GUDHI/gudhi-devel and click on **fork** (top right). +Feel free to also click on the star next to it to show you like the project! +You can see your fork at https://github.com/LOGIN/gudhi-devel + +## Create a local clone on your computer +```bash +git clone https://github.com/LOGIN/gudhi-devel.git +``` + +This creates a directory gudhi-devel, which you are free to move around or rename. For the following, change to that directory: +```bash +cd gudhi-devel +``` + +## Submodule +An interface to Hera for Wasserstein distance is available on an external git repository. +Everytime you checkout master or merge from master, afterwards, you will need to run the command: +```bash +git submodule update --init +``` + +## Configuring a remote for a fork +```bash +git remote add upstream https://github.com/GUDHI/gudhi-devel.git +``` + +because you want to see the real gudhi, not just your clone. 
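To double-check the setup, `git remote -v` lists the configured remotes; the output below is an illustrative sketch of what the two commands above should produce:
```bash
git remote -v
# origin    https://github.com/LOGIN/gudhi-devel.git (fetch)
# origin    https://github.com/LOGIN/gudhi-devel.git (push)
# upstream  https://github.com/GUDHI/gudhi-devel.git (fetch)
# upstream  https://github.com/GUDHI/gudhi-devel.git (push)
```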
+(It is perfectly possible to do things in the reverse order, clone from GUDHI and add the one in LOGIN as extra remote, but the names of the remotes may not match the rest of this document. You can change the name of a remote with `git remote rename oldname newname`) + +## Optional remotes +Optionally, if you are interested in one of the old branches: +```bash +git remote add oldies https://github.com/GUDHI/branches.git +``` + +Or if you want to spy on someone's work. I assume their account is called **SOMEONE**: +```bash +git remote add someone https://github.com/SOMEONE/gudhi-devel.git +``` + +## Stay up-to-date +```bash +git fetch -p --all +``` +This is a command you can run quite regularly. +It tells git to check all that happened on github. +It is safe, it will not mess with your files. + +## Create a branch, based on the current master +```bash +git checkout -b some-fancy-name --no-track upstream/master +``` +Your local branch `master` and the one on your github clone are useless and often outdated, but for technical reasons there has to exist at least one branch at all times; it might as well be that one. upstream/master is the real deal, that's what you want to base your new branch on. + +## The real coding is here! +Edit files, test, etc. + +## Commit your changes (locally) +The basic command is just `git commit`, but it will do nothing by default. +You need `git add my_new_file` for every new file you want to commit. +And usually you'll want to use `git commit -a` so that all files that git already knows about and that have been modified get committed. + +## Push your changes (remotely) +```bash +git push -u origin some-fancy-name +``` +This puts a copy of your branch on your online clone of gudhi-devel. +Because of `-u`, it will remember where you like to push this branch, and next time you can just use `git push`. + +## Play again! +Possibly iterate a few times, add more commits and push them. + +## Your pull request is ready +Get your web browser to https://github.com/LOGIN/gudhi-devel, click on the button that says **Branch: some-name** (below the number of commits, above the list of files) and select the branch you are so proud of. +Click on **New pull request** next to it. + +## Follow the instructions ;-) +Note that if your branch is not quite ready, you can make a **draft pull request** (see the arrow next to the confirmation button), and later you will have access to a button to say that the branch is ready for reviews now. +Draft pull requests can be a way to advertise that you are working on something, and possibly ask others for comments or help. + +## Code review +Make sure you follow the discussion on your pull request, answer questions, take comments into account. +You can keep pushing new commits on your branch to your fork of gudhi-devel, the pull request will automatically notice the new commits there. +There is no need to create a new pull request. +Once the branch is under review, fixing issues is good, but please refrain from adding extra features; that just makes the reviewers' job harder and thus slower. +You may want to look at https://github.com/settings/notifications (and other settings nearby) if you don't receive emails when people comment on your pull request. +Some bold reviewer might make changes to your branch. You will then need `git pull` for your local branch to reflect those. + +## Your work is merged! 
+Once your pull request has been closed (your branch merged), you can remove your branch, both locally and also the branch on your github fork: +```bash +git checkout master # or any other branch, but you cannot remove the branch you are currently in +git branch -d some-fancy-name # local branch delete +git push origin --delete some-fancy-name # remote branch delete +``` +If you add @VincentRouvreau or @mglisse as collaborator (https://github.com/LOGIN/gudhi-devel/settings/collaboration), they may remove the branch on your clone at the same time as they merge the branch, so you only have the local one to remove (or keep if you are nostalgic). + +## Keep in touch +Create a new branch and keep contributing! + +Do not try to reuse an old branch that has already been merged. +Make sure you run the fetch command just before creating any new branch, so you don't base it on some outdated version of master. +You can also work on several branches at the same time, using `git checkout some-fancy-name` and `git checkout name-of-other-branch` to switch between them (commit before switching or things may get complicated). diff --git a/.github/next_release.md b/.github/next_release.md new file mode 100644 index 00000000..a2805a55 --- /dev/null +++ b/.github/next_release.md @@ -0,0 +1,28 @@ +We are pleased to announce the release 3.X.X of the GUDHI library. + +As a major new feature, the GUDHI library now offers ... + +We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz). + +Below is a list of changes made since GUDHI 3.X-1.X-1: + +- [Module](link) + - ... + +- [Module](link) + - ... + +- Miscellaneous + - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. + +All modules are distributed under the terms of the MIT license. +However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. + +We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. + +We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. + +Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks. + +For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). + diff --git a/for_dev/code_conventions.md b/for_dev/code_conventions.md deleted file mode 100644 index 9724f722..00000000 --- a/for_dev/code_conventions.md +++ /dev/null @@ -1,47 +0,0 @@ -# Naming conventions - -## C++ - -### In the code: -* The classes and functions of a package should be in a sub-namespace of the `Gudhi` namespace. The sub-namespace names are in lowercase and use underscore separators. E.g. `Gudhi::package_name::` -* Concepts are named with camel case starting with uppercase. E.g. 
`PersistentHomology` for the concept of Persitence homology. -* Classes start with an uppercase letter and use underscore separators. E.g. `Skeleton_blocker_contractor`. -* Member functions and free functions are in lowercase and use underscore separators. E.g. `int num_vertices()`. -* Constants and macros are in uppercase. -* Macros should begin with the prefix `GUDHI_`. - -### File names: -* All headers are named *.h and all sources are named *.cpp. -* If a single class or function is provided in a file, its name (with the same letter case) should be used for the file name. -* If a file does not contain a single class, its name should not begin with a capital letter. -* Test files should be called `test_[what_is_tested].cpp`. E.g. `test_sparsify_point_set.cpp` -* Example files should be called `example_[what_it_is].cpp`. E.g. `example_sparsify_point_set.cpp` - -### In CMakeLists.txt files: -* The name of the "project" should be in this form: `Package_[tests|examples|…]`. E.g. `project(Simplex_tree_examples)`. -* The name if each "target" (first parameter of add_executable) should be in this form: `Package_{name of the cpp file without extension}`. E.g `add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)`. - -### Code style -We are using [google c++ style guide](https://google.github.io/styleguide/cppguide.html) recommendations with 120 characters per line of code. -[clang-format](https://clang.llvm.org/docs/ClangFormat.html) can be used to format automatically your code: -```bash -cd src # there is a .clang-format file with these specifications -clang-format -style=file -i Simplex_tree/include/gudhi/Simplex_tree.h # -i means in place, your file will be modified -``` - -### Template -Please use the file [following template](copyright_template.h). - -## Python - -In progress... - -### Code style -We are using [PEP8 Python style guide](https://www.python.org/dev/peps/pep-0008/) recommendations with 120 characters per line of code. -[black](https://black.readthedocs.io/en/stable/) can be used to format automatically your code: -```bash -black -l 120 src/python/example/bottleneck_basic_example.py -``` - -### Template -Please use the file [following template](copyright_template.py). diff --git a/for_dev/copyright_template.h b/for_dev/copyright_template.h deleted file mode 100644 index 30034f1b..00000000 --- a/for_dev/copyright_template.h +++ /dev/null @@ -1,14 +0,0 @@ -/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. - * Author(s): [AUTHOR NAME] - * - * Copyright (C) [YEAR] [COPYRIGHT] - * - * Modification(s): - * - YYYY/MM Author: Description of the modification - */ - -#ifndef [FILE_NAME]_H_ -#define [FILE_NAME]_H_ - -#endif // [FILE_NAME]_H_ \ No newline at end of file diff --git a/for_dev/copyright_template.py b/for_dev/copyright_template.py deleted file mode 100644 index 667f985d..00000000 --- a/for_dev/copyright_template.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
-# Author(s): [AUTHOR NAME] -# -# Copyright (C) [YEAR] [COPYRIGHT] -# -# Modification(s): -# - YYYY/MM Author: Description of the modification diff --git a/for_dev/for_maintainers/new_gudhi_version_creation.md b/for_dev/for_maintainers/new_gudhi_version_creation.md deleted file mode 100644 index 4a40f373..00000000 --- a/for_dev/for_maintainers/new_gudhi_version_creation.md +++ /dev/null @@ -1,97 +0,0 @@ -# Create a new GUDHI version - -We will consider that all operations will be performed in a brand new clone of the main project: -```bash -git clone https://github.com/GUDHI/gudhi-devel.git -cd gudhi-devel -``` - -## Version file modification - -**Edit the file CMakeGUDHIVersion.txt**, and increment major, minor, or patch version number, in function of the version new delivery. -```bash -# cf. .gitignore - ignore this if it is a fresh clone version -rm -rf data/points/COIL_database/lucky_cat.off_dist data/points/COIL_database/lucky_cat.off_sc.dot data/points/KleinBottle5D.off_dist data/points/KleinBottle5D.off_sc.dot data/points/human.off_dist data/points/human.off_sc.off data/points/human.off_sc.txt -``` - -Checkin the modifications, build and test the version: -```bash -mkdir build -cd build -cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 .. -make user_version -date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt -tar -czvf gudhi.@GUDHI_VERSION@.tar.gz gudhi.@GUDHI_VERSION@ -md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt -sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt -sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt - -make -j all test -``` - -***[Check there are no error]*** - -## Create the documentation -```bash -mkdir gudhi.doc.@GUDHI_VERSION@ -make doxygen 2>&1 | tee dox.log && grep warning dox.log -``` - -***[Check there are no error and the warnings]*** - -```bash -cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp -cd gudhi.@GUDHI_VERSION@ -rm -rf build; mkdir build; cd build; cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 .. -export LC_ALL=en_US.UTF-8 # cf. bug -make sphinx -``` - -***[Check there are no error]*** - -```bash -cp -R python/sphinx ../../gudhi.doc.@GUDHI_VERSION@/python -cd ../.. 
-tar -czvf gudhi.doc.@GUDHI_VERSION@.tar.gz gudhi.doc.@GUDHI_VERSION@ - -cd gudhi.@GUDHI_VERSION@/build -make all test -``` - -***[Check there are no error]*** - -## Upload the documentation - -Upload by ftp the content of the directory gudhi.doc.@GUDHI_VERSION@/cpp in a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/doc/@GUDHI_VERSION@ - -Upload by ftp the content of the directory gudhi.doc.@GUDHI_VERSION@/python in a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/python/@GUDHI_VERSION@ - -Through ssh, make the **latest** link to your new version of the documentation: -```bash -ssh ForgeLogin@scm.gforge.inria.fr -cd /home/groups/gudhi/htdocs/doc -rm latest -ln -s @GUDHI_VERSION@ latest -cd /home/groups/gudhi/htdocs/python -rm latest -ln -s @GUDHI_VERSION@ latest -``` - -## Put a version label on files - -* Go on page https://github.com/GUDHI/gudhi-devel/releases/new -* Name the tag: tags/gudhi-release-@GUDHI_VERSION@ -* Name the release GUDHI @GUDHI_VERSION@ -* Write the release note -* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *md5sum.txt*, *sha256sum.txt*, *sha512sum.txt* files -* Tick the *This is a pre-release* check button if this is a release candidate (untick if this is an official version) -* Click the *Publish the release* button - -***[Where X, Y and Z corresponds respectively to the major, minor, and patch version number]*** - - -## Mail sending -Send version mail to the following lists : -* gudhi-devel@lists.gforge.inria.fr -* gudhi-users@lists.gforge.inria.fr (not for release candidate) - diff --git a/for_dev/for_maintainers/next_release_template.md b/for_dev/for_maintainers/next_release_template.md deleted file mode 100644 index a2805a55..00000000 --- a/for_dev/for_maintainers/next_release_template.md +++ /dev/null @@ -1,28 +0,0 @@ -We are pleased to announce the release 3.X.X of the GUDHI library. - -As a major new feature, the GUDHI library now offers ... - -We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz). - -Below is a list of changes made since GUDHI 3.X-1.X-1: - -- [Module](link) - - ... - -- [Module](link) - - ... - -- Miscellaneous - - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. - -All modules are distributed under the terms of the MIT license. -However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. - -We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. - -We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. - -Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks. - -For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). 
- diff --git a/for_dev/how_to_use_github_to_contribute_to_gudhi.md b/for_dev/how_to_use_github_to_contribute_to_gudhi.md deleted file mode 100644 index 0e7d42ef..00000000 --- a/for_dev/how_to_use_github_to_contribute_to_gudhi.md +++ /dev/null @@ -1,116 +0,0 @@ -# How to use github to contribute to gudhi - -Similar information is available in many places: -* https://jarv.is/notes/how-to-pull-request-fork-github/ (this one is using `upstream/master` when creating a new branch) -* https://help.github.com/en/github/getting-started-with-github/fork-a-repo -* https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/ -* https://gist.github.com/Chaser324/ce0505fbed06b947d962 -* etc - -## Get a github account -I assume the account is called **LOGIN**, please replace as appropriate below. Log in to github.com using this account. - -## Fork GUDHI/gudhi-devel project -Go to https://github.com/GUDHI/gudhi-devel and click on **fork** (top right). -Feel free to also click on the star next to it to show you like the project! -You can see your fork at https://github.com/LOGIN/gudhi-devel - -## Create a local clone on your computer -```bash -git clone https://github.com/LOGIN/gudhi-devel.git -``` - -This creates a directory gudhi-devel, which you are free to move around or rename. For the following, change to that directory: -```bash -cd gudhi-devel -``` - -## Submodule -An interface to Hera for Wasserstein distance is available on an external git repository. -Everytime you checkout master or merge from master, afterwards, you will need to run the command: -```bash -git submodule update --init -``` - -## Configuring a remote for a fork -```bash -git remote add upstream https://github.com/GUDHI/gudhi-devel.git -``` - -because you want to see the real gudhi, not just your clone. -(It is perfectly possible to do things in the reverse order, clone from GUDHI and add the one in LOGIN as extra remote, but the names of the remotes may not match the rest of this document. You can change the name of a remote with `git remote rename oldname newname`) - -## Optional remotes -Optional, if you are interested in one of the old branches -```bash -git remote add oldies https://github.com/GUDHI/branches.git -``` - -Or if you want to spy on someone's work. I assume the someone's account is called **SOMEONE** -```bash -git remote add someone https://github.com/SOMEONE/gudhi-devel.git -``` - -## Stay up-to-date -```bash -git fetch -p --all -``` -This is a command you can run quite regularly. -It tells git to check all that happened on github. -It is safe, it will not mess with your files. - -## Create a branch, based on the current master -```bash -git checkout -b some-fancy-name --no-track upstream/master -``` -Your local branch `master` and the one on your github clone are useless and often outdated, but for technical reasons there has to exist at least one branch at all times, it might as well be that one. upstream/master is the real deal, that's what you want to base your new branch on. - -## The real coding is here! -Edit files, test, etc. - -## Commit your changes (locally) -The basic command is just `git commit`, but it will do nothing by default. -You need `git add my_new_file` for every new file you want to commit. -And usually you'll want to use `git commit -a` so that all files that git already knows about and that have been modified get committed. - -## Push your changes (remotely) -```bash -git push -u origin some-fancy-name -``` -This puts a copy of your branch on your online clone of gudhi-devel. 
-Because of `-u`, it will remember where you like to push this branch, and next time you can just use `git push`. - -## Play again! -Possibly iterate a few times, add more commits and push them. - -## Your pull request is ready -Get your web browser to https://github.com/LOGIN/gudhi-devel, click on the button that says **Branch: some-name** (below the number of commits, above the list of files) and select the branch you are so proud of. -Click on **New pull request** next to it. - -## Follow the instructions ;-) -Note that if your branch is not quite ready, you can make a **draft pull request** (see the arrow next to the confirmation button), and later you will have access to a button to say that the branch is ready for reviews now. -Draft pull requests can be a way to advertise that you are working on something, and possibly ask others for comments or help. - -## Code review -Make sure you follow the discussion on your pull request, answer questions, take comments into account. -You can keep pushing new commits on your branch to your fork of gudhi-devel, the pull request will automatically notice the new commits there. -There is no need to create a new pull request. -Once the branch is under review, fixing issues is good, but please refrain from adding extra features, that just makes the reviewers' job harder and thus slower. -You may want to look at https://github.com/settings/notifications (and other settings nearby) if you don't receive emails when people comment on your pull request. -Some bold reviewer might make changes to your branch. You will then need `git pull` for your local branch to reflect those. - -## Your work is merged! -Once your pull request has been closed (your branch merged), you can remove your branch, both locally and also the branch on your github fork: -```bash -git checkout master # or any other branch, but you cannot remove the branch you are currently in -git branch -d some-fancy-name # local branch delete -git push origin --delete some-fancy-name # remote branch delete -``` -If you add @VincentRouvreau or @mglisse as collaborator (https://github.com/LOGIN/gudhi-devel/settings/collaboration), they may remove the branch on your clone at the same time as they merge the branch, so you only have the local one to remove (or keep if you are nostalgic). - -## Keep in touch -Create a new branch and keep contributing! - -Do not try to reuse an old branch that has already been merged. -Make sure you run the fetch command just before creating any new branch, so you don't base it on some outdated version of master. -You can also work on several branches at the same time, using `git checkout some-fancy-name` and `git checkout name-of-other-branch` to switch between them (commit before switching or things may get complicated). diff --git a/for_dev/next_release.md b/for_dev/next_release.md deleted file mode 100644 index a2805a55..00000000 --- a/for_dev/next_release.md +++ /dev/null @@ -1,28 +0,0 @@ -We are pleased to announce the release 3.X.X of the GUDHI library. - -As a major new feature, the GUDHI library now offers ... - -We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz). - -Below is a list of changes made since GUDHI 3.X-1.X-1: - -- [Module](link) - - ... - -- [Module](link) - - ... 
- -- Miscellaneous - - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub. - -All modules are distributed under the terms of the MIT license. -However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. - -We kindly ask users to cite the GUDHI library as appropriately as possible in their papers, and to mention the use of the GUDHI library on the web pages of their projects using GUDHI and provide us with links to these web pages. - -We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) for the modules of the User and Reference Manual, as well as for publications directly related to the GUDHI library. - -Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks. - -For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). - -- cgit v1.2.3 From e98d18182076ef5f66361a6ef404e55ff13567e4 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 17:18:14 +0100 Subject: [skip ci] Add some contributing content --- .github/CONTRIBUTING.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index e69de29b..eacf32f8 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing to GUDHI + +First of all, thank you for the time you may take to contribute to GUDHI! + +# In case you have a question + +Please, check our [contact web page](https://gudhi.inria.fr/contact/). + +# In case you found an issue + +Please, first check [opened issues on GUDHI](https://github.com/GUDHI/gudhi-devel/issues). + +If the problem you are facing is not referenced, do not hesitate to open a [new issue](https://github.com/GUDHI/gudhi-devel/issues/new). + +This place is also a good place if you have some enhancement you want to propose for the GUDHI library. +There is a label **enhancement** in the [new issue](https://github.com/GUDHI/gudhi-devel/issues/new) page. + +# In case you want to contribute to GUDHI + +## You are not familiar with GitHub? + +Please take some time to read our [how to use GitHub to contribute to GUDHI](how_to_use_github_to_contribute_to_gudhi.md). + +## Something you want to improve in the documentation + +For C++ documentation, you can find it in the directories: +* *src/common/doc* for the main page and installation instructions +* *src/NAME_OF_THE_MODULE/doc* for the main page of a module +* *src/NAME_OF_THE_MODULE/include/gudhi* for the documentation generated from the code. +We use Doxygen to generate the documentation from the code and you will be able to verify the result in the CircleCI Doxygen target artifacts. + +For Python documentation, you can find it in the directories: +* *src/python/doc* for the main page, installation instructions and for the main pages of the modules +* *src/python/gudhi/NAME_OF_THE_MODULE.pyx* for the documentation generated from the code. +We use Sphinx to generate the documentation from the code and you will be able to verify the result in the CircleCI Sphinx target artifacts. 
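To preview a documentation change before pushing, a minimal local sketch, assuming a CMake build directory configured as in the maintainers' release instructions earlier in this series:
```bash
cd build
# Both targets are provided by the GUDHI CMake setup (cf. new_gudhi_version_creation.md)
make doxygen   # builds the C++ documentation
make sphinx    # builds the Python documentation
```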
+ +## Something you want to improve in the code + +Please first take some time to read our [code conventions](code_conventions.md) + -- cgit v1.2.3 From 8dc47a277c50744812a6e65e8e817fed479b301d Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 13 Feb 2020 17:25:55 +0100 Subject: [skip ci] contributing --- .github/CONTRIBUTING.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index eacf32f8..29fe0aaa 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -12,7 +12,7 @@ Please, first check [opened issues on GUDHI](https://github.com/GUDHI/gudhi-deve If the problem you are facing is not referenced, do not hesitate to open a [new issue](https://github.com/GUDHI/gudhi-devel/issues/new). -This place is also a good place if you have some enhancement you want to propose for the GUDHI library. +This place is also a good place if you have some enhancement you want to propose. There is a label **enhancement** in the [new issue](https://github.com/GUDHI/gudhi-devel/issues/new) page. # In case you want to contribute to GUDHI @@ -38,3 +38,6 @@ We use Sphinx to generate the code and you will be able to verify the result in Please first take some time to read our [code conventions](code_conventions.md) +As a convention, we set a Pull Request as a **Draft Pull Request** when we work on something we want the other contributors to see. + +We click on **Ready for review** to ask for a peer review of the contribution. \ No newline at end of file -- cgit v1.2.3 From ef0f82ef2155440827e17c552abb49b509866fc7 Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 13 Feb 2020 16:01:29 -0500 Subject: integrated hera --- .../diagram_vectorizations_distances_kernels.py | 7 ++++++- src/python/gudhi/representations/metrics.py | 23 ++++++++++++++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index 66c32cc2..6352d2b5 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -117,7 +117,12 @@ X = SW.fit(diags) Y = SW.transform(diags2) print("SW kernel is " + str(Y[0][0])) -W = WassersteinDistance(order=2, internal_p=2) +W = WassersteinDistance(order=2, internal_p=2, mode="pot") +X = W.fit(diags) +Y = W.transform(diags2) +print("Wasserstein distance is " + str(Y[0][0])) + +W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001) X = W.fit(diags) Y = W.transform(diags2) print("Wasserstein distance is " + str(Y[0][0])) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index cc788994..ed998603 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -10,7 +10,8 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances -from gudhi.wasserstein import wasserstein_distance +from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance +from gudhi.hera import wasserstein_distance as hera_wasserstein_distance from .preprocessing import Padding try: @@ -117,8 +118,10 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa if metric == "bottleneck": return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, **kwargs)) - elif metric == "wasserstein": - 
return pairwise_distances(XX, YY, metric=sklearn_wrapper(wasserstein_distance, **kwargs)) + elif metric == "wasserstein" or metric == "pot_wasserstein": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, **kwargs)) + elif metric == "hera_wasserstein": + return pairwise_distances(XX, YY, metric=sklearn_wrapper(hera_wasserstein_distance, **kwargs)) elif metric == "sliced_wasserstein": return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, **kwargs)) elif metric == "persistence_fisher": @@ -205,15 +208,19 @@ class WassersteinDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams. """ - def __init__(self, order=2, internal_p=2): + def __init__(self, order=2, internal_p=2, mode="pot", delta=0.0001): """ Constructor for the WassersteinDistance class. Parameters: order (int): exponent for Wasserstein, default value is 2., see :func:`gudhi.wasserstein.wasserstein_distance`. internal_p (int): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is 2 (euclidean norm), see :func:`gudhi.wasserstein.wasserstein_distance`. + mode (str): method for computing Wasserstein distance. Either "pot" or "hera". + delta (float): relative error 1+delta. Used only if mode == "hera". """ - self.order, self.internal_p = order, internal_p + self.order, self.internal_p, self.mode = order, internal_p, mode + self.metric = "pot_wasserstein" if mode == "pot" else "hera_wasserstein" + self.delta = delta def fit(self, X, y=None): """ @@ -236,7 +243,11 @@ class WassersteinDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances. 
""" - return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="wasserstein", order=self.order, internal_p=self.internal_p) + if self.metric == "hera_wasserstein": + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta) + else: + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p) + return Xfit class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ -- cgit v1.2.3 From d9290a78741fc14dc0f87d395da967a4d561b34a Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 13 Feb 2020 16:11:34 -0500 Subject: small modif on example file --- src/python/example/diagram_vectorizations_distances_kernels.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index 6352d2b5..507ead7c 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -120,12 +120,12 @@ print("SW kernel is " + str(Y[0][0])) W = WassersteinDistance(order=2, internal_p=2, mode="pot") X = W.fit(diags) Y = W.transform(diags2) -print("Wasserstein distance is " + str(Y[0][0])) +print("Wasserstein distance (POT) is " + str(Y[0][0])) W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001) X = W.fit(diags) Y = W.transform(diags2) -print("Wasserstein distance is " + str(Y[0][0])) +print("Wasserstein distance (hera) is " + str(Y[0][0])) W = BottleneckDistance(epsilon=.001) X = W.fit(diags) -- cgit v1.2.3 From fe754ca20cf942e2af186f14e5a3d24e23b6c80e Mon Sep 17 00:00:00 2001 From: mathieu Date: Thu, 13 Feb 2020 19:27:40 -0500 Subject: fix Marc's comments --- src/python/gudhi/cubical_complex.pyx | 49 ++++++++-------- .../include/Persistent_cohomology_interface.h | 67 ++++++++++------------ 2 files changed, 55 insertions(+), 61 deletions(-) diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index bd432834..8cf43539 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -31,7 +31,7 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface>": Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence) - vector[pair[int, pair[pair[double, int], pair[double, int]]]] get_cofaces_of_cubical_persistence_pairs(int homology_coeff_field, double min_persistence) + vector[vector[int]] cofaces_of_cubical_persistence_pairs() vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) @@ -146,31 +146,32 @@ cdef class CubicalComplex: persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence) return persistence_result - def cofaces_of_cubical_persistence_pairs(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): - """This function returns the persistence of the simplicial complex. - - :param homology_coeff_field: The homology coefficient field. Must be a - prime number. Default value is 11. - :type homology_coeff_field: int. 
- :param min_persistence: The minimum persistence value to take into - account (strictly greater than min_persistence). Default value is - 0.0. - Sets min_persistence to -1.0 to see all values. - :type min_persistence: float. - :param persistence_dim_max: If true, the persistent homology for the - maximal dimension in the complex is computed. If false, it is - ignored. Default is false. - :type persistence_dim_max: bool - :returns: The persistence of the simplicial complex, together with the cofaces of the corresponding generators, i.e., the top-dimensional cells/cofaces of the positive and negative simplices. - :rtype: list of pairs(dimension, pair(index of positive top-dimensional cell, index of negative top-dimensional cell)) + def cofaces_of_persistence_pairs(self): + """A persistence interval is described by a pair of cells, one that creates the + feature and one that kills it. The filtration values of those 2 cells give coordinates + for a point in a persistence diagram, or a bar in a barcode. Structurally, in the + cubical complexes provided here, the filtration value of any cell is the minimum of the + filtration values of the maximal cells that contain it. Connecting persistence diagram + coordinates to the corresponding value in the input (i.e. the filtration values of + the top-dimensional cells) is useful for differentiation purposes. + + This function returns a list of pairs of top-dimensional cells corresponding to + the persistence birth and death cells of the filtration. The cells are represented by + their indices in the input list of top-dimensional cells (and not their indices in the + internal datastructure that includes non-maximal cells). Note that when two adjacent + top-dimensional cells have the same filtration value, we arbitrarily return one of the two + when calling the function on one of their common faces. + + :returns: The top-dimensional cells/cofaces of the positive and negative cells. + :rtype: list of pairs(index of positive top-dimensional cell, index of negative top-dimensional cell) """ + cdef vector[vector[int]] persistence_result if self.pcohptr != NULL: - del self.pcohptr - self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True) - cdef vector[pair[int, pair[pair[double, int], pair[double, int]]]] persistence_result - if self.pcohptr != NULL: - persistence_result = self.pcohptr.get_cofaces_of_cubical_persistence_pairs(homology_coeff_field, min_persistence) - return persistence_result + persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() + else: + print("cofaces_of_persistence_pairs function requires persistence function" + " to be launched first.") + return np.array(persistence_result) def betti_numbers(self): """This function returns the Betti numbers of the complex. 
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 1a1e716e..e5accf50 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -63,7 +63,6 @@ persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); - std::vector>> persistence; for (auto pair : persistent_pairs) { persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), @@ -73,58 +72,52 @@ persistent_cohomology::Persistent_cohomology & cofaces, int splx){ - if (stptr_->dimension(stptr_->simplex(splx)) == stptr_->dimension()){cofaces.push_back(stptr_->simplex(splx));} - else{ for (auto v : stptr_->coboundary_simplex_range(stptr_->simplex(splx))){top_dimensional_cofaces(cofaces, stptr_->key(v));} } + int top_dimensional_coface(int splx){ + if (stptr_->dimension(splx) == stptr_->dimension()){return splx;} + else{ + for (auto v : stptr_->coboundary_simplex_range(splx)){ + if(stptr_->filtration(v) == stptr_->filtration(splx)){ + return top_dimensional_coface(v); + } + } + } } - std::vector, std::pair>>> get_cofaces_of_cubical_persistence_pairs(int homology_coeff_field, - double min_persistence) { + std::vector> cofaces_of_cubical_persistence_pairs() { // Warning: this function is meant to be used with CubicalComplex only!! + auto pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); + // Gather all top-dimensional cells and store their simplex handles - std::vector max_splx; for (auto splx : stptr_->filtration_simplex_range()){ if (stptr_->dimension(splx) == stptr_->dimension()) max_splx.push_back(splx); } + std::vector max_splx; for (auto splx : stptr_->top_dimensional_cells_range()){ + max_splx.push_back(splx); + } // Sort these simplex handles and compute the ordering function // This function allows to go directly from the simplex handle to the position of the corresponding top-dimensional cell in the input data - std::map order; std::sort(max_splx.begin(), max_splx.end()); for (unsigned int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i)); - - persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); - persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); + std::map order; std::sort(max_splx.begin(), max_splx.end()); + for (unsigned int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i)); - // Custom sort and output persistence - cmp_intervals_by_dim_then_length cmp(stptr_); - auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); - std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); - - std::vector, std::pair>>> persistence; - for (auto pair : persistent_pairs) { - - double f0 = stptr_->filtration(get<0>(pair)); - // Recursively get the top-dimensional cells / cofaces associated to the persistence generator - std::vector faces0; top_dimensional_cofaces(faces0, stptr_->key(get<0>(pair))); - // Find the top-dimensional cell / coface with the same filtration value - int cf; for (unsigned int i = 0; i < faces0.size(); i++){if (stptr_->filtration(faces0[i]) == f0){cf = i; break;}} + std::vector> persistence_pairs; + for (auto pair : pairs) { + int h = stptr_->dimension(get<0>(pair)); + // Recursively get the top-dimensional cell / coface associated to the persistence generator + int face0 = 
top_dimensional_coface(get<0>(pair)); // Retrieve the index of the corresponding top-dimensional cell in the input data - int splx0 = order[faces0[cf]]; + int splx0 = order[face0]; int splx1 = -1; if (isfinite(stptr_->filtration(get<1>(pair)))){ - double f1 = stptr_->filtration(get<1>(pair)); - // Recursively get the top-dimensional cells / cofaces associated to the persistence generator - std::vector faces1; top_dimensional_cofaces(faces1, stptr_->key(get<1>(pair))); - // Find the top-dimensional cell / coface with the same filtration value - int cf; for (unsigned int i = 0; i < faces1.size(); i++){if (stptr_->filtration(faces1[i]) == f1){cf = i; break;}} + // Recursively get the top-dimensional cell / coface associated to the persistence generator + int face1 = top_dimensional_coface(get<1>(pair)); // Retrieve the index of the corresponding top-dimensional cell in the input data - splx1 = order[faces1[cf]]; + splx1 = order[face1]; } - - persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), std::make_pair(std::make_pair(stptr_->filtration(get<0>(pair)), splx0), std::make_pair(stptr_->filtration(get<1>(pair)), splx1)))); + std::vector vect{ h, splx0, splx1}; + persistence_pairs.push_back(vect); } - return persistence; + return persistence_pairs; } std::vector, std::vector>> persistence_pairs() { -- cgit v1.2.3 From a6a4f375822cf3e2ca1866d78472e4350140ddbc Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 11:02:56 +0900 Subject: Add __init__.py --- src/python/gudhi/point_cloud/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/python/gudhi/point_cloud/__init__.py diff --git a/src/python/gudhi/point_cloud/__init__.py b/src/python/gudhi/point_cloud/__init__.py new file mode 100644 index 00000000..e69de29b -- cgit v1.2.3 From 9cc9e1cf3cd9ea42908324d410ef68fa12e8e832 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 11:08:50 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 66 ++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 18 deletions(-) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index f283916d..d899da67 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -10,30 +10,55 @@ import numpy as np class TimeDelayEmbedding: """Point cloud transformation class. - Embeds time-series data in the R^d according to Takens' Embedding Theorem and obtains the coordinates of each point. - Parameters ---------- dim : int, optional (default=3) `d` of R^d to be embedded. - delay : int, optional (default=1) Time-Delay embedding. - skip : int, optional (default=1) How often to skip embedded points. - + Given delay=3 and skip=2, an point cloud which is obtained by embedding + a single time-series data into R^3 is as follows. + + .. code-block:: none + + time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + point clouds = [[1, 4, 7], + [3, 6, 9]] + """ def __init__(self, dim=3, delay=1, skip=1): self._dim = dim self._delay = delay self._skip = skip - def __call__(self, *args, **kwargs): - return self.transform(*args, **kwargs) + def __call__(self, ts): + """Transform method for single time-series data. + Parameters + ---------- + ts : list[float] + A single time-series data. + Returns + ------- + point clouds : list[list[float, float, float]] + Makes point cloud every a single time-series data. + Raises + ------- + TypeError + If the parameter's type does not match the desired type. 
+ """ + ndts = np.array(ts) + if ndts.ndim == 1: + return self._transform(ndts) + else: + raise TypeError("Expects 1-dimensional array.") + def fit(self, ts, y=None): + return self + def _transform(self, ts): """Guts of transform method.""" return ts[ @@ -43,22 +68,27 @@ class TimeDelayEmbedding: ] def transform(self, ts): - """Transform method. - + """Transform method for multiple time-series data. Parameters ---------- - ts : list[float] or list[list[float]] - A single or multiple time-series data. - + ts : list[list[float]] + Multiple time-series data. + Attributes + ---------- + ndts : + The ndts means that all time series need to have exactly + the same size. Returns ------- - point clouds : list[list[float, float, float]] or list[list[list[float, float, float]]] + point clouds : list[list[list[float, float, float]]] Makes point cloud every a single time-series data. + Raises + ------- + TypeError + If the parameter's type does not match the desired type. """ ndts = np.array(ts) - if ndts.ndim == 1: - # for single. - return self._transform(ndts).tolist() + if ndts.ndim == 2: + return np.apply_along_axis(self._transform, 1, ndts) else: - # for multiple. - return np.apply_along_axis(self._transform, 1, ndts).tolist() + raise TypeError("Expects 2-dimensional array.") -- cgit v1.2.3 From 2253fd03bb49aea455309f6d633a6edeb2362d79 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 17:52:07 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index 2ee0c1fb..d2ffbf40 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -6,30 +6,30 @@ def test_normal(): # Normal case. 
prep = TimeDelayEmbedding() attractor = prep(ts) - assert (attractor[0] == [1, 2, 3]) - assert (attractor[1] == [2, 3, 4]) - assert (attractor[2] == [3, 4, 5]) - assert (attractor[3] == [4, 5, 6]) - assert (attractor[4] == [5, 6, 7]) - assert (attractor[5] == [6, 7, 8]) - assert (attractor[6] == [7, 8, 9]) - assert (attractor[7] == [8, 9, 10]) + assert (attractor[0] == np.array[1, 2, 3]) + assert (attractor[1] == np.array[2, 3, 4]) + assert (attractor[2] == np.array[3, 4, 5]) + assert (attractor[3] == np.array[4, 5, 6]) + assert (attractor[4] == np.array[5, 6, 7]) + assert (attractor[5] == np.array[6, 7, 8]) + assert (attractor[6] == np.array[7, 8, 9]) + assert (attractor[7] == np.array[8, 9, 10]) # Delay = 3 prep = TimeDelayEmbedding(delay=3) attractor = prep(ts) - assert (attractor[0] == [1, 4, 7]) - assert (attractor[1] == [2, 5, 8]) - assert (attractor[2] == [3, 6, 9]) - assert (attractor[3] == [4, 7, 10]) + assert (attractor[0] == np.array[1, 4, 7]) + assert (attractor[1] == np.array[2, 5, 8]) + assert (attractor[2] == np.array[3, 6, 9]) + assert (attractor[3] == np.array[4, 7, 10]) # Skip = 3 prep = TimeDelayEmbedding(skip=3) attractor = prep(ts) - assert (attractor[0] == [1, 2, 3]) - assert (attractor[1] == [4, 5, 6]) - assert (attractor[2] == [7, 8, 9]) + assert (attractor[0] == np.array[1, 2, 3]) + assert (attractor[1] == np.array[4, 5, 6]) + assert (attractor[2] == np.array[7, 8, 9]) # Delay = 2 / Skip = 2 prep = TimeDelayEmbedding(delay=2, skip=2) attractor = prep(ts) - assert (attractor[0] == [1, 3, 5]) - assert (attractor[1] == [3, 5, 7]) - assert (attractor[2] == [5, 7, 9]) + assert (attractor[0] == np.array[1, 3, 5]) + assert (attractor[1] == np.array[3, 5, 7]) + assert (attractor[2] == np.array[5, 7, 9]) -- cgit v1.2.3 From f58a4120b70487aede3cb4e81fbb15171e34fa37 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 18:24:18 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index d2ffbf40..1cdf56f9 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -6,30 +6,30 @@ def test_normal(): # Normal case. 
prep = TimeDelayEmbedding() attractor = prep(ts) - assert (attractor[0] == np.array[1, 2, 3]) - assert (attractor[1] == np.array[2, 3, 4]) - assert (attractor[2] == np.array[3, 4, 5]) - assert (attractor[3] == np.array[4, 5, 6]) - assert (attractor[4] == np.array[5, 6, 7]) - assert (attractor[5] == np.array[6, 7, 8]) - assert (attractor[6] == np.array[7, 8, 9]) - assert (attractor[7] == np.array[8, 9, 10]) + assert (attractor[0] == np.array([1, 2, 3])) + assert (attractor[1] == np.array([2, 3, 4])) + assert (attractor[2] == np.array([3, 4, 5])) + assert (attractor[3] == np.array([4, 5, 6])) + assert (attractor[4] == np.array([5, 6, 7])) + assert (attractor[5] == np.array([6, 7, 8])) + assert (attractor[6] == np.array([7, 8, 9])) + assert (attractor[7] == np.array([8, 9, 10])) # Delay = 3 prep = TimeDelayEmbedding(delay=3) attractor = prep(ts) - assert (attractor[0] == np.array[1, 4, 7]) - assert (attractor[1] == np.array[2, 5, 8]) - assert (attractor[2] == np.array[3, 6, 9]) - assert (attractor[3] == np.array[4, 7, 10]) + assert (attractor[0] == np.array([1, 4, 7])) + assert (attractor[1] == np.array([2, 5, 8])) + assert (attractor[2] == np.array([3, 6, 9])) + assert (attractor[3] == np.array([4, 7, 10])) # Skip = 3 prep = TimeDelayEmbedding(skip=3) attractor = prep(ts) - assert (attractor[0] == np.array[1, 2, 3]) - assert (attractor[1] == np.array[4, 5, 6]) - assert (attractor[2] == np.array[7, 8, 9]) + assert (attractor[0] == np.array([1, 2, 3])) + assert (attractor[1] == np.array([4, 5, 6])) + assert (attractor[2] == np.array([7, 8, 9])) # Delay = 2 / Skip = 2 prep = TimeDelayEmbedding(delay=2, skip=2) attractor = prep(ts) - assert (attractor[0] == np.array[1, 3, 5]) - assert (attractor[1] == np.array[3, 5, 7]) - assert (attractor[2] == np.array[5, 7, 9]) + assert (attractor[0] == np.array([1, 3, 5])) + assert (attractor[1] == np.array([3, 5, 7])) + assert (attractor[2] == np.array([5, 7, 9])) -- cgit v1.2.3 From 1c0f48fb26bb2e606dfe0a22e62618357686e2c2 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 18:49:27 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index 1cdf56f9..3b586ad2 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -1,4 +1,5 @@ from gudhi.point_cloud.timedelay import TimeDelayEmbedding +import numpy as np def test_normal(): # Sample array -- cgit v1.2.3 From 39873c0cf43ca7352dddeab8c1cc6a3fc40a2e58 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 19:08:50 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index 3b586ad2..7b6562a5 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -7,7 +7,8 @@ def test_normal(): # Normal case. 
prep = TimeDelayEmbedding() attractor = prep(ts) - assert (attractor[0] == np.array([1, 2, 3])) + assert (attractor[0] == np.array([1, 2, 3]) + print(attractor[0].all())) assert (attractor[1] == np.array([2, 3, 4])) assert (attractor[2] == np.array([3, 4, 5])) assert (attractor[3] == np.array([4, 5, 6])) -- cgit v1.2.3 From 7c6966ee9821aaeb60d282616445a47071ac1fee Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 19:20:25 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index 7b6562a5..f652fc88 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -7,31 +7,30 @@ def test_normal(): # Normal case. prep = TimeDelayEmbedding() attractor = prep(ts) - assert (attractor[0] == np.array([1, 2, 3]) - print(attractor[0].all())) - assert (attractor[1] == np.array([2, 3, 4])) - assert (attractor[2] == np.array([3, 4, 5])) - assert (attractor[3] == np.array([4, 5, 6])) - assert (attractor[4] == np.array([5, 6, 7])) - assert (attractor[5] == np.array([6, 7, 8])) - assert (attractor[6] == np.array([7, 8, 9])) - assert (attractor[7] == np.array([8, 9, 10])) + assert (attractor[0].all() == np.array([1, 2, 3])) + assert (attractor[1].all() == np.array([2, 3, 4])) + assert (attractor[2].all() == np.array([3, 4, 5])) + assert (attractor[3].all() == np.array([4, 5, 6])) + assert (attractor[4].all() == np.array([5, 6, 7])) + assert (attractor[5].all() == np.array([6, 7, 8])) + assert (attractor[6].all() == np.array([7, 8, 9])) + assert (attractor[7].all() == np.array([8, 9, 10])) # Delay = 3 prep = TimeDelayEmbedding(delay=3) attractor = prep(ts) - assert (attractor[0] == np.array([1, 4, 7])) - assert (attractor[1] == np.array([2, 5, 8])) - assert (attractor[2] == np.array([3, 6, 9])) - assert (attractor[3] == np.array([4, 7, 10])) + assert (attractor[0].all() == np.array([1, 4, 7])) + assert (attractor[1].all() == np.array([2, 5, 8])) + assert (attractor[2].all() == np.array([3, 6, 9])) + assert (attractor[3].all() == np.array([4, 7, 10])) # Skip = 3 prep = TimeDelayEmbedding(skip=3) attractor = prep(ts) - assert (attractor[0] == np.array([1, 2, 3])) - assert (attractor[1] == np.array([4, 5, 6])) - assert (attractor[2] == np.array([7, 8, 9])) + assert (attractor[0].all() == np.array([1, 2, 3])) + assert (attractor[1].all() == np.array([4, 5, 6])) + assert (attractor[2].all() == np.array([7, 8, 9])) # Delay = 2 / Skip = 2 prep = TimeDelayEmbedding(delay=2, skip=2) attractor = prep(ts) - assert (attractor[0] == np.array([1, 3, 5])) - assert (attractor[1] == np.array([3, 5, 7])) - assert (attractor[2] == np.array([5, 7, 9])) + assert (attractor[0].all() == np.array([1, 3, 5])) + assert (attractor[1].all() == np.array([3, 5, 7])) + assert (attractor[2].all() == np.array([5, 7, 9])) -- cgit v1.2.3 From 5023aa0ff30474a96783152844e7fb0ed52e0c98 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Fri, 14 Feb 2020 20:25:14 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index f652fc88..5464a185 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -7,30 +7,30 @@ def test_normal(): # Normal case. 
prep = TimeDelayEmbedding() attractor = prep(ts) - assert (attractor[0].all() == np.array([1, 2, 3])) - assert (attractor[1].all() == np.array([2, 3, 4])) - assert (attractor[2].all() == np.array([3, 4, 5])) - assert (attractor[3].all() == np.array([4, 5, 6])) - assert (attractor[4].all() == np.array([5, 6, 7])) - assert (attractor[5].all() == np.array([6, 7, 8])) - assert (attractor[6].all() == np.array([7, 8, 9])) - assert (attractor[7].all() == np.array([8, 9, 10])) + assert (attractor[0] == np.array([1, 2, 3])).all() + assert (attractor[1] == np.array([2, 3, 4])).all() + assert (attractor[2] == np.array([3, 4, 5])).all() + assert (attractor[3] == np.array([4, 5, 6])).all() + assert (attractor[4] == np.array([5, 6, 7])).all() + assert (attractor[5] == np.array([6, 7, 8])).all() + assert (attractor[6] == np.array([7, 8, 9])).all() + assert (attractor[7] == np.array([8, 9, 10])).all() # Delay = 3 prep = TimeDelayEmbedding(delay=3) attractor = prep(ts) - assert (attractor[0].all() == np.array([1, 4, 7])) - assert (attractor[1].all() == np.array([2, 5, 8])) - assert (attractor[2].all() == np.array([3, 6, 9])) - assert (attractor[3].all() == np.array([4, 7, 10])) + assert (attractor[0] == np.array([1, 4, 7])).all() + assert (attractor[1] == np.array([2, 5, 8])).all() + assert (attractor[2] == np.array([3, 6, 9])).all() + assert (attractor[3] == np.array([4, 7, 10])).all() # Skip = 3 prep = TimeDelayEmbedding(skip=3) attractor = prep(ts) - assert (attractor[0].all() == np.array([1, 2, 3])) - assert (attractor[1].all() == np.array([4, 5, 6])) - assert (attractor[2].all() == np.array([7, 8, 9])) + assert (attractor[0] == np.array([1, 2, 3])).all() + assert (attractor[1] == np.array([4, 5, 6])).all() + assert (attractor[2] == np.array([7, 8, 9])).all() # Delay = 2 / Skip = 2 prep = TimeDelayEmbedding(delay=2, skip=2) attractor = prep(ts) - assert (attractor[0].all() == np.array([1, 3, 5])) - assert (attractor[1].all() == np.array([3, 5, 7])) - assert (attractor[2].all() == np.array([5, 7, 9])) + assert (attractor[0] == np.array([1, 3, 5])).all() + assert (attractor[1] == np.array([3, 5, 7])).all() + assert (attractor[2] == np.array([5, 7, 9])).all() -- cgit v1.2.3 From dc4442bc402ac25290eb529b57407607434bb7ae Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 14 Feb 2020 14:53:51 +0100 Subject: barycenter update, adding more tests and details about log (assigments, cost, nb iter) --- src/python/gudhi/barycenter.py | 125 +++++++++++-------------- src/python/test/test_wasserstein_barycenter.py | 15 ++- 2 files changed, 69 insertions(+), 71 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 11098afe..4a00c457 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -2,6 +2,7 @@ import ot import numpy as np import scipy.spatial.distance as sc +from wasserstein import _build_dist_matrix, _perstot # This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. # See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. @@ -20,42 +21,19 @@ def _proj_on_diag(w): return np.array([(w[0] + w[1])/2 , (w[0] + w[1])/2]) -def _proj_on_diag_array(X): - ''' - :param X: (n x 2) array encoding the points of a persistent diagram. - :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal - ''' - Z = (X[:,0] + X[:,1]) / 2. 
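# Editorial aside, not part of the patch: why this final form works where
# the previous revisions did not. `attractor[0] == np.array([1, 2, 3])` is
# an elementwise comparison that returns a boolean *array*, so handing it
# straight to `assert` raises "The truth value of an array with more than
# one element is ambiguous". The earlier `np.array[1, 2, 3]` never even got
# that far: it subscripts the `np.array` function object itself and raises
# a TypeError. Putting `.all()` on the left-hand side, as in the previous
# commit, reduces `attractor[0]` to a single truth value *before*
# comparing, so equality is no longer checked element by element. The
# pattern below, `(x == y).all()`, compares first and reduces second,
# producing the single boolean that `assert` expects;
# `numpy.testing.assert_array_equal(x, y)` would be an equivalent and
# arguably clearer alternative.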
- return np.array([Z , Z]).T - - -def _build_dist_matrix(X, Y, p=2., q=2.): - ''' - :param X: (n x 2) numpy.array encoding the (points of the) first diagram. - :param Y: (m x 2) numpy.array encoding the second diagram. - :param q: Ground metric (i.e. norm l_q). - :param p: exponent for the Wasserstein metric. - :returns: (n+1) x (m+1) np.array encoding the cost matrix C. - For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal. - note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal). - Note that for lagrangian_barycenter, one must use p=q=2. - ''' - Xdiag = _proj_on_diag_array(X) - Ydiag = _proj_on_diag_array(Y) - if np.isinf(q): - C = sc.cdist(X, Y, metric='chebyshev')**p - Cxd = np.linalg.norm(X - Xdiag, ord=q, axis=1)**p - Cdy = np.linalg.norm(Y - Ydiag, ord=q, axis=1)**p +def _mean(x, m): + """ + :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} + :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal + :returns: the weighted mean of x with (m-k) copies of the diagonal + """ + k = len(x) + if k > 0: + w = np.mean(x, axis=0) + w_delta = _proj_on_diag(w) + return (k * w + (m-k) * w_delta) / m else: - C = sc.cdist(X,Y, metric='minkowski', p=q)**p - Cxd = np.linalg.norm(X - Xdiag, ord=q, axis=1)**p - Cdy = np.linalg.norm(Y - Ydiag, ord=q, axis=1)**p - Cf = np.hstack((C, Cxd[:,None])) - Cdy = np.append(Cdy, 0) - - Cf = np.vstack((Cf, Cdy[None,:])) - - return Cf + return np.array([0, 0]) def _optimal_matching(X, Y, withcost=False): @@ -64,63 +42,63 @@ def _optimal_matching(X, Y, withcost=False): :param Y: numpy.array of size (m x 2) :param withcost: returns also the cost corresponding to this optimal matching :returns: numpy.array of shape (k x 2) encoding the list of edges in the optimal matching. - That is, [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] - if i > len(X) or j > len(Y), it means they represent the diagonal. - + That is, [[i, j] ...], where (i,j) indicates that X[i] is matched to Y[j] + if i >= len(X) or j >= len(Y), it means they represent the diagonal. + They will be encoded by -1 afterwards. """ n = len(X) m = len(Y) + # Start by handling empty diagrams. Could it be shorten? if X.size == 0: # X is empty if Y.size == 0: # Y is empty - return np.array([[0,0]]) # the diagonal is matched to the diagonal and that's it... - else: - return np.column_stack([np.zeros(m+1, dtype=int), np.arange(m+1, dtype=int)]) + res = np.array([[0,0]]) # the diagonal is matched to the diagonal and that's it... + if withcost: + return res, 0 + else: + return res + else: # X is empty but not Y + res = np.array([[0, i] for i in range(m)]) + cost = _perstot(Y, order=2, internal_p=2)**2 + if withcost: + return res, cost + else: + return res elif Y.size == 0: # X is not empty but Y is empty - return np.column_stack([np.zeros(n+1, dtype=int), np.arange(n+1, dtype=int)]) - + res = np.array([[i,0] for i in range(n)]) + cost = _perstot(X, order=2, internal_p=2)**2 + if withcost: + return res, cost + else: + return res + # we know X, Y are not empty diags now - M = _build_dist_matrix(X, Y) + M = _build_dist_matrix(X, Y, order=2, internal_p=2) a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT b = np.full(m+1, 1. 
/ (n + m) ) # weight vector of the input diagram. Uniform here. b[-1] = b[-1] * n # so that we have a probability measure, required by POT P = ot.emd(a=a, b=b, M=M)*(n+m) - # Note : it seems POT return a permutation matrix in this situation, ie a vertex of the constraint set (generically true). + # Note : it seems POT returns a permutation matrix in this situation, ie a vertex of the constraint set (generically true). if withcost: - cost = np.sqrt(np.sum(np.multiply(P, M))) + cost = np.sum(np.multiply(P, M)) P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to be improved. - # return the list of (i,j) such that P[i,j] > 0, i.e. x_i is matched to y_j (should it be the diag). res = np.nonzero(P) + # return the list of (i,j) such that P[i,j] > 0, i.e. x_i is matched to y_j (should it be the diag). if withcost: return np.column_stack(res), cost return np.column_stack(res) -def _mean(x, m): - """ - :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} - :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal - :returns: the weighted mean of x with (m-k) copies of the diagonal - """ - k = len(x) - if k > 0: - w = np.mean(x, axis=0) - w_delta = _proj_on_diag(w) - return (k * w + (m-k) * w_delta) / m - else: - return np.array([0, 0]) - - def lagrangian_barycenter(pdiagset, init=None, verbose=False): """ Compute the estimated barycenter computed with the algorithm provided by Turner et al (2014). It is a local minimum of the corresponding Frechet function. - :param pdiagset: a list of size N containing numpy.array of shape (n x 2) + :param pdiagset: a list of size m containing numpy.array of shape (n x 2) (n can variate), encoding a set of persistence diagrams with only finite coordinates. :param init: The initial value for barycenter estimate. @@ -134,10 +112,13 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): If verbose, returns a couple (Y, log) where Y is the barycenter estimate, and log is a dict that contains additional informations: - - assigments, a list of list of pairs (i,j), - That is, a[k] = [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] + - groupings, a list of list of pairs (i,j), + That is, G[k] = [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] if i > len(X) or j > len(Y), it means they represent the diagonal. - - energy, a float representing the Frechet mean value obtained. + - energy, a float representing the Frechet energy value obtained, + that is the mean of squared distances of observations to the output. + - nb_iter, integer representing the number of iterations performed before convergence + of the algorithm. """ X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging @@ -157,8 +138,11 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): else: Y = init.copy() + nb_iter = 0 + converged = False # stoping criterion while not converged: + nb_iter += 1 K = len(Y) # current nb of points in Y (some might be on diagonal) G = np.zeros((K, m), dtype=int)-1 # will store for each j, the (index) point matched in each other diagram (might be the diagonal). 
# that is G[j, i] = k <=> y_j is matched to @@ -185,7 +169,6 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): new_created_points.append(new_y) # Step 2 : Update current point position thanks to the groupings computed - to_delete = [] for j in range(K): matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1] @@ -214,12 +197,16 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): n_y = len(Y) for i in range(m): edges, cost = _optimal_matching(Y, X[i], withcost=True) - print(edges) - groupings.append([x_i_j for (y_j, x_i_j) in enumerate(edges) if y_j < n_y]) + n_x = len(X[i]) + G = edges[np.where(edges[:,0]= n_x) + G[idx,1] = -1 # -1 will encode the diagonal + groupings.append(G) energy += cost log["groupings"] = groupings energy = energy/m log["energy"] = energy + log["nb_iter"] = nb_iter return Y, log else: diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index 910d23ff..07242582 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -27,7 +27,18 @@ def test_lagrangian_barycenter(): res = np.array([[0.27916667, 0.55416667], [0.7375, 0.7625], [0.2375, 0.2625]]) dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) + dg8 = np.array([[0., 4.]]) + + # error crit. + eps = 0.000001 - assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < 0.001 + + assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2))) - assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < 0.001 + assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps + Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True) + assert np.linalg.norm(Y - np.array([[1,3]])) < eps + assert np.abs(log["energy"] - 2) < eps + assert np.array_equal(log["groupings"][0] , np.array([[0, -1]])) + assert np.array_equal(log["groupings"][1] , np.array([[0, 0]])) + assert lagrangian_barycenter(pdiagset = []) is None -- cgit v1.2.3 From dc5c7ac2167bfa467b52d0a36ecb9999fe03ba91 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 14 Feb 2020 14:58:53 +0100 Subject: added two more tests for barycenter --- src/python/test/test_wasserstein_barycenter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index 07242582..a58a4d62 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -41,4 +41,5 @@ def test_lagrangian_barycenter(): assert np.abs(log["energy"] - 2) < eps assert np.array_equal(log["groupings"][0] , np.array([[0, -1]])) assert np.array_equal(log["groupings"][1] , np.array([[0, 0]])) + assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3]])) < eps assert lagrangian_barycenter(pdiagset = []) is None -- cgit v1.2.3 From 3eaba12b66518717e90ffb1e410b7f8d769719cf Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 14 Feb 2020 15:41:23 +0100 Subject: update import gudhi.wasserstein --- src/python/gudhi/barycenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 4a00c457..a2af7a58 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -2,7 +2,7 @@ import ot import numpy as np import scipy.spatial.distance as sc -from wasserstein import _build_dist_matrix, _perstot +from gudhi.wasserstein import _build_dist_matrix, _perstot # This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. # See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. -- cgit v1.2.3 From 34e1ae726e27fdd7c41f6d80d8ed7f6504dc3a0d Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 14 Feb 2020 18:16:27 +0100 Subject: Global improvement of rendering with Python tools --- src/python/gudhi/persistence_graphical_tools.py | 92 +++++++++++++++++++++---- 1 file changed, 77 insertions(+), 15 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 246280de..4a690241 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -5,6 +5,7 @@ # Copyright (C) 2016 Inria # # Modification(s): +# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility. # - YYYY/MM Author: Description of the modification from os import path @@ -43,6 +44,7 @@ def __min_birth_max_death(persistence, band=0.0): max_death += band return (min_birth, max_death) + def plot_persistence_barcode( persistence=[], persistence_file="", @@ -52,7 +54,9 @@ def plot_persistence_barcode( inf_delta=0.1, legend=False, colormap=None, - axes=None + axes=None, + fontsize=16, + title="Persistence barcode" ): """This function plots the persistence bar code from persistence values list or from a :doc:`persistence file `. @@ -81,11 +85,18 @@ def plot_persistence_barcode( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param fontsize: Fontsize to use in axis. + :type fontsize: int + :param title: title for the plot. + :type title: string :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') if persistence_file != "": if path.isfile(persistence_file): @@ -163,7 +174,7 @@ def plot_persistence_barcode( loc="lower right", ) - axes.set_title("Persistence barcode") + axes.set_title(title) # Ends plot on infinity value and starts a little bit before min_birth axes.axis([axis_start, infinity, 0, ind]) @@ -183,7 +194,11 @@ def plot_persistence_diagram( inf_delta=0.1, legend=False, colormap=None, - axes=None + axes=None, + aspect_equal=False, + fontsize=16, + title="Persistence diagram", + greyblock=True ): """This function plots the persistence diagram from persistence values list or from a :doc:`persistence file `. @@ -214,11 +229,23 @@ def plot_persistence_diagram( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param aspect_equal: if True, force plot to be square shaped. + :type aspect_equal: boolean + :param fontsize: Fontsize to use in axis. + :type fontsize: int + :param title: title for the plot. + :type title: string + :param greyblock: if we want to plot a grey patch on the lower half plane for nicer rendering. Default True. 
+ :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') + if persistence_file != "": if path.isfile(persistence_file): @@ -256,18 +283,27 @@ def plot_persistence_diagram( # Replace infinity values with max_death + delta for diagram to be more # readable infinity = max_death + delta + axis_end = max_death + delta / 2 axis_start = min_birth - delta # line display of equation : birth = death x = np.linspace(axis_start, infinity, 1000) # infinity line and text - axes.plot(x, x, color="k", linewidth=1.0) - axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha) - axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha) + axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k") + axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha) + # Infinity label + yt = axes.get_yticks() + yt = np.append(yt, infinity) + ytl = yt.tolist() + ytl[-1] = r'$+\infty$' + axes.set_yticks(yt) + axes.set_yticklabels(ytl) # bootstrap band if band > 0.0: axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red") - + # lower diag patch + if greyblock: + axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey')) # Draw points in loop for interval in reversed(persistence): if float(interval[1][1]) != float("inf"): @@ -293,11 +329,13 @@ def plot_persistence_diagram( ] ) - axes.set_xlabel("Birth") - axes.set_ylabel("Death") + axes.set_xlabel("Birth", fontsize=fontsize) + axes.set_ylabel("Death", fontsize=fontsize) # Ends plot on infinity value and starts a little bit before min_birth - axes.axis([axis_start, infinity, axis_start, infinity + delta]) - axes.set_title("Persistence diagram") + axes.axis([axis_start, axis_end, axis_start, infinity + delta]) + axes.set_title(title, fontsize=fontsize) # a different fontsize for the title? + if aspect_equal: + axes.set_aspect("equal") return axes except ImportError: @@ -313,7 +351,11 @@ def plot_persistence_density( dimension=None, cmap=None, legend=False, - axes=None + axes=None, + aspect_equal=False, + fontsize=16, + title="Persistence density", + greyblock=True ): """This function plots the persistence density from persistence values list or from a :doc:`persistence file `. Be @@ -355,11 +397,25 @@ def plot_persistence_density( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param aspect_equal: if True, force plot to be square shaped. + :type aspect_equal: boolean + :param fontsize: Fontsize to use in axis. + :type fontsize: int + :param title: title for the plot. + :type title: string + :param greyblock: if we want to plot a grey patch on the lower half plane + for nicer rendering. Default True. + :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. 
""" try: import matplotlib.pyplot as plt + import matplotlib.patches as mpatches from scipy.stats import kde + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') + if persistence_file != "": if dimension is None: @@ -418,12 +474,18 @@ def plot_persistence_density( # Make the plot img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap) + if greyblock: + axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey')) + if legend: plt.colorbar(img, ax=axes) - axes.set_xlabel("Birth") - axes.set_ylabel("Death") - axes.set_title("Persistence density") + axes.set_xlabel("Birth", fontsize=fontsize) + axes.set_ylabel("Death", fontsize=fontsize) + axes.set_title(title, fontsize=fontsize) + if aspect_equal: + axes.set_aspect("equal") + return axes except ImportError: -- cgit v1.2.3 From f8fe3fdb01f6161b57da732a1c3f0c14a8b359a6 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 14 Feb 2020 18:45:34 +0100 Subject: moved import after docstring + reduce lines < 80 char --- src/python/gudhi/barycenter.py | 99 +++++++++++++++++++++++++----------------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index a2af7a58..4a877b4a 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -1,9 +1,3 @@ -import ot -import numpy as np -import scipy.spatial.distance as sc - -from gudhi.wasserstein import _build_dist_matrix, _perstot - # This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. # See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. # Author(s): Theo Lacombe @@ -14,6 +8,13 @@ from gudhi.wasserstein import _build_dist_matrix, _perstot # - YYYY/MM Author: Description of the modification +import ot +import numpy as np +import scipy.spatial.distance as sc + +from gudhi.wasserstein import _build_dist_matrix, _perstot + + def _proj_on_diag(w): ''' Util function to project a point on the diag. @@ -24,7 +25,8 @@ def _proj_on_diag(w): def _mean(x, m): """ :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} - :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal + :param m: total amount of points taken into account, + that is we have (m-k) copies of diagonal :returns: the weighted mean of x with (m-k) copies of the diagonal """ k = len(x) @@ -40,11 +42,14 @@ def _optimal_matching(X, Y, withcost=False): """ :param X: numpy.array of size (n x 2) :param Y: numpy.array of size (m x 2) - :param withcost: returns also the cost corresponding to this optimal matching - :returns: numpy.array of shape (k x 2) encoding the list of edges in the optimal matching. - That is, [[i, j] ...], where (i,j) indicates that X[i] is matched to Y[j] - if i >= len(X) or j >= len(Y), it means they represent the diagonal. - They will be encoded by -1 afterwards. + :param withcost: returns also the cost corresponding to the optimal matching + :returns: numpy.array of shape (k x 2) encoding the list of edges + in the optimal matching. + That is, [[i, j] ...], where (i,j) indicates + that X[i] is matched to Y[j] + if i >= len(X) or j >= len(Y), it means they + represent the diagonal. + They will be encoded by -1 afterwards. """ n = len(X) @@ -52,7 +57,7 @@ def _optimal_matching(X, Y, withcost=False): # Start by handling empty diagrams. Could it be shorten? 
if X.size == 0: # X is empty if Y.size == 0: # Y is empty - res = np.array([[0,0]]) # the diagonal is matched to the diagonal and that's it... + res = np.array([[0,0]]) # the diagonal is matched to the diagonal if withcost: return res, 0 else: @@ -75,18 +80,20 @@ def _optimal_matching(X, Y, withcost=False): # we know X, Y are not empty diags now M = _build_dist_matrix(X, Y, order=2, internal_p=2) - a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. - a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT - b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. - b[-1] = b[-1] * n # so that we have a probability measure, required by POT + a = np.full(n+1, 1. / (n + m) ) + a[-1] = a[-1] * m + b = np.full(m+1, 1. / (n + m) ) + b[-1] = b[-1] * n P = ot.emd(a=a, b=b, M=M)*(n+m) - # Note : it seems POT returns a permutation matrix in this situation, ie a vertex of the constraint set (generically true). + # Note : it seems POT returns a permutation matrix in this situation, + # ie a vertex of the constraint set (generically true). if withcost: cost = np.sum(np.multiply(P, M)) - P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to be improved. + P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to improve. res = np.nonzero(P) - # return the list of (i,j) such that P[i,j] > 0, i.e. x_i is matched to y_j (should it be the diag). + # return the list of (i,j) such that P[i,j] > 0, + #i.e. x_i is matched to y_j (should it be the diag). if withcost: return np.column_stack(res), cost @@ -103,31 +110,38 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): persistence diagrams with only finite coordinates. :param init: The initial value for barycenter estimate. If None, init is made on a random diagram from the dataset. - Otherwise, it must be an int (then we init with diagset[init]) - or a (n x 2) numpy.array enconding a persistence diagram with n points. + Otherwise, it must be an int + (then we init with diagset[init]) + or a (n x 2) numpy.array enconding + a persistence diagram with n points. :param verbose: if True, returns additional information about the barycenter. :returns: If not verbose (default), a numpy.array encoding - the barycenter estimate (local minima of the energy function). + the barycenter estimate + (local minima of the energy function). If verbose, returns a couple (Y, log) where Y is the barycenter estimate, and log is a dict that contains additional informations: - groupings, a list of list of pairs (i,j), - That is, G[k] = [(i, j) ...], where (i,j) indicates that X[i] is matched to Y[j] - if i > len(X) or j > len(Y), it means they represent the diagonal. - - energy, a float representing the Frechet energy value obtained, - that is the mean of squared distances of observations to the output. - - nb_iter, integer representing the number of iterations performed before convergence - of the algorithm. + That is, G[k] = [(i, j) ...], where (i,j) indicates + that X[i] is matched to Y[j] + if i > len(X) or j > len(Y), it means they + represent the diagonal. + - energy, a float representing the Frechet + energy value obtained, + that is the mean of squared distances + of observations to the output. + - nb_iter, integer representing the number of iterations + performed before convergence of the algorithm. 
""" X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging if m == 0: print("Warning: computing barycenter of empty diag set. Returns None") return None - - nb_off_diag = np.array([len(X_i) for X_i in X]) # store the number of off-diagonal point for each of the X_i - + + # store the number of off-diagonal point for each of the X_i + nb_off_diag = np.array([len(X_i) for X_i in X]) # Initialisation of barycenter if init is None: i0 = np.random.randint(m) # Index of first state for the barycenter @@ -144,7 +158,9 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): while not converged: nb_iter += 1 K = len(Y) # current nb of points in Y (some might be on diagonal) - G = np.zeros((K, m), dtype=int)-1 # will store for each j, the (index) point matched in each other diagram (might be the diagonal). + G = np.zeros((K, m), dtype=int)-1 # will store for each j, the (index) + # point matched in each other diagram + #(might be the diagonal). # that is G[j, i] = k <=> y_j is matched to # x_k in the diagram i-th diagram X[i] updated_points = np.zeros((K, 2)) # will store the new positions of @@ -159,16 +175,19 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): indices = _optimal_matching(Y, X[i]) for y_j, x_i_j in indices: if y_j < K: # we matched an off diagonal point to x_i_j... - if x_i_j < nb_off_diag[i]: # ...which is also an off-diagonal point + # ...which is also an off-diagonal point. + if x_i_j < nb_off_diag[i]: G[y_j, i] = x_i_j else: # ...which is a diagonal point G[y_j, i] = -1 # -1 stands for the diagonal (mask) else: # We matched a diagonal point to x_i_j... - if x_i_j < nb_off_diag[i]: # which is a off-diag point ! so we need to create a new point in Y - new_y = _mean(np.array([X[i][x_i_j]]), m) # Average this point with (m-1) copies of Delta + if x_i_j < nb_off_diag[i]: # which is a off-diag point ! + # need to create new point in Y + new_y = _mean(np.array([X[i][x_i_j]]), m) + # Average this point with (m-1) copies of Delta new_created_points.append(new_y) - # Step 2 : Update current point position thanks to the groupings computed + # Step 2 : Update current point position thanks to groupings computed to_delete = [] for j in range(K): matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1] @@ -178,10 +197,10 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): else: # this points is no longer of any use. to_delete.append(j) # we remove the point to be deleted now. - updated_points = np.delete(updated_points, to_delete, axis=0) # cannot be done in-place. - + updated_points = np.delete(updated_points, to_delete, axis=0) - if new_created_points: # we cannot converge if there have been new created points. + # we cannot converge if there have been new created points. + if new_created_points: Y = np.concatenate((updated_points, new_created_points)) else: # Step 3 : we check convergence -- cgit v1.2.3 From 5e4bc93510f50dacdb59f1a7578aca72817c9631 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 17 Feb 2020 17:50:37 +0100 Subject: update doc + removed normalization + use argwhere --- src/python/doc/barycenter_user.rst | 7 ++++++- src/python/gudhi/barycenter.py | 29 ++++++++++++----------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst index 714d807e..f81e9358 100644 --- a/src/python/doc/barycenter_user.rst +++ b/src/python/doc/barycenter_user.rst @@ -9,7 +9,8 @@ Definition .. 
include:: barycenter_sum.inc -This implementation is based on ideas from "Frechet means for distribution of persistence diagrams", Turner et al. 2014. +This implementation is based on ideas from "Frechet means for distribution of +persistence diagrams", Turner et al. 2014. Function -------- @@ -21,6 +22,10 @@ Basic example This example computes the Frechet mean (aka Wasserstein barycenter) between four persistence diagrams. It is initialized on the 4th diagram, which is the empty diagram. It is encoded by np.array([]). +As the algorithm is not convex, its output depends on the initialization and is only a local minimum of the objective function. +Initialization can be either given as an integer (in which case the i-th diagram of the list is used as initial estimate) +or as a diagram. +If None, it will randomly select one of the diagram of the list as initial estimate. Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values. .. testcode:: diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 4a877b4a..c54066ec 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -15,12 +15,6 @@ import scipy.spatial.distance as sc from gudhi.wasserstein import _build_dist_matrix, _perstot -def _proj_on_diag(w): - ''' - Util function to project a point on the diag. - ''' - return np.array([(w[0] + w[1])/2 , (w[0] + w[1])/2]) - def _mean(x, m): """ @@ -32,7 +26,7 @@ def _mean(x, m): k = len(x) if k > 0: w = np.mean(x, axis=0) - w_delta = _proj_on_diag(w) + w_delta = (w[0] + w[1]) / 2 * np.ones(2) return (k * w + (m-k) * w_delta) / m else: return np.array([0, 0]) @@ -80,31 +74,32 @@ def _optimal_matching(X, Y, withcost=False): # we know X, Y are not empty diags now M = _build_dist_matrix(X, Y, order=2, internal_p=2) - a = np.full(n+1, 1. / (n + m) ) - a[-1] = a[-1] * m - b = np.full(m+1, 1. / (n + m) ) - b[-1] = b[-1] * n - P = ot.emd(a=a, b=b, M=M)*(n+m) + a = np.ones(n+1) + a[-1] = m + b = np.ones(m+1) + b[-1] = n + P = ot.emd(a=a, b=b, M=M) # Note : it seems POT returns a permutation matrix in this situation, # ie a vertex of the constraint set (generically true). if withcost: cost = np.sum(np.multiply(P, M)) P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to improve. - res = np.nonzero(P) + res = np.argwhere(P) # return the list of (i,j) such that P[i,j] > 0, #i.e. x_i is matched to y_j (should it be the diag). if withcost: - return np.column_stack(res), cost - - return np.column_stack(res) + return res, cost + return res def lagrangian_barycenter(pdiagset, init=None, verbose=False): """ - Compute the estimated barycenter computed with the algorithm provided + Returns the estimated barycenter computed with the algorithm provided by Turner et al (2014). + As the algorithm is not convex, the output depends on initialization. It is a local minimum of the corresponding Frechet function. + :param pdiagset: a list of size m containing numpy.array of shape (n x 2) (n can variate), encoding a set of persistence diagrams with only finite coordinates. 
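The weight vectors set up above are the heart of the method: partial matching between two diagrams is reduced to a balanced optimal transport problem by padding each diagram with one diagonal sink. Every real point carries mass 1 and each sink absorbs the other side's total mass, so both marginals sum to n + m. A self-contained sketch of that reduction, with an explicit cost matrix standing in for `_build_dist_matrix(X, Y, order=2, internal_p=2)` and hypothetical input diagrams:

```python
import numpy as np
import ot  # POT, the solver used by the module

X = np.array([[0.0, 1.0], [0.5, 2.0]])  # n = 2 off-diagonal points
Y = np.array([[0.0, 1.1]])              # m = 1 off-diagonal point
n, m = len(X), len(Y)

# (n+1) x (m+1) squared-distance matrix; the extra row and column stand
# for the diagonal. For a point (b, d), the squared distance to its
# orthogonal projection on the diagonal is (d - b)**2 / 2, and moving
# diagonal to diagonal costs nothing (M[n, m] stays 0).
M = np.zeros((n + 1, m + 1))
M[:n, :m] = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
M[:n, m] = (X[:, 1] - X[:, 0]) ** 2 / 2
M[n, :m] = (Y[:, 1] - Y[:, 0]) ** 2 / 2

a = np.ones(n + 1); a[-1] = m  # the masses used in the patch above
b = np.ones(m + 1); b[-1] = n
P = ot.emd(a=a, b=b, M=M)      # integral marginals, so P is a vertex

cost = np.sum(np.multiply(P, M))  # squared 2-Wasserstein cost
P[P < 0.5] = 0                    # same robustness trick as the patch
print(np.argwhere(P))  # matched pairs; index >= n (or m) means diagonal
print(cost)
```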
-- cgit v1.2.3 From 16e80e921e1edbc63398f7dbc342bd25d1f169de Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 17 Feb 2020 17:53:39 +0100 Subject: removed message about empty dgm --- src/python/doc/barycenter_user.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst index f81e9358..59f758fa 100644 --- a/src/python/doc/barycenter_user.rst +++ b/src/python/doc/barycenter_user.rst @@ -21,7 +21,7 @@ Basic example ------------- This example computes the Frechet mean (aka Wasserstein barycenter) between four persistence diagrams. -It is initialized on the 4th diagram, which is the empty diagram. It is encoded by np.array([]). +It is initialized on the 4th diagram. As the algorithm is not convex, its output depends on the initialization and is only a local minimum of the objective function. Initialization can be either given as an integer (in which case the i-th diagram of the list is used as initial estimate) or as a diagram. -- cgit v1.2.3 From a9b0d8185ecab51428c1aeeb3bf78787420103b2 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 17 Feb 2020 17:54:01 +0100 Subject: specified that the alg returns None if input is empty --- src/python/gudhi/barycenter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index c54066ec..dc9e8241 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -103,6 +103,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): :param pdiagset: a list of size m containing numpy.array of shape (n x 2) (n can variate), encoding a set of persistence diagrams with only finite coordinates. + If empty, returns None. :param init: The initial value for barycenter estimate. If None, init is made on a random diagram from the dataset. Otherwise, it must be an int -- cgit v1.2.3 From 9af4d25790b0ccc19cfba90f8ab492823fde4623 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 18 Feb 2020 21:10:14 -0400 Subject: Update hera --- ext/hera | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/hera b/ext/hera index 9a899718..cb1838e6 160000 --- a/ext/hera +++ b/ext/hera @@ -1 +1 @@ -Subproject commit 9a89971855acefe39dce0e2adadf53b88ca8f683 +Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7 -- cgit v1.2.3 From 80d84e5d8f9a24de745d23f7d721ea3e62217ff4 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Wed, 19 Feb 2020 12:32:00 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index d899da67..6ad87cdc 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -43,7 +43,7 @@ class TimeDelayEmbedding: A single time-series data. Returns ------- - point clouds : list[list[float, float, float]] + point clouds : list of n x 2 numpy arrays Makes point cloud every a single time-series data. Raises ------- @@ -80,7 +80,7 @@ class TimeDelayEmbedding: the same size. Returns ------- - point clouds : list[list[list[float, float, float]]] + point clouds : list of n x 3 numpy arrays Makes point cloud every a single time-series data. 
Raises ------- -- cgit v1.2.3 From 9b4258e4f5abb355670afb69d60f3002cb9c27b0 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Wed, 19 Feb 2020 08:29:27 +0100 Subject: Doc review: submodule rephrase [skip ci] --- .github/how_to_use_github_to_contribute_to_gudhi.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md index 0e7d42ef..0a6133b7 100644 --- a/.github/how_to_use_github_to_contribute_to_gudhi.md +++ b/.github/how_to_use_github_to_contribute_to_gudhi.md @@ -25,9 +25,10 @@ This creates a directory gudhi-devel, which you are free to move around or renam cd gudhi-devel ``` -## Submodule -An interface to Hera for Wasserstein distance is available on an external git repository. -Everytime you checkout master or merge from master, afterwards, you will need to run the command: +Everytime you clone the repository, you will have to download the *submodules*. + +## Submodules +An interface to Hera for Wasserstein distance is available on an external git repository. To download it: ```bash git submodule update --init ``` @@ -59,6 +60,11 @@ This is a command you can run quite regularly. It tells git to check all that happened on github. It is safe, it will not mess with your files. +**Reminder:** Everytime you checkout master or merge from master, afterwards, if the version of one the submodule has changed, or if a submodule was added, you will have to: +```bash +git submodule update --init +``` + ## Create a branch, based on the current master ```bash git checkout -b some-fancy-name --no-track upstream/master -- cgit v1.2.3 From f9e77bbeabf307dbc4e6f1521c1567fe9bf1123f Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Wed, 19 Feb 2020 08:49:15 +0100 Subject: Doc review: precaution on contribution [skip ci] --- .github/CONTRIBUTING.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 29fe0aaa..13d6cad7 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -36,8 +36,13 @@ We use Sphinx to generate the code and you will be able to verify the result in ## Something you want to improve in the code -Please first take some time to read our [code conventions](code_conventions.md) +We don't ask for any paperwork but we expect you don't submit anything you are not allowed to: +* check that your work contract and your employer allow you to contribute to this open source project. +* insure you do not violate someone's intellectual property. +* ... + +Please, take some time to read our [code conventions](code_conventions.md) As a convention, we set a Pull Request as a **Draft Pull Request** when we work on something we want the other contributors to see. -We click on **Ready for review** to ask for a peer review of the contribution. \ No newline at end of file +We click on **Ready for review** to ask for a peer review of the contribution. 
-- cgit v1.2.3 From 59f046cd0f405b124a6e08f26ca7b0248f707374 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 24 Feb 2020 10:14:09 +0100 Subject: update doc for barycenter --- src/python/doc/index.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index 3387a64f..96cd3513 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -71,6 +71,11 @@ Wasserstein distance .. include:: wasserstein_distance_sum.inc +Barycenter +============ + +.. include:: barycenter_sum.inc + Persistence representations =========================== -- cgit v1.2.3 From 3e15e9fe5bffb0ffcf8f7f3a0dac1c331646630a Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 24 Feb 2020 10:14:31 +0100 Subject: changed double quote into simple quote to be consistent with wasserstein.py --- src/python/gudhi/barycenter.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index dc9e8241..4e132c23 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -17,12 +17,12 @@ from gudhi.wasserstein import _build_dist_matrix, _perstot def _mean(x, m): - """ + ''' :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal :returns: the weighted mean of x with (m-k) copies of the diagonal - """ + ''' k = len(x) if k > 0: w = np.mean(x, axis=0) @@ -33,7 +33,7 @@ def _mean(x, m): def _optimal_matching(X, Y, withcost=False): - """ + ''' :param X: numpy.array of size (n x 2) :param Y: numpy.array of size (m x 2) :param withcost: returns also the cost corresponding to the optimal matching @@ -44,7 +44,7 @@ def _optimal_matching(X, Y, withcost=False): if i >= len(X) or j >= len(Y), it means they represent the diagonal. They will be encoded by -1 afterwards. - """ + ''' n = len(X) m = len(Y) @@ -94,7 +94,7 @@ def _optimal_matching(X, Y, withcost=False): def lagrangian_barycenter(pdiagset, init=None, verbose=False): - """ + ''' Returns the estimated barycenter computed with the algorithm provided by Turner et al (2014). As the algorithm is not convex, the output depends on initialization. @@ -129,7 +129,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): of observations to the output. - nb_iter, integer representing the number of iterations performed before convergence of the algorithm. - """ + ''' X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging if m == 0: -- cgit v1.2.3 From 2dc7b150576d959b489d3f52890242fd6a492171 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 24 Feb 2020 13:18:38 +0100 Subject: changed doc for CI ? --- src/python/gudhi/barycenter.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 4e132c23..a41b5906 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -95,11 +95,6 @@ def _optimal_matching(X, Y, withcost=False): def lagrangian_barycenter(pdiagset, init=None, verbose=False): ''' - Returns the estimated barycenter computed with the algorithm provided - by Turner et al (2014). - As the algorithm is not convex, the output depends on initialization. - It is a local minimum of the corresponding Frechet function. - :param pdiagset: a list of size m containing numpy.array of shape (n x 2) (n can variate), encoding a set of persistence diagrams with only finite coordinates. 
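As context for the docstring edits above, here is a minimal usage sketch of the function they document. The shape of the verbose output (a log carrying `energy` and `nb_iter` entries) is an assumption pieced together from the docstring fragments earlier in this series, not something this patch states:

```python
import numpy as np
from gudhi.barycenter import lagrangian_barycenter

pdiagset = [np.array([[0.2, 0.5], [0.3, 0.6]]),
            np.array([[0.1, 0.4]]),
            np.array([[0.25, 0.55], [0.4, 0.9]])]

# Default call: only the barycenter estimate (a (k x 2) numpy array).
Y = lagrangian_barycenter(pdiagset)

# With verbose=True the estimate comes with convergence information;
# the field names below are assumed from the docstring fragments above.
Y, log = lagrangian_barycenter(pdiagset, init=0, verbose=True)
print(log["energy"], log["nb_iter"])
```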
-- cgit v1.2.3 From 63e4222e528317b7e0385bf5881393ff2f97fa80 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 24 Feb 2020 16:08:19 +0100 Subject: Fix CI - bad use of skip continuous integration --- .github/for_maintainers/new_gudhi_version_creation.md | 3 --- CMakeLists.txt | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md index 4a40f373..f176d392 100644 --- a/.github/for_maintainers/new_gudhi_version_creation.md +++ b/.github/for_maintainers/new_gudhi_version_creation.md @@ -87,9 +87,6 @@ ln -s @GUDHI_VERSION@ latest * Tick the *This is a pre-release* check button if this is a release candidate (untick if this is an official version) * Click the *Publish the release* button -***[Where X, Y and Z corresponds respectively to the major, minor, and patch version number]*** - - ## Mail sending Send version mail to the following lists : * gudhi-devel@lists.gforge.inria.fr diff --git a/CMakeLists.txt b/CMakeLists.txt index d9244dc0..0b5f5144 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,7 +65,7 @@ include(GUDHI_user_version_target) # For "make doxygen" - Requires GUDHI_USER_VERSION_DIR to be set - Done in GUDHI_user_version_target for dev version include(GUDHI_doxygen_target) -configure_file(${CMAKE_SOURCE_DIR}/for_dev/for_maintainers/new_gudhi_version_creation.md "${CMAKE_CURRENT_BINARY_DIR}/" @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/.github/for_maintainers/new_gudhi_version_creation.md "${CMAKE_CURRENT_BINARY_DIR}/" @ONLY) message("++ GUDHI_MODULES list is:\"${GUDHI_MODULES}\"") message("++ GUDHI_MISSING_MODULES list is:\"${GUDHI_MISSING_MODULES}\"") -- cgit v1.2.3 From 88964b4ff10798d6d9c3d0a342c004ee6b8b1496 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Tue, 25 Feb 2020 13:21:55 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 89 +++++++++++++++---------------- 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index 6ad87cdc..d7a1dab7 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -8,10 +8,12 @@ import numpy as np + class TimeDelayEmbedding: """Point cloud transformation class. Embeds time-series data in the R^d according to Takens' Embedding Theorem and obtains the coordinates of each point. + Parameters ---------- dim : int, optional (default=3) @@ -20,16 +22,27 @@ class TimeDelayEmbedding: Time-Delay embedding. skip : int, optional (default=1) How often to skip embedded points. - Given delay=3 and skip=2, an point cloud which is obtained by embedding - a single time-series data into R^3 is as follows. - - .. 
code-block:: none - - time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - point clouds = [[1, 4, 7], - [3, 6, 9]] - + + Example + ------- + + Given delay=3 and skip=2, a point cloud which is obtained by embedding + a scalar time-series into R^3 is as follows:: + + time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + point cloud = [[1, 4, 7], + [3, 6, 9]] + + Given delay=1 and skip=1, a point cloud which is obtained by embedding + a 2D vector time-series data into R^4 is as follows:: + + time-series = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] + point cloud = [[0, 1, 2, 3], + [2, 3, 4, 5], + [4, 5, 6, 7], + [6, 7, 8, 9]] """ + def __init__(self, dim=3, delay=1, skip=1): self._dim = dim self._delay = delay @@ -39,56 +52,42 @@ class TimeDelayEmbedding: """Transform method for single time-series data. Parameters ---------- - ts : list[float] - A single time-series data. + ts : Iterable[float] or Iterable[Iterable[float]] + A single time-series data, with scalar or vector values. + Returns ------- - point clouds : list of n x 2 numpy arrays - Makes point cloud every a single time-series data. - Raises - ------- - TypeError - If the parameter's type does not match the desired type. + point cloud : n x dim numpy arrays + Makes point cloud from a single time-series data. """ - ndts = np.array(ts) - if ndts.ndim == 1: - return self._transform(ndts) - else: - raise TypeError("Expects 1-dimensional array.") + return self._transform(np.array(ts)) def fit(self, ts, y=None): return self def _transform(self, ts): """Guts of transform method.""" - return ts[ - np.add.outer( - np.arange(0, len(ts)-self._delay*(self._dim-1), self._skip), - np.arange(0, self._dim*self._delay, self._delay)) - ] + if ts.ndim == 1: + repeat = self._dim + else: + assert self._dim % ts.shape[1] == 0 + repeat = self._dim // ts.shape[1] + end = len(ts) - self._delay * (repeat - 1) + short = np.arange(0, end, self._skip) + vertical = np.arange(0, repeat * self._delay, self._delay) + return ts[np.add.outer(short, vertical)].reshape(len(short), -1) def transform(self, ts): """Transform method for multiple time-series data. + Parameters ---------- - ts : list[list[float]] - Multiple time-series data. - Attributes - ---------- - ndts : - The ndts means that all time series need to have exactly - the same size. + ts : Iterable[Iterable[float]] or Iterable[Iterable[Iterable[float]]] + Multiple time-series data, with scalar or vector values. + Returns ------- - point clouds : list of n x 3 numpy arrays - Makes point cloud every a single time-series data. - Raises - ------- - TypeError - If the parameter's type does not match the desired type. + point clouds : list of n x dim numpy arrays + Makes point cloud from each time-series data. 
""" - ndts = np.array(ts) - if ndts.ndim == 2: - return np.apply_along_axis(self._transform, 1, ndts) - else: - raise TypeError("Expects 2-dimensional array.") + return [self._transform(np.array(s)) for s in ts] -- cgit v1.2.3 From 66c96498b994fea1fcaa6877121023410f4209f9 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Tue, 25 Feb 2020 13:24:48 +0900 Subject: Update test_time_delay.py --- src/python/test/test_time_delay.py | 51 ++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py index 5464a185..1ead9bca 100755 --- a/src/python/test/test_time_delay.py +++ b/src/python/test/test_time_delay.py @@ -1,36 +1,43 @@ from gudhi.point_cloud.timedelay import TimeDelayEmbedding import numpy as np + def test_normal(): # Sample array ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # Normal case. prep = TimeDelayEmbedding() - attractor = prep(ts) - assert (attractor[0] == np.array([1, 2, 3])).all() - assert (attractor[1] == np.array([2, 3, 4])).all() - assert (attractor[2] == np.array([3, 4, 5])).all() - assert (attractor[3] == np.array([4, 5, 6])).all() - assert (attractor[4] == np.array([5, 6, 7])).all() - assert (attractor[5] == np.array([6, 7, 8])).all() - assert (attractor[6] == np.array([7, 8, 9])).all() - assert (attractor[7] == np.array([8, 9, 10])).all() + pointclouds = prep(ts) + assert (pointclouds[0] == np.array([1, 2, 3])).all() + assert (pointclouds[1] == np.array([2, 3, 4])).all() + assert (pointclouds[2] == np.array([3, 4, 5])).all() + assert (pointclouds[3] == np.array([4, 5, 6])).all() + assert (pointclouds[4] == np.array([5, 6, 7])).all() + assert (pointclouds[5] == np.array([6, 7, 8])).all() + assert (pointclouds[6] == np.array([7, 8, 9])).all() + assert (pointclouds[7] == np.array([8, 9, 10])).all() # Delay = 3 prep = TimeDelayEmbedding(delay=3) - attractor = prep(ts) - assert (attractor[0] == np.array([1, 4, 7])).all() - assert (attractor[1] == np.array([2, 5, 8])).all() - assert (attractor[2] == np.array([3, 6, 9])).all() - assert (attractor[3] == np.array([4, 7, 10])).all() + pointclouds = prep(ts) + assert (pointclouds[0] == np.array([1, 4, 7])).all() + assert (pointclouds[1] == np.array([2, 5, 8])).all() + assert (pointclouds[2] == np.array([3, 6, 9])).all() + assert (pointclouds[3] == np.array([4, 7, 10])).all() # Skip = 3 prep = TimeDelayEmbedding(skip=3) - attractor = prep(ts) - assert (attractor[0] == np.array([1, 2, 3])).all() - assert (attractor[1] == np.array([4, 5, 6])).all() - assert (attractor[2] == np.array([7, 8, 9])).all() + pointclouds = prep(ts) + assert (pointclouds[0] == np.array([1, 2, 3])).all() + assert (pointclouds[1] == np.array([4, 5, 6])).all() + assert (pointclouds[2] == np.array([7, 8, 9])).all() # Delay = 2 / Skip = 2 prep = TimeDelayEmbedding(delay=2, skip=2) - attractor = prep(ts) - assert (attractor[0] == np.array([1, 3, 5])).all() - assert (attractor[1] == np.array([3, 5, 7])).all() - assert (attractor[2] == np.array([5, 7, 9])).all() + pointclouds = prep(ts) + assert (pointclouds[0] == np.array([1, 3, 5])).all() + assert (pointclouds[1] == np.array([3, 5, 7])).all() + assert (pointclouds[2] == np.array([5, 7, 9])).all() + + # Vector series + ts = np.arange(0, 10).reshape(-1, 2) + prep = TimeDelayEmbedding(dim=4) + prep.fit([ts]) + assert (prep.transform([ts])[0] == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]).all() -- cgit v1.2.3 From a74ec878560bbe5fa340b2650ca9c16471b685af Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: 
Tue, 25 Feb 2020 13:27:03 +0900 Subject: Update point_cloud.rst --- src/python/doc/point_cloud.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index 55c74ff3..c0d4b303 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -26,4 +26,5 @@ TimeDelayEmbedding .. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding :members: + :special-members: __call__ -- cgit v1.2.3 From f25d0f86fcd4ac9ab2939b2919d7a66df8b21269 Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Tue, 25 Feb 2020 16:35:41 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index d7a1dab7..576f4386 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -50,6 +50,7 @@ class TimeDelayEmbedding: def __call__(self, ts): """Transform method for single time-series data. + Parameters ---------- ts : Iterable[float] or Iterable[Iterable[float]] -- cgit v1.2.3 From 2c1edeb7fd241c8718a22618438b482704703b4a Mon Sep 17 00:00:00 2001 From: mtakenouchi Date: Tue, 25 Feb 2020 17:46:28 +0900 Subject: Update timedelay.py --- src/python/gudhi/point_cloud/timedelay.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index 576f4386..f01df442 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -11,8 +11,9 @@ import numpy as np class TimeDelayEmbedding: """Point cloud transformation class. - Embeds time-series data in the R^d according to Takens' Embedding Theorem - and obtains the coordinates of each point. + Embeds time-series data in the R^d according to [Takens' Embedding Theorem] + (https://en.wikipedia.org/wiki/Takens%27s_theorem) and obtains the + coordinates of each point. Parameters ---------- -- cgit v1.2.3 From 835a831007196a4d93e57659ab8d3cdb28a4ef92 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 25 Feb 2020 18:14:06 +0100 Subject: Revert "Global improvement of rendering with Python tools" This reverts commit 34e1ae726e27fdd7c41f6d80d8ed7f6504dc3a0d. --- src/python/gudhi/persistence_graphical_tools.py | 92 ++++--------------------- 1 file changed, 15 insertions(+), 77 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 4a690241..246280de 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -5,7 +5,6 @@ # Copyright (C) 2016 Inria # # Modification(s): -# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility. # - YYYY/MM Author: Description of the modification from os import path @@ -44,7 +43,6 @@ def __min_birth_max_death(persistence, band=0.0): max_death += band return (min_birth, max_death) - def plot_persistence_barcode( persistence=[], persistence_file="", @@ -54,9 +52,7 @@ def plot_persistence_barcode( inf_delta=0.1, legend=False, colormap=None, - axes=None, - fontsize=16, - title="Persistence barcode" + axes=None ): """This function plots the persistence bar code from persistence values list or from a :doc:`persistence file `. @@ -85,18 +81,11 @@ def plot_persistence_barcode( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. 
:type axes: `matplotlib.axes.Axes` - :param fontsize: Fontsize to use in axis. - :type fontsize: int - :param title: title for the plot. - :type title: string :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches - from matplotlib import rc - plt.rc('text', usetex=True) - plt.rc('font', family='serif') if persistence_file != "": if path.isfile(persistence_file): @@ -174,7 +163,7 @@ def plot_persistence_barcode( loc="lower right", ) - axes.set_title(title) + axes.set_title("Persistence barcode") # Ends plot on infinity value and starts a little bit before min_birth axes.axis([axis_start, infinity, 0, ind]) @@ -194,11 +183,7 @@ def plot_persistence_diagram( inf_delta=0.1, legend=False, colormap=None, - axes=None, - aspect_equal=False, - fontsize=16, - title="Persistence diagram", - greyblock=True + axes=None ): """This function plots the persistence diagram from persistence values list or from a :doc:`persistence file `. @@ -229,23 +214,11 @@ def plot_persistence_diagram( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` - :param aspect_equal: if True, force plot to be square shaped. - :type aspect_equal: boolean - :param fontsize: Fontsize to use in axis. - :type fontsize: int - :param title: title for the plot. - :type title: string - :param greyblock: if we want to plot a grey patch on the lower half plane for nicer rendering. Default True. - :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches - from matplotlib import rc - plt.rc('text', usetex=True) - plt.rc('font', family='serif') - if persistence_file != "": if path.isfile(persistence_file): @@ -283,27 +256,18 @@ def plot_persistence_diagram( # Replace infinity values with max_death + delta for diagram to be more # readable infinity = max_death + delta - axis_end = max_death + delta / 2 axis_start = min_birth - delta # line display of equation : birth = death x = np.linspace(axis_start, infinity, 1000) # infinity line and text - axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k") - axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha) - # Infinity label - yt = axes.get_yticks() - yt = np.append(yt, infinity) - ytl = yt.tolist() - ytl[-1] = r'$+\infty$' - axes.set_yticks(yt) - axes.set_yticklabels(ytl) + axes.plot(x, x, color="k", linewidth=1.0) + axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha) + axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha) # bootstrap band if band > 0.0: axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red") - # lower diag patch - if greyblock: - axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey')) + # Draw points in loop for interval in reversed(persistence): if float(interval[1][1]) != float("inf"): @@ -329,13 +293,11 @@ def plot_persistence_diagram( ] ) - axes.set_xlabel("Birth", fontsize=fontsize) - axes.set_ylabel("Death", fontsize=fontsize) + axes.set_xlabel("Birth") + axes.set_ylabel("Death") # Ends plot on infinity value and starts a little bit before min_birth - axes.axis([axis_start, axis_end, axis_start, infinity + delta]) - axes.set_title(title, fontsize=fontsize) # a different fontsize for the 
title? - if aspect_equal: - axes.set_aspect("equal") + axes.axis([axis_start, infinity, axis_start, infinity + delta]) + axes.set_title("Persistence diagram") return axes except ImportError: @@ -351,11 +313,7 @@ def plot_persistence_density( dimension=None, cmap=None, legend=False, - axes=None, - aspect_equal=False, - fontsize=16, - title="Persistence density", - greyblock=True + axes=None ): """This function plots the persistence density from persistence values list or from a :doc:`persistence file `. Be @@ -397,25 +355,11 @@ def plot_persistence_density( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` - :param aspect_equal: if True, force plot to be square shaped. - :type aspect_equal: boolean - :param fontsize: Fontsize to use in axis. - :type fontsize: int - :param title: title for the plot. - :type title: string - :param greyblock: if we want to plot a grey patch on the lower half plane - for nicer rendering. Default True. - :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt - import matplotlib.patches as mpatches from scipy.stats import kde - from matplotlib import rc - plt.rc('text', usetex=True) - plt.rc('font', family='serif') - if persistence_file != "": if dimension is None: @@ -474,18 +418,12 @@ def plot_persistence_density( # Make the plot img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap) - if greyblock: - axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey')) - if legend: plt.colorbar(img, ax=axes) - axes.set_xlabel("Birth", fontsize=fontsize) - axes.set_ylabel("Death", fontsize=fontsize) - axes.set_title(title, fontsize=fontsize) - if aspect_equal: - axes.set_aspect("equal") - + axes.set_xlabel("Birth") + axes.set_ylabel("Death") + axes.set_title("Persistence density") return axes except ImportError: -- cgit v1.2.3 From cdcd2904a1c682625670a62608fd781bfd571516 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 25 Feb 2020 18:19:24 +0100 Subject: solved scale issue and removed title/aspect as functions return ax --- src/python/gudhi/persistence_graphical_tools.py | 77 +++++++++++++++++++------ 1 file changed, 60 insertions(+), 17 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 246280de..8ddfdba8 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -5,6 +5,7 @@ # Copyright (C) 2016 Inria # # Modification(s): +# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility. # - YYYY/MM Author: Description of the modification from os import path @@ -43,6 +44,7 @@ def __min_birth_max_death(persistence, band=0.0): max_death += band return (min_birth, max_death) + def plot_persistence_barcode( persistence=[], persistence_file="", @@ -52,7 +54,8 @@ def plot_persistence_barcode( inf_delta=0.1, legend=False, colormap=None, - axes=None + axes=None, + fontsize=16, ): """This function plots the persistence bar code from persistence values list or from a :doc:`persistence file `. @@ -81,11 +84,16 @@ def plot_persistence_barcode( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param fontsize: Fontsize to use in axis. 
+ :type fontsize: int :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') if persistence_file != "": if path.isfile(persistence_file): @@ -163,7 +171,7 @@ def plot_persistence_barcode( loc="lower right", ) - axes.set_title("Persistence barcode") + axes.set_title("Persistence barcode", fontsize=fontsize) # Ends plot on infinity value and starts a little bit before min_birth axes.axis([axis_start, infinity, 0, ind]) @@ -183,7 +191,9 @@ def plot_persistence_diagram( inf_delta=0.1, legend=False, colormap=None, - axes=None + axes=None, + fontsize=16, + greyblock=True ): """This function plots the persistence diagram from persistence values list or from a :doc:`persistence file `. @@ -214,11 +224,19 @@ def plot_persistence_diagram( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param fontsize: Fontsize to use in axis. + :type fontsize: int + :param greyblock: if we want to plot a grey patch on the lower half plane for nicer rendering. Default True. + :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') + if persistence_file != "": if path.isfile(persistence_file): @@ -256,18 +274,27 @@ def plot_persistence_diagram( # Replace infinity values with max_death + delta for diagram to be more # readable infinity = max_death + delta + axis_end = max_death + delta / 2 axis_start = min_birth - delta - # line display of equation : birth = death - x = np.linspace(axis_start, infinity, 1000) # infinity line and text - axes.plot(x, x, color="k", linewidth=1.0) - axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha) - axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha) + axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k") + axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha) + # Infinity label + yt = axes.get_yticks() + yt = yt[np.where(yt < axis_end)] # to avoid ploting ticklabel higher than infinity + yt = np.append(yt, infinity) + ytl = ["%.3f" % e for e in yt] # to avoid float precision error + ytl[-1] = r'$+\infty$' + axes.set_yticks(yt) + axes.set_yticklabels(ytl) # bootstrap band if band > 0.0: + x = np.linspace(axis_start, infinity, 1000) axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red") - + # lower diag patch + if greyblock: + axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey')) # Draw points in loop for interval in reversed(persistence): if float(interval[1][1]) != float("inf"): @@ -293,11 +320,11 @@ def plot_persistence_diagram( ] ) - axes.set_xlabel("Birth") - axes.set_ylabel("Death") + axes.set_xlabel("Birth", fontsize=fontsize) + axes.set_ylabel("Death", fontsize=fontsize) + axes.set_title("Persistence diagram", fontsize=fontsize) # Ends plot on infinity value and starts a little bit before min_birth - axes.axis([axis_start, infinity, axis_start, infinity + delta]) - axes.set_title("Persistence diagram") + axes.axis([axis_start, axis_end, axis_start, infinity + delta/2]) return axes except ImportError: @@ 
-313,7 +340,9 @@ def plot_persistence_density( dimension=None, cmap=None, legend=False, - axes=None + axes=None, + fontsize=16, + greyblock=True ): """This function plots the persistence density from persistence values list or from a :doc:`persistence file `. Be @@ -355,11 +384,21 @@ def plot_persistence_density( :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on a new set of axes. :type axes: `matplotlib.axes.Axes` + :param fontsize: Fontsize to use in axis. + :type fontsize: int + :param greyblock: if we want to plot a grey patch on the lower half plane + for nicer rendering. Default True. + :type greyblock: boolean :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn. """ try: import matplotlib.pyplot as plt + import matplotlib.patches as mpatches from scipy.stats import kde + from matplotlib import rc + plt.rc('text', usetex=True) + plt.rc('font', family='serif') + if persistence_file != "": if dimension is None: @@ -418,12 +457,16 @@ def plot_persistence_density( # Make the plot img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap) + if greyblock: + axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey')) + if legend: plt.colorbar(img, ax=axes) - axes.set_xlabel("Birth") - axes.set_ylabel("Death") - axes.set_title("Persistence density") + axes.set_xlabel("Birth", fontsize=fontsize) + axes.set_ylabel("Death", fontsize=fontsize) + axes.set_title("Persistence density", fontsize=fontsize) + return axes except ImportError: -- cgit v1.2.3 From 3ecf0caf4efbea0fabf4af0df490900374abda8b Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 25 Feb 2020 18:20:30 +0100 Subject: say in doc that functions return ax --- src/python/doc/persistence_graphical_tools_sum.inc | 6 +++--- src/python/doc/persistence_graphical_tools_user.rst | 20 ++++++++++++-------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc index 0cdf8072..2ddaccfc 100644 --- a/src/python/doc/persistence_graphical_tools_sum.inc +++ b/src/python/doc/persistence_graphical_tools_sum.inc @@ -3,10 +3,10 @@ +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau | - | img/graphical_tools_representation.png | the user to build easily persistence barcode, diagram or density. | | + | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | | | | :Introduced in: GUDHI 2.0.0 | - | | | | - | | | :Copyright: MIT | + | | Note that these functions return the matplotlib axis, allowing | | + | | for further modifications (title, aspect, etc.) 
| :Copyright: MIT | | | | | | | | :Requires: matplotlib, numpy and scipy | +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst index 80002db6..ff51604e 100644 --- a/src/python/doc/persistence_graphical_tools_user.rst +++ b/src/python/doc/persistence_graphical_tools_user.rst @@ -20,7 +20,7 @@ This function can display the persistence result as a barcode: .. plot:: :include-source: - import matplotlib.pyplot as plot + import matplotlib.pyplot as plt import gudhi off_file = gudhi.__root_source_dir__ + '/data/points/tore3D_300.off' @@ -31,7 +31,7 @@ This function can display the persistence result as a barcode: diag = simplex_tree.persistence(min_persistence=0.4) gudhi.plot_persistence_barcode(diag) - plot.show() + plt.show() Show persistence as a diagram ----------------------------- @@ -44,15 +44,19 @@ This function can display the persistence result as a diagram: .. plot:: :include-source: - import matplotlib.pyplot as plot + import matplotlib.pyplot as plt import gudhi # rips_on_tore3D_1307.pers obtained from write_persistence_diagram method persistence_file=gudhi.__root_source_dir__ + \ '/data/persistence_diagram/rips_on_tore3D_1307.pers' - gudhi.plot_persistence_diagram(persistence_file=persistence_file, + ax = gudhi.plot_persistence_diagram(persistence_file=persistence_file, legend=True) - plot.show() + # We can modify the title, aspect, etc. + ax.set_title("Persistence diagram of a torus") + ax.set_aspect("equal") # forces to be square shaped + plt.show() + Persistence density ------------------- @@ -65,7 +69,7 @@ If you want more information on a specific dimension, for instance: .. 
plot:: :include-source: - import matplotlib.pyplot as plot + import matplotlib.pyplot as plt import gudhi # rips_on_tore3D_1307.pers obtained from write_persistence_diagram method persistence_file=gudhi.__root_source_dir__ + \ @@ -75,9 +79,9 @@ If you want more information on a specific dimension, for instance: only_this_dim=1) pers_diag = [(1, elt) for elt in birth_death] # Use subplots to display diagram and density side by side - fig, axes = plot.subplots(nrows=1, ncols=2, figsize=(12, 5)) + fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5)) gudhi.plot_persistence_diagram(persistence=pers_diag, axes=axes[0]) gudhi.plot_persistence_density(persistence=pers_diag, dimension=1, legend=True, axes=axes[1]) - plot.show() + plt.show() -- cgit v1.2.3 From cbb350d81a8c4acadf31b604aaebde209f462e55 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 26 Feb 2020 09:32:32 +0100 Subject: Code review: remove import pytest leftovers --- src/python/test/test_euclidean_witness_complex.py | 1 - src/python/test/test_rips_complex.py | 1 - src/python/test/test_simplex_tree.py | 1 - src/python/test/test_tangential_complex.py | 1 - 4 files changed, 4 deletions(-) diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py index 47196a2a..f3664d39 100755 --- a/src/python/test/test_euclidean_witness_complex.py +++ b/src/python/test/test_euclidean_witness_complex.py @@ -9,7 +9,6 @@ """ import gudhi -import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py index f5c086cb..b86e7498 100755 --- a/src/python/test/test_rips_complex.py +++ b/src/python/test/test_rips_complex.py @@ -10,7 +10,6 @@ from gudhi import RipsComplex from math import sqrt -import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index eca3807b..04b26e92 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -9,7 +9,6 @@ """ from gudhi import SimplexTree -import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py index fc500c45..8668a2e0 100755 --- a/src/python/test/test_tangential_complex.py +++ b/src/python/test/test_tangential_complex.py @@ -9,7 +9,6 @@ """ from gudhi import TangentialComplex, SimplexTree -import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" -- cgit v1.2.3 From b2c1cf839080efa43835d7b0fdcd6a38f6808255 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 26 Feb 2020 10:20:00 +0100 Subject: Fix #229 incomplete citation in nerve_GIC python documentation --- src/python/gudhi/nerve_gic.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx index 382e71c5..45cc8eba 100644 --- a/src/python/gudhi/nerve_gic.pyx +++ b/src/python/gudhi/nerve_gic.pyx @@ -187,7 +187,7 @@ cdef class CoverComplex: def set_automatic_resolution(self): """Computes the optimal length of intervals (i.e. the smallest interval - length avoiding discretization artifacts—see :cite:`Carriere17c`) for a + length avoiding discretization artifacts - see :cite:`Carriere17c`) for a functional cover. 
:rtype: double @@ -288,7 +288,7 @@ cdef class CoverComplex: def set_graph_from_automatic_rips(self, N=100): """Creates a graph G from a Rips complex whose threshold value is - automatically tuned with subsampling—see. + automatically tuned with subsampling - see :cite:`Carriere17c`. :param N: Number of subsampling iteration (the default reasonable value is 100, but there is no guarantee on how to choose it). -- cgit v1.2.3 From f85742957276cbd15a2724c86cbc7a8279d62ef9 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 26 Feb 2020 11:11:32 +0100 Subject: Code review: add some comments about range.begin() and range.end() --- src/python/include/Simplex_tree_interface.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 55d5af97..66ce5afd 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -124,18 +124,22 @@ class Simplex_tree_interface : public Simplex_tree { // Iterator over the simplex tree typename std::vector::const_iterator get_filtration_iterator_begin() { // Base::initialize_filtration(); already performed in filtration_simplex_range + // this specific case works because the range is just a pair of iterators - won't work if range was a vector return Base::filtration_simplex_range().begin(); } typename std::vector::const_iterator get_filtration_iterator_end() { + // this specific case works because the range is just a pair of iterators - won't work if range was a vector return Base::filtration_simplex_range().end(); } Skeleton_simplex_iterator get_skeleton_iterator_begin(int dimension) { + // this specific case works because the range is just a pair of iterators - won't work if range was a vector return Base::skeleton_simplex_range(dimension).begin(); } Skeleton_simplex_iterator get_skeleton_iterator_end(int dimension) { + // this specific case works because the range is just a pair of iterators - won't work if range was a vector return Base::skeleton_simplex_range(dimension).end(); } }; -- cgit v1.2.3 From 0998cecac7f15e3c68058d33acc21fb427f803e9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Fri, 28 Feb 2020 11:18:59 +0100 Subject: shorten < 80 char the doc --- src/python/doc/barycenter_user.rst | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst index 59f758fa..83e9bebb 100644 --- a/src/python/doc/barycenter_user.rst +++ b/src/python/doc/barycenter_user.rst @@ -20,13 +20,17 @@ Function Basic example ------------- -This example computes the Frechet mean (aka Wasserstein barycenter) between four persistence diagrams. +This example computes the Frechet mean (aka Wasserstein barycenter) between +four persistence diagrams. It is initialized on the 4th diagram. -As the algorithm is not convex, its output depends on the initialization and is only a local minimum of the objective function. -Initialization can be either given as an integer (in which case the i-th diagram of the list is used as initial estimate) -or as a diagram. -If None, it will randomly select one of the diagram of the list as initial estimate. -Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values. +As the algorithm is not convex, its output depends on the initialization and +is only a local minimum of the objective function. 
+Initialization can be either given as an integer (in which case the i-th
+diagram of the list is used as initial estimate) or as a diagram.
+If None, it will randomly select one of the diagrams of the list
+as initial estimate.
+Note that persistence diagrams must be submitted as
+(n x 2) numpy arrays and must not contain inf values.
 
 .. testcode::
 
     dg1 = np.array([[0.2, 0.5]])
     dg2 = np.array([[0.2, 0.7]])
     dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
     dg4 = np.array([])
-
-    bary = gudhi.barycenter.lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3)
+    pdiagset = [dg1, dg2, dg3, dg4]
+    bary = gudhi.barycenter.lagrangian_barycenter(pdiagset=pdiagset,init=3)
 
     message = "Wasserstein barycenter estimated:"
     print(message)
-- cgit v1.2.3


From f8c251b1c1b7a1c8c36e77f56cda1fd41245adb7 Mon Sep 17 00:00:00 2001
From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com>
Date: Mon, 2 Mar 2020 13:44:38 +0100
Subject: [skip ci] update next release with hera integration

---
 .github/next_release.md | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/.github/next_release.md b/.github/next_release.md
index a2805a55..3166b0a8 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,19 +1,20 @@
 We are pleased to announce the release 3.X.X of the GUDHI library.
 
-As a major new feature, the GUDHI library now offers ...
+As a major new feature, the GUDHI library now offers a Python interface to [Hera](https://bitbucket.org/grey_narn/hera/src/master/) to compute the Wasserstein distance.
+[PyBind11](https://github.com/pybind/pybind11) is now required to build the Python module.
 
 We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
 
-Below is a list of changes made since GUDHI 3.X-1.X-1:
+Below is a list of changes made since GUDHI 3.1.1:
 
-- [Module](link)
-  - ...
+- [Wasserstein distance](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html)
+  - Another implementation comes from Hera (BSD-3-Clause) which is based on [Geometry Helps to Compare Persistence Diagrams](http://doi.acm.org/10.1145/3064175) by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov.
 
 - [Module](link)
   - ...
 
 - Miscellaneous
-  - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub.
+  - The [list of bugs that were solved since GUDHI-3.1.1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.2.0+is%3Aclosed) is available on GitHub.
 
 All modules are distributed under the terms of the MIT license.
 However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
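Since the release note above announces the Hera backend next to the existing POT-based one, a short comparison sketch may help. The `gudhi.hera` module name and its `delta` relative-error parameter are inferred from the test wrapper later in this series, not stated in the note itself:

```python
import numpy as np
import gudhi.wasserstein
import gudhi.hera  # module name inferred from the tests, not from this note

dgm1 = np.array([[2.7, 3.7], [9.6, 14.0]])
dgm2 = np.array([[2.8, 4.45], [9.5, 14.1]])

# POT-based backend: solves the exact optimal transport problem.
d_pot = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2.)

# Hera backend: delta is its relative-error tolerance (assumed parameter name).
d_hera = gudhi.hera.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2., delta=1e-4)

print(d_pot, d_hera)  # the two values should agree up to the requested delta
```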
-- cgit v1.2.3 From d2943b9e7311c8a3d8a4fb379c39b15497481b9c Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 2 Mar 2020 13:50:35 +0100 Subject: [skip ci] add a note on next_release.md file --- .github/how_to_use_github_to_contribute_to_gudhi.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md index 0a6133b7..6ab05e36 100644 --- a/.github/how_to_use_github_to_contribute_to_gudhi.md +++ b/.github/how_to_use_github_to_contribute_to_gudhi.md @@ -90,6 +90,8 @@ Because of `-u`, it will remember where you like to push this branch, and next t Possibly iterate a few times, add more commits and push them. ## Your pull request is ready +Do not forget to update `.github/next_release.md` to announce your development in the next release note. + Get your web browser to https://github.com/LOGIN/gudhi-devel, click on the button that says **Branch: some-name** (below the number of commits, above the list of files) and select the branch you are so proud of. Click on **New pull request** next to it. -- cgit v1.2.3 From 8e4f3d151818b78a29d11cdc6ca171947bfd6dd9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 3 Mar 2020 15:33:17 +0100 Subject: update wasserstein distance with pot so that it can return optimal matching now! --- src/python/doc/wasserstein_distance_user.rst | 24 ++++++++++ src/python/gudhi/wasserstein.py | 69 ++++++++++++++++++++++------ src/python/test/test_wasserstein_distance.py | 31 +++++++++---- 3 files changed, 102 insertions(+), 22 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 94b454e2..d3daa318 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -47,3 +47,27 @@ The output is: .. testoutput:: Wasserstein distance value = 1.45 + +We can also have access to the optimal matching by letting `matching=True`. +It is encoded as a list of indices (i,j), meaning that the i-th point in X +is mapped to the j-th point in Y. +An index of -1 represents the diagonal. + +.. testcode:: + + import gudhi.wasserstein + import numpy as np + + diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) + diag2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]]) + cost, matching = gudhi.wasserstein.wasserstein_distance(diag1, diag2, matching=True, order=1., internal_p=2.) + + message = "Wasserstein distance value = %.2f, optimal matching: %s" %(cost, matching) + print(message) + +The output is: + +.. testoutput:: + + Wasserstein distance value = 2.15, optimal matching: [(0, 0), (1, 2), (2, -1), (-1, 1)] + diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 13102094..ba0f7343 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -62,14 +62,39 @@ def _perstot(X, order, internal_p): return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order) -def wasserstein_distance(X, Y, order=2., internal_p=2.): +def _clean_match(match, n, m): ''' - :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + :param match: a list of the form [(i,j) ...] + :param n: int, size of the first dgm + :param m: int, size of the second dgm + :return: a modified version of match where indices greater than n, m are replaced by -1, encoding the diagonal. 
+ and (-1, -1) are removed + ''' + new_match = [] + for i,j in match: + if i >= n: + if j < m: + new_match.append((-1, j)) + elif j >= m: + if i < n: + new_match.append((i,-1)) + else: + new_match.append((i,j)) + return new_match + + +def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): + ''' + :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points + (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. + :param matching: if True, computes and returns the optimal matching between X and Y, encoded as... :param order: exponent for Wasserstein; Default value is 2. - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). - :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. - :rtype: float + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); + Default value is 2 (Euclidean norm). + :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with + respect to the internal_p-norm as ground metric. + If matching is set to True, also returns the optimal matching between X and Y. ''' n = len(X) m = len(Y) @@ -77,21 +102,39 @@ def wasserstein_distance(X, Y, order=2., internal_p=2.): # handle empty diagrams if X.size == 0: if Y.size == 0: - return 0. + if not matching: + return 0. + else: + return 0., [] else: - return _perstot(Y, order, internal_p) + if not matching: + return _perstot(Y, order, internal_p) + else: + return _perstot(Y, order, internal_p), [(-1, j) for j in range(m)] elif Y.size == 0: - return _perstot(X, order, internal_p) + if not matching: + return _perstot(X, order, internal_p) + else: + return _perstot(X, order, internal_p), [(i, -1) for i in range(n)] M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p) - a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. - a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT - b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here. - b[-1] = b[-1] * n # so that we have a probability measure, required by POT + a = np.ones(n+1) # weight vector of the input diagram. Uniform here. + a[-1] = m + b = np.ones(m+1) # weight vector of the input diagram. Uniform here. + b[-1] = n + + if matching: + P = ot.emd(a=a,b=b,M=M, numItermax=2000000) + ot_cost = np.sum(np.multiply(P,M)) + P[P < 0.5] = 0 # trick to avoid numerical issue, could it be improved? + match = np.argwhere(P) + # Now we turn to -1 points encoding the diagonal + match = _clean_match(match, n, m) + return ot_cost ** (1./order) , match # Comptuation of the otcost using the ot.emd2 library. # Note: it is the Wasserstein distance to the power q. # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value? 
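    # Note on the weight vectors a and b built above: each off-diagonal point
    # carries mass 1 and the appended virtual diagonal point carries mass m
    # (resp. n), so both measures have total mass n + m and the transport
    # problem handed to POT below is balanced.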
- ot_cost = (n+m) * ot.emd2(a, b, M, numItermax=2000000) + ot_cost = ot.emd2(a, b, M, numItermax=2000000) return ot_cost ** (1./order) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 6a6b217b..02a1d2c9 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -51,14 +51,27 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True): assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == approx(np.sqrt(5)) assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == approx(np.sqrt(5)) - if(not test_infinity): - return + if test_infinity: + diag5 = np.array([[0, 3], [4, np.inf]]) + diag6 = np.array([[7, 8], [4, 6], [3, np.inf]]) - diag5 = np.array([[0, 3], [4, np.inf]]) - diag6 = np.array([[7, 8], [4, 6], [3, np.inf]]) + assert wasserstein_distance(diag4, diag5) == np.inf + assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.) + + + if test_matching: + match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1] + assert match == [] + match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] + assert match == [] + match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1] + assert match == [(-1, 0), (-1, 1)] + match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] + assert match == [(0, -1), (1, -1)] + match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] + assert match == [(0, 0), (1, 1), (2, -1)] + - assert wasserstein_distance(diag4, diag5) == np.inf - assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.) 
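# hera_wrap (just below) builds a closure over the Hera backend: it forwards
# all positional and keyword arguments unchanged while pinning the relative
# error parameter delta, so _basic_wasserstein can exercise both backends
# with the same assertions.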
def hera_wrap(delta): def fun(*kargs,**kwargs): @@ -66,8 +79,8 @@ def hera_wrap(delta): return fun def test_wasserstein_distance_pot(): - _basic_wasserstein(pot, 1e-15, test_infinity=False) + _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) def test_wasserstein_distance_hera(): - _basic_wasserstein(hera_wrap(1e-12), 1e-12) - _basic_wasserstein(hera_wrap(.1), .1) + _basic_wasserstein(hera_wrap(1e-12), 1e-12, test_matching=False) + _basic_wasserstein(hera_wrap(.1), .1, test_matching=False) -- cgit v1.2.3 From 6225d21fa6fd87edf10731df87cd3a7099049358 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 3 Mar 2020 16:02:44 +0100 Subject: Add a unitary test for Delaunay complex --- src/Alpha_complex/test/CMakeLists.txt | 4 ++ .../test/Delaunay_complex_unit_test.cpp | 72 ++++++++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 src/Alpha_complex/test/Delaunay_complex_unit_test.cpp diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt index 0476c6d4..fe4b23e4 100644 --- a/src/Alpha_complex/test/CMakeLists.txt +++ b/src/Alpha_complex/test/CMakeLists.txt @@ -8,11 +8,15 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) add_executable ( Alpha_complex_test_unit Alpha_complex_unit_test.cpp ) target_link_libraries(Alpha_complex_test_unit ${CGAL_LIBRARY}) + add_executable ( Delaunay_complex_test_unit Delaunay_complex_unit_test.cpp ) + target_link_libraries(Delaunay_complex_test_unit ${CGAL_LIBRARY}) if (TBB_FOUND) target_link_libraries(Alpha_complex_test_unit ${TBB_LIBRARIES}) + target_link_libraries(Delaunay_complex_test_unit ${TBB_LIBRARIES}) endif() gudhi_add_boost_test(Alpha_complex_test_unit) + gudhi_add_boost_test(Delaunay_complex_test_unit) add_executable ( Alpha_complex_3d_test_unit Alpha_complex_3d_unit_test.cpp ) target_link_libraries(Alpha_complex_3d_test_unit ${CGAL_LIBRARY}) diff --git a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp new file mode 100644 index 00000000..71164705 --- /dev/null +++ b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp @@ -0,0 +1,72 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ *   - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "delaunay_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epeck_d.h>
+#include <CGAL/Epick_d.h>
+#include <cmath>  // NaN
+
+#include <gudhi/Alpha_complex.h>
+// to construct a simplex_tree from Delaunay_triangulation
+#include <gudhi/graph_simplicial_complex.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Unitary_tests_utils.h>
+#include <gudhi/random_point_generators.h>
+
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<5> > Exact_kernel_s;
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dimension_tag<5> > Inexact_kernel_s;
+// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
+
+typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of_kernel_variants) {
+  std::cout << "*****************************************************************************************************";
+  using Point = typename TestedKernel::Point_d;
+  std::vector<Point> points;
+  // 10 points on a 4-sphere
+  points = Gudhi::generate_points_on_sphere_d<TestedKernel>(10, 5, 1.);
+
+  Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex(points);
+
+  // Alpha complex
+  Gudhi::Simplex_tree<> stree_from_alpha_complex;
+  BOOST_CHECK(alpha_complex.create_complex(stree_from_alpha_complex));
+  stree_from_alpha_complex.initialize_filtration();
+
+  // Delaunay complex
+  Gudhi::Simplex_tree<> stree_from_delaunay_complex;
+  BOOST_CHECK(alpha_complex.create_complex(stree_from_delaunay_complex, 0., false, true));
+
+  // Check all the simplices from alpha complex are in the Delaunay complex
+  for (auto f_simplex : stree_from_alpha_complex.filtration_simplex_range()) {
+    std::vector<Gudhi::Simplex_tree<>::Vertex_handle> simplex;
+    for (Gudhi::Simplex_tree<>::Vertex_handle vertex : stree_from_alpha_complex.simplex_vertex_range(f_simplex)) {
+      std::cout << "(" << vertex << ")";
+      simplex.push_back(vertex);
+    }
+    std::cout << std::endl;
+    Gudhi::Simplex_tree<>::Simplex_handle sh = stree_from_delaunay_complex.find(simplex);
+    BOOST_CHECK(std::isnan(stree_from_delaunay_complex.filtration(sh)));
+    BOOST_CHECK(sh != stree_from_delaunay_complex.null_simplex());
+  }
+}
-- cgit v1.2.3


From 2141ef8adfee531f3eaf822cf4076b9b010e6f94 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Tue, 3 Mar 2020 16:22:48 +0100
Subject: correction missing arg in test_wasserstein_distance

---
 src/python/test/test_wasserstein_distance.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 02a1d2c9..d0f0323c 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -17,7 +17,7 @@ __author__ = "Theo Lacombe"
 __copyright__ = "Copyright (C) 2019 Inria"
 __license__ = "MIT"
 
-def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True):
+def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
-- cgit v1.2.3
From 73194242e1c8012c1320a7581a382a3b2b59eb09 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Tue, 3 Mar 2020 16:30:22 +0100
Subject: Fix #172 and add a proper comment on the modification

---
 src/Simplex_tree/include/gudhi/Simplex_tree.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 76608008..5110819f 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -1347,7 +1347,9 @@ class Simplex_tree {
         });
       Filtration_value max_filt_border_value = filtration(*max_border);
-      if (simplex.second.filtration() < max_filt_border_value) {
+      // Replacing if(f<max) with if(!(f>=max)) would mean that if f is NaN, we replace it with the max of the children.
+      // That seems more useful than keeping NaN.
+      if (!(simplex.second.filtration() >= max_filt_border_value)) {
         // Store the filtration modification information
         modified = true;
         simplex.second.assign_filtration(max_filt_border_value);
-- cgit v1.2.3


From efae8ff48c6b6e4d29afea753b7a1ddee0925ad4 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Tue, 3 Mar 2020 16:44:58 +0100
Subject: handle numpy array, should now adapt the doc

---
 src/python/gudhi/persistence_graphical_tools.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 8ddfdba8..43776fc6 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -45,6 +45,13 @@ def __min_birth_max_death(persistence, band=0.0):
     return (min_birth, max_death)
 
 
+def _array_handler(a):
+    if isinstance(a, np.ndarray):
+        return [[0, x] for x in a]
+    else:
+        return a
+
+
 def plot_persistence_barcode(
     persistence=[],
     persistence_file="",
@@ -95,6 +102,9 @@ def plot_persistence_barcode(
     plt.rc('text', usetex=True)
     plt.rc('font', family='serif')
 
+
+    persistence = _array_handler(persistence)
+
     if persistence_file != "":
         if path.isfile(persistence_file):
             # Reset persistence
@@ -237,6 +247,7 @@ def plot_persistence_diagram(
     plt.rc('text', usetex=True)
     plt.rc('font', family='serif')
 
+    persistence = _array_handler(persistence)
 
     if persistence_file != "":
         if path.isfile(persistence_file):
@@ -399,6 +410,7 @@ def plot_persistence_density(
     plt.rc('text', usetex=True)
     plt.rc('font', family='serif')
 
+    persistence = _array_handler(persistence)
 
     if persistence_file != "":
         if dimension is None:
-- cgit v1.2.3


From 4f4030e9f9e0215c2d1f2431c02cd9270bba2699 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Tue, 3 Mar 2020 16:57:56 +0100
Subject: updated doc and example handling Nx2 numpy arrays

---
 src/python/doc/persistence_graphical_tools_sum.inc |  2 +-
 .../doc/persistence_graphical_tools_user.rst       | 12 +++++++++++
 src/python/gudhi/persistence_graphical_tools.py    | 25 ++++++++++++++++------
 3 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc
index 2ddaccfc..ef376802 100644
--- a/src/python/doc/persistence_graphical_tools_sum.inc
+++ b/src/python/doc/persistence_graphical_tools_sum.inc
@@ -2,7 +2,7 @@
    :widths: 30 50 20
 
    +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
-   | .. 
figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau | + | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | | | | :Introduced in: GUDHI 2.0.0 | | | Note that these functions return the matplotlib axis, allowing | | diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst index ff51604e..91e52703 100644 --- a/src/python/doc/persistence_graphical_tools_user.rst +++ b/src/python/doc/persistence_graphical_tools_user.rst @@ -57,6 +57,18 @@ This function can display the persistence result as a diagram: ax.set_aspect("equal") # forces to be square shaped plt.show() +Note that (as barcode and density) it can also take a simple `np.array` +of shape (N x 2) encoding a persistence diagram (in a given dimension). + +.. plot:: + :include-source: + + import matplotlib.pyplot as plt + import gudhi + import numpy as np + d = np.array([[0, 1], [1, 2], [1, np.inf]]) + gudhi.plot_persistence_diagram(d) + plt.show() Persistence density ------------------- diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 43776fc6..48e26432 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -15,7 +15,7 @@ import numpy as np from gudhi.reader_utils import read_persistence_intervals_in_dimension from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension -__author__ = "Vincent Rouvreau, Bertrand Michel" +__author__ = "Vincent Rouvreau, Bertrand Michel, Theo Lacombe" __copyright__ = "Copyright (C) 2016 Inria" __license__ = "MIT" @@ -46,6 +46,11 @@ def __min_birth_max_death(persistence, band=0.0): def _array_handler(a): + ''' + :param a: if array, assumes it is a (n x 2) np.array and return a + persistence-compatible list (padding with 0), so that the + plot can be performed seamlessly. + ''' if isinstance(a, np.ndarray): return [[0, x] for x in a] else: @@ -65,9 +70,12 @@ def plot_persistence_barcode( fontsize=16, ): """This function plots the persistence bar code from persistence values list + , a np.array of shape (N x 2) (representing a diagram + in a single homology dimension), or from a :doc:`persistence file `. - :param persistence: Persistence intervals values list grouped by dimension. + :param persistence: Persistence intervals values list grouped by dimension, + or np.array of shape (N x 2). :type persistence: list of tuples(dimension, tuple(birth, death)). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). @@ -206,9 +214,11 @@ def plot_persistence_diagram( greyblock=True ): """This function plots the persistence diagram from persistence values - list or from a :doc:`persistence file `. + list, a np.array of shape (N x 2) representing a diagram in a single + homology dimension, or from a :doc:`persistence file `. - :param persistence: Persistence intervals values list grouped by dimension. + :param persistence: Persistence intervals values list grouped by dimension, + or np.array of shape (N x 2). :type persistence: list of tuples(dimension, tuple(birth, death)). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). 
@@ -356,12 +366,15 @@ def plot_persistence_density( greyblock=True ): """This function plots the persistence density from persistence - values list or from a :doc:`persistence file `. Be + values list, np.array of shape (N x 2) representing a diagram + in a single homology dimension, + or from a :doc:`persistence file `. Be aware that this function does not distinguish the dimension, it is up to you to select the required one. This function also does not handle degenerate data set (scipy correlation matrix inversion can fail). - :param persistence: Persistence intervals values list grouped by dimension. + :param persistence: Persistence intervals values list grouped by dimension, + or np.array of shape (N x 2). :type persistence: list of tuples(dimension, tuple(birth, death)). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). -- cgit v1.2.3 From 5b5e9fce6a80151f29f98dde67f5e4150edb9a5b Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 5 Mar 2020 10:03:26 +0100 Subject: Add some tests and documentation for NaN management in make_filtration_non_decreasing Simplex tree method --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 2 + src/Simplex_tree/test/CMakeLists.txt | 6 + ...ee_make_filtration_non_decreasing_unit_test.cpp | 148 +++++++++++++++++++++ src/Simplex_tree/test/simplex_tree_unit_test.cpp | 84 ------------ 4 files changed, 156 insertions(+), 84 deletions(-) create mode 100644 src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 5110819f..7b39a500 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1317,6 +1317,8 @@ class Simplex_tree { * \post Some simplex tree functions require the filtration to be valid. `make_filtration_non_decreasing()` * function is not launching `initialize_filtration()` but returns the filtration modification information. If the * complex has changed , please call `initialize_filtration()` to recompute it. + * + * If a simplex has a `NaN` filtration value, it is considered lower than any other defined filtration value. */ bool make_filtration_non_decreasing() { bool modified = false; diff --git a/src/Simplex_tree/test/CMakeLists.txt b/src/Simplex_tree/test/CMakeLists.txt index 8b9163f5..cf2b0153 100644 --- a/src/Simplex_tree/test/CMakeLists.txt +++ b/src/Simplex_tree/test/CMakeLists.txt @@ -28,3 +28,9 @@ if (TBB_FOUND) target_link_libraries(Simplex_tree_ctor_and_move_test_unit ${TBB_LIBRARIES}) endif() gudhi_add_boost_test(Simplex_tree_ctor_and_move_test_unit) + +add_executable ( Simplex_tree_make_filtration_non_decreasing_test_unit simplex_tree_make_filtration_non_decreasing_unit_test.cpp ) +if (TBB_FOUND) + target_link_libraries(Simplex_tree_make_filtration_non_decreasing_test_unit ${TBB_LIBRARIES}) +endif() +gudhi_add_boost_test(Simplex_tree_make_filtration_non_decreasing_test_unit) diff --git a/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp new file mode 100644 index 00000000..a8130e25 --- /dev/null +++ b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp @@ -0,0 +1,148 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. 
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s):       Vincent Rouvreau
+ *
+ *    Copyright (C) 2020 Inria
+ *
+ *    Modification(s):
+ *      - YYYY/MM Author: Description of the modification
+ */
+
+#include <iostream>
+#include <limits>  // for NaN
+#include <cmath>  // for isNaN
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "simplex_tree_make_filtration_non_decreasing"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+//  ^
+// /!\ Nothing else from Simplex_tree shall be included to test includes are well defined.
+#include "gudhi/Simplex_tree.h"
+
+using namespace Gudhi;
+
+typedef boost::mpl::list<Simplex_tree<>, Simplex_tree<Simplex_tree_options_fast_persistence>> list_of_tested_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing, typeST, list_of_tested_variants) {
+  typeST st;
+
+  st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
+  st.insert_simplex_and_subfaces({3, 0}, 2.0);
+  st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+
+  /* Inserted simplex:     */
+  /*    1                  */
+  /*    o                  */
+  /*   /X\                 */
+  /*  o---o---o---o        */
+  /*  2   0   3\X/4        */
+  /*            o          */
+  /*            5          */
+
+  std::cout << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
+  BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+  // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0
+  // to 1.0
+  st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+  // Inserted simplex:
+  //    1   6
+  //    o---o
+  //   /X\7/
+  //  o---o---o---o
+  //  2   0   3\X/4
+  //            o
+  //            5
+
+  std::cout << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
+  BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+  // Copy original simplex tree
+  typeST st_copy = st;
+
+  // Modify specific values for st to become like st_copy thanks to make_filtration_non_decreasing
+  st.assign_filtration(st.find({0,1,6,7}), 0.8);
+  st.assign_filtration(st.find({0,1,6}), 0.9);
+  st.assign_filtration(st.find({0,6}), 0.6);
+  st.assign_filtration(st.find({3,4,5}), 1.2);
+  st.assign_filtration(st.find({3,4}), 1.1);
+  st.assign_filtration(st.find({4,5}), 1.99);
+
+  std::cout << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl;
+  BOOST_CHECK(st.make_filtration_non_decreasing());
+  BOOST_CHECK(st == st_copy);
+
+  // Other simplex tree
+  typeST st_other;
+  st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st
+  st_other.insert_simplex_and_subfaces({3, 0}, 2.0);
+  st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+  st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+  // Modify specific values for st to become like st_other thanks to make_filtration_non_decreasing
+  st.assign_filtration(st.find({2}), 3.0);
+  // By modifying just the simplex {2}
+  // {0,1,2}, {1,2} and {0,2} will be modified
+
+  std::cout << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl;
+  BOOST_CHECK(st.make_filtration_non_decreasing());
+  BOOST_CHECK(st == st_other);
+
+  // Modify specific values for st still to be non-decreasing
+  st.assign_filtration(st.find({0,1,2}), 10.0);
+  st.assign_filtration(st.find({0,2}), 9.0);
+  st.assign_filtration(st.find({0,1,6,7}), 50.0);
+  st.assign_filtration(st.find({0,1,6}), 49.0);
+  st.assign_filtration(st.find({0,1,7}), 48.0);
+  // Other copy simplex tree
+  typeST st_other_copy = st;
+
+  std::cout << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl;
+  BOOST_CHECK(!st.make_filtration_non_decreasing());
+  BOOST_CHECK(st == st_other_copy);
+
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing_on_nan_values, typeST, list_of_tested_variants) {
+  typeST st;
+
+  st.insert_simplex_and_subfaces({2, 1, 0}, std::numeric_limits<double>::quiet_NaN());
+  st.insert_simplex_and_subfaces({3, 0}, std::numeric_limits<double>::quiet_NaN());
+  st.insert_simplex_and_subfaces({3, 4, 5}, std::numeric_limits<double>::quiet_NaN());
+
+  /* Inserted simplex:     */
+  /*    1                  */
+  /*    o                  */
+  /*   /X\                 */
+  /*  o---o---o---o        */
+  /*  2   0   3\X/4        */
+  /*            o          */
+  /*            5          */
+
+  std::cout << "SPECIFIC CASE:" << std::endl;
+  std::cout << "Insertion with NaN values does not ensure the filtration values are non decreasing" << std::endl;
+  st.make_filtration_non_decreasing();
+
+  std::cout << "Check all filtration values are NaN" << std::endl;
+  for (auto f_simplex : st.filtration_simplex_range()) {
+    BOOST_CHECK(std::isnan(st.filtration(f_simplex)));
+  }
+
+  st.assign_filtration(st.find({0}), 0.);
+  st.assign_filtration(st.find({1}), 0.);
+  st.assign_filtration(st.find({2}), 0.);
+  st.assign_filtration(st.find({3}), 0.);
+  st.assign_filtration(st.find({4}), 0.);
+  st.assign_filtration(st.find({5}), 0.);
+
+  std::cout << "Check make_filtration_non_decreasing is modifying the simplicial complex" << std::endl;
+  BOOST_CHECK(st.make_filtration_non_decreasing());
+
+  std::cout << "Check all filtration values are now defined" << std::endl;
+  for (auto f_simplex : st.filtration_simplex_range()) {
+    BOOST_CHECK(!std::isnan(st.filtration(f_simplex)));
+  }
+}
\ No newline at end of file
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 58bfa8db..e739ad0a 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -791,90 +791,6 @@ BOOST_AUTO_TEST_CASE(non_contiguous) {
   BOOST_CHECK(++i == std::end(b));
 }
 
-BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) {
-  std::cout << "********************************************************************" << std::endl;
-  std::cout << "MAKE FILTRATION NON DECREASING" << std::endl;
-  typedef Simplex_tree<> typeST;
-  typeST st;
-
-  st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
-  st.insert_simplex_and_subfaces({3, 0}, 2.0);
-  st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
-
-  /* Inserted simplex:     */
-  /*    1                  */
-  /*    o                  */
-  /*   /X\                 */
-  /*  o---o---o---o        */
-  /*  2   0   3\X/4        */
-  /*            o          */
-  /*            5          */
-
-  std::cout << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
-  BOOST_CHECK(!st.make_filtration_non_decreasing());
-
-  // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0
-  // to 1.0
-  st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
-
-  // Inserted simplex:
-  //    1   6
-  //    o---o
-  //   /X\7/
-  //  o---o---o---o
-  //  2   0   3\X/4
-  //            o
-  //            5
-
-  std::cout << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
-  BOOST_CHECK(!st.make_filtration_non_decreasing());
-
-  // Copy original simplex tree
-  typeST st_copy = st;
-
-  // Modify specific values for st to become like st_copy thanks to make_filtration_non_decreasing
-  st.assign_filtration(st.find({0,1,6,7}), 0.8);
-  st.assign_filtration(st.find({0,1,6}), 0.9);
-  st.assign_filtration(st.find({0,6}), 0.6);
-  st.assign_filtration(st.find({3,4,5}), 1.2);
-  st.assign_filtration(st.find({3,4}), 1.1);
-  st.assign_filtration(st.find({4,5}), 1.99);
-
-  std::cout << "Check the simplex_tree is rolled back in case of decreasing filtration values"
<< std::endl; - BOOST_CHECK(st.make_filtration_non_decreasing()); - BOOST_CHECK(st == st_copy); - - // Other simplex tree - typeST st_other; - st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st - st_other.insert_simplex_and_subfaces({3, 0}, 2.0); - st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0); - st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0); - - // Modify specific values for st to become like st_other thanks to make_filtration_non_decreasing - st.assign_filtration(st.find({2}), 3.0); - // By modifying just the simplex {2} - // {0,1,2}, {1,2} and {0,2} will be modified - - std::cout << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl; - BOOST_CHECK(st.make_filtration_non_decreasing()); - BOOST_CHECK(st == st_other); - - // Modify specific values for st still to be non-decreasing - st.assign_filtration(st.find({0,1,2}), 10.0); - st.assign_filtration(st.find({0,2}), 9.0); - st.assign_filtration(st.find({0,1,6,7}), 50.0); - st.assign_filtration(st.find({0,1,6}), 49.0); - st.assign_filtration(st.find({0,1,7}), 48.0); - // Other copy simplex tree - typeST st_other_copy = st; - - std::cout << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl; - BOOST_CHECK(!st.make_filtration_non_decreasing()); - BOOST_CHECK(st == st_other_copy); - -} - typedef boost::mpl::list, -- cgit v1.2.3 From e1dbf6da118615e20d2b642df98b7d3df7cfd8c7 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 5 Mar 2020 10:16:51 +0100 Subject: Remove travis and use appveyor for OSx. Fix parallel test by setting tests dependencies --- .github/build-requirements.txt | 5 ++ .github/test-requirements.txt | 8 +++ .travis.yml | 78 ---------------------- Dockerfile_for_circleci_image | 22 ++---- azure-pipelines.yml | 44 ++++++++++++ src/Alpha_complex/example/CMakeLists.txt | 6 +- src/Alpha_complex/utilities/CMakeLists.txt | 12 +++- .../utilities/CMakeLists.txt | 10 ++- .../utilities/persistence_heat_maps/CMakeLists.txt | 19 ++++-- .../utilities/persistence_intervals/CMakeLists.txt | 7 +- .../persistence_landscapes/CMakeLists.txt | 8 +-- .../persistence_landscapes_on_grid/CMakeLists.txt | 8 +-- .../utilities/persistence_vectors/CMakeLists.txt | 8 +-- src/Rips_complex/example/CMakeLists.txt | 8 +++ src/common/example/CMakeLists.txt | 1 + src/python/CMakeLists.txt | 2 +- 16 files changed, 128 insertions(+), 118 deletions(-) create mode 100644 .github/build-requirements.txt create mode 100644 .github/test-requirements.txt delete mode 100644 .travis.yml create mode 100644 azure-pipelines.yml diff --git a/.github/build-requirements.txt b/.github/build-requirements.txt new file mode 100644 index 00000000..7de60d23 --- /dev/null +++ b/.github/build-requirements.txt @@ -0,0 +1,5 @@ +setuptools +wheel +numpy +Cython +pybind11 \ No newline at end of file diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt new file mode 100644 index 00000000..bd03f98e --- /dev/null +++ b/.github/test-requirements.txt @@ -0,0 +1,8 @@ +pytest +sphinx +sphinxcontrib-bibtex +sphinx-paramlinks +matplotlib +scipy +scikit-learn +POT \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 8980be10..00000000 --- a/.travis.yml +++ /dev/null @@ -1,78 +0,0 @@ -language: cpp - -sudo: required - -git: - depth: 3 - -os: osx -osx_image: xcode10.2 -compiler: clang - -matrix: - include: - - env: - # 1. 
Only examples and associated tests - - CMAKE_EXAMPLE='ON' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure' - - env: - # 2. Only unitary tests - - CMAKE_EXAMPLE='OFF' CMAKE_TEST='ON' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure' - - env: - # 3. Only utilities and associated tests - - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='ON' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure' - - env: - # 4. Only doxygen documentation - - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='doxygen' CTEST_COMMAND='echo No tests for doxygen target' - - env: - # 5. Only Python, associated tests and sphinx documentation - # $ which python3 => /usr/local/bin/python3 - # cmake => -- Found PythonInterp: /usr/local/bin/python3 (found version "3.7.5") - # In python3-sphinx-build.py, print(sys.executable) => /usr/local/opt/python/bin/python3.7 ??? - # should be : MAKE_TARGET='all sphinx' CTEST_COMMAND='ctest --output-on-failure' - - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='ON' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure -E sphinx' - -cache: - directories: - - $HOME/.cache/pip - - $HOME/Library/Caches/Homebrew - -before_install: - - brew update && brew unlink python@2 && brew upgrade python - -addons: - homebrew: - packages: - - python3 - - cmake - - graphviz - - doxygen - - boost - - eigen - - gmp - - mpfr - - tbb - - cgal - -before_cache: - - rm -f $HOME/.cache/pip/log/debug.log - - brew cleanup - -# When installing through libcgal-dev apt, CMake Error at CGAL Exports.cmake The imported target "CGAL::CGAL Qt5" references the file -install: - - python3 -m pip install --upgrade pip setuptools wheel - - python3 -m pip install --user pytest Cython sphinx sphinxcontrib-bibtex sphinx-paramlinks matplotlib numpy scipy scikit-learn - - python3 -m pip install --user POT pybind11 - -script: - - rm -rf build - - mkdir -p build - - cd build - - cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=${CMAKE_EXAMPLE} -DWITH_GUDHI_TEST=${CMAKE_TEST} -DWITH_GUDHI_UTILITIES=${CMAKE_UTILITIES} -DWITH_GUDHI_PYTHON=${CMAKE_PYTHON} -DUSER_VERSION_DIR=version -DPython_ADDITIONAL_VERSIONS=3 .. - - make ${MAKE_TARGET} - - ${CTEST_COMMAND} - - cd .. 
- -notifications: - email: - on_success: change # default: always - on_failure: always # default: always diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index ebd2f366..cca93f0c 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -42,24 +42,16 @@ RUN apt-get install -y make \ locales \ python3 \ python3-pip \ - python3-pytest \ python3-tk \ - python3-pybind11 \ libfreetype6-dev \ - pkg-config + pkg-config \ + curl -RUN pip3 install \ - setuptools \ - numpy \ - matplotlib \ - scipy \ - Cython \ - POT \ - scikit-learn \ - sphinx \ - sphinx-paramlinks \ - sphinxcontrib-bibtex \ - tensorflow +ADD .github/build-requirements.txt / +ADD .github/test-requirements.txt / + +RUN pip3 install -r build-requirements.txt +RUN pip3 install -r test-requirements.txt # apt clean up RUN apt autoremove && rm -rf /var/lib/apt/lists/* diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 00000000..77e0ac88 --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,44 @@ +trigger: + batch: true + branches: + include: + - '*' # All branches + +jobs: + + - job: 'Test' + displayName: "Build and test" + timeoutInMinutes: 0 + cancelTimeoutInMinutes: 60 + + strategy: + matrix: + macOSrelease: + imageName: 'macos-10.14' + CMakeBuildType: Release + customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal' + + pool: + vmImage: $(imageName) + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.7' + architecture: 'x64' + + - script: | + $(customInstallation) + git submodule update --init + python -m pip install --upgrade pip + python -m pip install --user -r .github/build-requirements.txt + python -m pip install --user -r .github/test-requirements.txt + displayName: 'Install build dependencies' + - script: | + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. 
+ make + make doxygen + ctest -j 8 --output-on-failure -E sphinx # remove sphinx build as it fails + displayName: 'Build, test and documentation generation' diff --git a/src/Alpha_complex/example/CMakeLists.txt b/src/Alpha_complex/example/CMakeLists.txt index b0337934..2eecd50c 100644 --- a/src/Alpha_complex/example/CMakeLists.txt +++ b/src/Alpha_complex/example/CMakeLists.txt @@ -32,14 +32,18 @@ if (DIFF_PATH) add_test(Alpha_complex_example_from_off_60_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt) + set_tests_properties(Alpha_complex_example_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_60) add_test(Alpha_complex_example_from_off_32_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt) + set_tests_properties(Alpha_complex_example_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_32) add_test(Alpha_complex_example_fast_from_off_60_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt) + set_tests_properties(Alpha_complex_example_fast_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_60) add_test(Alpha_complex_example_fast_from_off_32_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt) - endif() + set_tests_properties(Alpha_complex_example_fast_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_32) +endif() add_executable ( Alpha_complex_example_weighted_3d_from_points Weighted_alpha_complex_3d_from_points.cpp ) target_link_libraries(Alpha_complex_example_weighted_3d_from_points ${CGAL_LIBRARY}) diff --git a/src/Alpha_complex/utilities/CMakeLists.txt b/src/Alpha_complex/utilities/CMakeLists.txt index a3b0cc24..2ffbdde0 100644 --- a/src/Alpha_complex/utilities/CMakeLists.txt +++ b/src/Alpha_complex/utilities/CMakeLists.txt @@ -16,8 +16,14 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) if (DIFF_PATH) add_test(Alpha_complex_utilities_diff_exact_alpha_complex ${DIFF_PATH} "exact.pers" "safe.pers") + set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex PROPERTIES DEPENDS + "Alpha_complex_utilities_exact_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence") + add_test(Alpha_complex_utilities_diff_fast_alpha_complex ${DIFF_PATH} "fast.pers" "safe.pers") + set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex PROPERTIES DEPENDS + "Alpha_complex_utilities_fast_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence") + endif() install(TARGETS alpha_complex_persistence DESTINATION bin) @@ -36,15 +42,19 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "exact_3d.pers" "-e") - add_test(NAME Alpha_complex_utilities_safe_alpha_complex_3d COMMAND $ + add_test(NAME Alpha_complex_utilities_fast_alpha_complex_3d COMMAND $ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "fast_3d.pers" "-f") if (DIFF_PATH) add_test(Alpha_complex_utilities_diff_exact_alpha_complex_3d ${DIFF_PATH} "exact_3d.pers" "safe_3d.pers") + set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex_3d PROPERTIES DEPENDS + 
"Alpha_complex_utilities_exact_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d") add_test(Alpha_complex_utilities_diff_fast_alpha_complex_3d ${DIFF_PATH} "fast_3d.pers" "safe_3d.pers") + set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex_3d PROPERTIES DEPENDS + "Alpha_complex_utilities_fast_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d") endif() add_test(NAME Alpha_complex_utilities_periodic_alpha_complex_3d_persistence COMMAND $ diff --git a/src/Persistence_representations/utilities/CMakeLists.txt b/src/Persistence_representations/utilities/CMakeLists.txt index fc51b1d6..85633b7b 100644 --- a/src/Persistence_representations/utilities/CMakeLists.txt +++ b/src/Persistence_representations/utilities/CMakeLists.txt @@ -14,7 +14,7 @@ function(add_persistence_representation_creation_utility creation_utility) install(TARGETS ${creation_utility} DESTINATION bin) endfunction(add_persistence_representation_creation_utility) -function(add_persistence_representation_plot_utility plot_utility tool_extension) +function(add_persistence_representation_plot_utility creation_utility plot_utility tool_extension) add_executable ( ${plot_utility} ${plot_utility}.cpp ) # as the function is called in a subdirectory level, need to '../' to find persistence heat maps files @@ -22,17 +22,21 @@ function(add_persistence_representation_plot_utility plot_utility tool_extension "${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}") #add_test(NAME Persistence_representation_utilities_${plot_utility}_second COMMAND $ # "${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}") + set_tests_properties(Persistence_representation_utilities_${plot_utility}_first PROPERTIES DEPENDS + Persistence_representation_utilities_${creation_utility}) if(GNUPLOT_PATH) add_test(NAME Persistence_representation_utilities_${plot_utility}_first_gnuplot COMMAND ${GNUPLOT_PATH} "-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}_GnuplotScript'") #add_test(NAME Persistence_representation_utilities_${plot_utility}_second_gnuplot COMMAND ${GNUPLOT_PATH} # "-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}_GnuplotScript'") + set_tests_properties(Persistence_representation_utilities_${plot_utility}_first_gnuplot PROPERTIES DEPENDS + Persistence_representation_utilities_${plot_utility}_first) endif() install(TARGETS ${plot_utility} DESTINATION bin) endfunction(add_persistence_representation_plot_utility) -function(add_persistence_representation_function_utility function_utility tool_extension) +function(add_persistence_representation_function_utility creation_utility function_utility tool_extension) add_executable ( ${function_utility} ${function_utility}.cpp ) # ARGV2 is an optional argument @@ -48,6 +52,8 @@ function(add_persistence_representation_function_utility function_utility tool_e "${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}" "${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}") endif() + set_tests_properties(Persistence_representation_utilities_${function_utility} PROPERTIES DEPENDS + Persistence_representation_utilities_${creation_utility}) install(TARGETS ${function_utility} DESTINATION bin) endfunction(add_persistence_representation_function_utility) diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt index 89ef232f..e4c471c2 100644 --- 
a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt +++ b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt @@ -2,13 +2,24 @@ project(Persistence_representations_heat_maps_utilities) add_persistence_representation_creation_utility(create_pssk "10" "-1" "-1" "4" "-1") add_persistence_representation_creation_utility(create_p_h_m_weighted_by_arctan_of_their_persistence "10" "-1" "-1" "4" "-1") + add_persistence_representation_creation_utility(create_p_h_m_weighted_by_distance_from_diagonal "10" "-1" "-1" "4" "-1") +# Tests output the same file +set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal PROPERTIES DEPENDS + Persistence_representation_utilities_create_p_h_m_weighted_by_arctan_of_their_persistence) + add_persistence_representation_creation_utility(create_p_h_m_weighted_by_squared_diag_distance "10" "-1" "-1" "4" "-1") +# Tests output the same file +set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance PROPERTIES DEPENDS + Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal) + # Need to set grid min and max for further average, distance and scalar_product add_persistence_representation_creation_utility(create_persistence_heat_maps "10" "0" "35" "10" "-1") +set_tests_properties(Persistence_representation_utilities_create_persistence_heat_maps PROPERTIES DEPENDS + Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance) -add_persistence_representation_plot_utility(plot_persistence_heat_map ".mps") +add_persistence_representation_plot_utility(create_persistence_heat_maps plot_persistence_heat_map ".mps") -add_persistence_representation_function_utility(average_persistence_heat_maps ".mps") -add_persistence_representation_function_utility(compute_distance_of_persistence_heat_maps ".mps" "1") -add_persistence_representation_function_utility(compute_scalar_product_of_persistence_heat_maps ".mps") +add_persistence_representation_function_utility(create_persistence_heat_maps average_persistence_heat_maps ".mps") +add_persistence_representation_function_utility(create_persistence_heat_maps compute_distance_of_persistence_heat_maps ".mps" "1") +add_persistence_representation_function_utility(create_persistence_heat_maps compute_scalar_product_of_persistence_heat_maps ".mps") diff --git a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt index a025183e..118c1e9b 100644 --- a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt +++ b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt @@ -3,17 +3,16 @@ project(Persistence_representations_intervals_utilities) add_executable ( plot_histogram_of_intervals_lengths plot_histogram_of_intervals_lengths.cpp ) -add_test(NAME plot_histogram_of_intervals_lengths COMMAND $ +add_test(NAME Persistence_representation_utilities_plot_histogram_of_intervals_lengths COMMAND $ "${CMAKE_CURRENT_BINARY_DIR}/../first.pers" "-1") install(TARGETS plot_histogram_of_intervals_lengths DESTINATION bin) -add_persistence_representation_plot_utility(plot_persistence_intervals "") -add_persistence_representation_plot_utility(plot_persistence_Betti_numbers "") +add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_intervals "") 
+add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_Betti_numbers "") add_persistence_representation_creation_utility(compute_birth_death_range_in_persistence_diagram "-1") - add_executable ( compute_number_of_dominant_intervals compute_number_of_dominant_intervals.cpp ) add_test(NAME Persistence_representation_utilities_compute_number_of_dominant_intervals COMMAND $ diff --git a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt index 6b24d032..4df84d36 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt +++ b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt @@ -2,8 +2,8 @@ project(Persistence_representations_landscapes_utilities) add_persistence_representation_creation_utility(create_landscapes "-1") -add_persistence_representation_plot_utility(plot_landscapes ".land") +add_persistence_representation_plot_utility(create_landscapes plot_landscapes ".land") -add_persistence_representation_function_utility(average_landscapes ".land") -add_persistence_representation_function_utility(compute_distance_of_landscapes ".land" "1") -add_persistence_representation_function_utility(compute_scalar_product_of_landscapes ".land") +add_persistence_representation_function_utility(create_landscapes average_landscapes ".land") +add_persistence_representation_function_utility(create_landscapes compute_distance_of_landscapes ".land" "1") +add_persistence_representation_function_utility(create_landscapes compute_scalar_product_of_landscapes ".land") diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt index 36f3196b..8cd965f1 100644 --- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt +++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt @@ -3,8 +3,8 @@ project(Persistence_representations_lanscapes_on_grid_utilities) # Need to set grid min and max for further average, distance and scalar_product add_persistence_representation_creation_utility(create_landscapes_on_grid "100" "0" "35" "-1") -add_persistence_representation_plot_utility(plot_landscapes_on_grid ".g_land") +add_persistence_representation_plot_utility(create_landscapes_on_grid plot_landscapes_on_grid ".g_land") -add_persistence_representation_function_utility(average_landscapes_on_grid ".g_land") -add_persistence_representation_function_utility(compute_distance_of_landscapes_on_grid ".g_land" "1") -add_persistence_representation_function_utility(compute_scalar_product_of_landscapes_on_grid ".g_land") +add_persistence_representation_function_utility(create_landscapes_on_grid average_landscapes_on_grid ".g_land") +add_persistence_representation_function_utility(create_landscapes_on_grid compute_distance_of_landscapes_on_grid ".g_land" "1") +add_persistence_representation_function_utility(create_landscapes_on_grid compute_scalar_product_of_landscapes_on_grid ".g_land") diff --git a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt index bc982094..5b22ca84 100644 --- a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt +++ 
b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt @@ -2,8 +2,8 @@ project(Persistence_vectors_utilities) add_persistence_representation_creation_utility(create_persistence_vectors "-1") -add_persistence_representation_plot_utility(plot_persistence_vectors ".vect") +add_persistence_representation_plot_utility(create_persistence_vectors plot_persistence_vectors ".vect") -add_persistence_representation_function_utility(average_persistence_vectors ".vect") -add_persistence_representation_function_utility(compute_distance_of_persistence_vectors ".vect" "1") -add_persistence_representation_function_utility(compute_scalar_product_of_persistence_vectors ".vect") +add_persistence_representation_function_utility(create_persistence_vectors average_persistence_vectors ".vect") +add_persistence_representation_function_utility(create_persistence_vectors compute_distance_of_persistence_vectors ".vect" "1") +add_persistence_representation_function_utility(create_persistence_vectors compute_scalar_product_of_persistence_vectors ".vect") diff --git a/src/Rips_complex/example/CMakeLists.txt b/src/Rips_complex/example/CMakeLists.txt index e7772bdb..244a93ec 100644 --- a/src/Rips_complex/example/CMakeLists.txt +++ b/src/Rips_complex/example/CMakeLists.txt @@ -53,15 +53,23 @@ if (DIFF_PATH) add_test(Rips_complex_example_from_off_doc_12_1_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_1.txt ${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt) + set_tests_properties(Rips_complex_example_from_off_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_1) + add_test(Rips_complex_example_from_off_doc_12_3_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_3.txt ${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt) + set_tests_properties(Rips_complex_example_from_off_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_3) + add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_1.txt ${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt) + set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_1) + add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_3.txt ${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt) + set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_3) + endif() install(TARGETS Rips_complex_example_from_off DESTINATION bin) diff --git a/src/common/example/CMakeLists.txt b/src/common/example/CMakeLists.txt index 583a0027..fa8eb98c 100644 --- a/src/common/example/CMakeLists.txt +++ b/src/common/example/CMakeLists.txt @@ -12,6 +12,7 @@ if (DIFF_PATH) add_test(Common_example_vector_double_off_reader_diff_files ${DIFF_PATH} ${CMAKE_CURRENT_BINARY_DIR}/vectordoubleoffreader_result.txt ${CMAKE_CURRENT_BINARY_DIR}/alphacomplexdoc.off.txt) + set_tests_properties(Common_example_vector_double_off_reader_diff_files PROPERTIES DEPENDS Common_example_vector_double_off_reader) endif() if(NOT CGAL_VERSION VERSION_LESS 4.11.0) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 090a7446..20e72a5f 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -181,7 
+181,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${MPFR_LIBRARIES_DIR}', ") message("** Add mpfr ${MPFR_LIBRARIES}") endif(MPFR_FOUND) -endif(CGAL_FOUND) + endif(CGAL_FOUND) # Specific for Mac if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") -- cgit v1.2.3 From a9d2eae3139d291a3d66e71174b22a851bd797ba Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 5 Mar 2020 10:36:19 +0100 Subject: remove ext (submodule directory) from doxygen parsing --- src/Doxyfile.in | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Doxyfile.in b/src/Doxyfile.in index ec551882..49e781bd 100644 --- a/src/Doxyfile.in +++ b/src/Doxyfile.in @@ -785,6 +785,7 @@ EXCLUDE = data/ \ GudhUI/ \ cmake/ \ python/ \ + ext/ \ README.md # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -- cgit v1.2.3 From 05985743182a5e7d462a6a8b872b92c9b8a90404 Mon Sep 17 00:00:00 2001 From: Théo Lacombe Date: Thu, 5 Mar 2020 11:31:47 +0100 Subject: Update src/python/gudhi/persistence_graphical_tools.py Co-Authored-By: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> --- src/python/gudhi/persistence_graphical_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 48e26432..c9af88f5 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -51,7 +51,7 @@ def _array_handler(a): persistence-compatible list (padding with 0), so that the plot can be performed seamlessly. ''' - if isinstance(a, np.ndarray): + if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float): return [[0, x] for x in a] else: return a -- cgit v1.2.3 From 19ea0c10f283188282a78ebebf4c1a51f2f40040 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 5 Mar 2020 14:14:27 +0100 Subject: CR: use complex_simplex_range instead of filtration_simplex_range --- .../test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp index a8130e25..4697ec05 100644 --- a/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp @@ -127,7 +127,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing_on_nan_values, type st.make_filtration_non_decreasing(); std::cout << "Check all filtration values are NaN" << std::endl; - for (auto f_simplex : st.filtration_simplex_range()) { + for (auto f_simplex : st.complex_simplex_range()) { BOOST_CHECK(std::isnan(st.filtration(f_simplex))); } @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing_on_nan_values, type BOOST_CHECK(st.make_filtration_non_decreasing()); std::cout << "Check all filtration values are now defined" << std::endl; - for (auto f_simplex : st.filtration_simplex_range()) { + for (auto f_simplex : st.complex_simplex_range()) { BOOST_CHECK(!std::isnan(st.filtration(f_simplex))); } } \ No newline at end of file -- cgit v1.2.3 From c1ce28b8e8021097825a893564aed97757f2ac8e Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Thu, 5 Mar 2020 14:27:43 +0100 Subject: Fix bad link --- .github/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 13d6cad7..a18ff8bd 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -19,7 +19,7 @@ There is a label **enhancement** in the [new issue](https://github.com/GUDHI/gud ## You are not familiar with GitHub ? -Please take some time to read our [how to use GitHub to contribute to GUDHI](/home/vincent/workspace/gudhi/gudhi-devel/for_dev/how_to_use_github_to_contribute_to_gudhi.md). +Please take some time to read our [how to use GitHub to contribute to GUDHI](how_to_use_github_to_contribute_to_gudhi.md). ## Something you want to improve in the documentation -- cgit v1.2.3 From eccfa153efe660662be945134115f7634c27335d Mon Sep 17 00:00:00 2001 From: tlacombe Date: Thu, 5 Mar 2020 15:32:09 +0100 Subject: updated doc --- src/python/gudhi/persistence_graphical_tools.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index c9af88f5..6fd854ff 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -74,9 +74,8 @@ def plot_persistence_barcode( in a single homology dimension), or from a :doc:`persistence file `. - :param persistence: Persistence intervals values list grouped by dimension, - or np.array of shape (N x 2). - :type persistence: list of tuples(dimension, tuple(birth, death)). + :param persistence: Persistence intervals values list. Can be grouped by dimension or not. + :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). :type persistence_file: string @@ -217,9 +216,8 @@ def plot_persistence_diagram( list, a np.array of shape (N x 2) representing a diagram in a single homology dimension, or from a :doc:`persistence file `. - :param persistence: Persistence intervals values list grouped by dimension, - or np.array of shape (N x 2). - :type persistence: list of tuples(dimension, tuple(birth, death)). + :param persistence: Persistence intervals values list. Can be grouped by dimension or not. + :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). :type persistence_file: string @@ -373,9 +371,10 @@ def plot_persistence_density( up to you to select the required one. This function also does not handle degenerate data set (scipy correlation matrix inversion can fail). - :param persistence: Persistence intervals values list grouped by dimension, - or np.array of shape (N x 2). - :type persistence: list of tuples(dimension, tuple(birth, death)). + :param persistence: Persistence intervals values list. + Can be grouped by dimension or not. + :type persistence: an array of (dimension, array of (birth, death)) + or an array of (birth, death). :param persistence_file: A :doc:`persistence file ` style name (reset persistence if both are set). :type persistence_file: string -- cgit v1.2.3 From 570a9b83eb3f714bc52735dae289a5195874bf41 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Thu, 5 Mar 2020 15:40:45 +0100 Subject: completed as... 
--- src/python/gudhi/wasserstein.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index ba0f7343..aab0cb3c 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -30,7 +30,9 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): :param order: exponent for the Wasserstein metric. :param internal_p: Ground metric (i.e. norm L^p). :returns: (n+1) x (m+1) np.array encoding the cost matrix C. - For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal. + For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], + while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) + and its orthogonal proj onto the diagonal. note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal). ''' Xdiag = _proj_on_diag(X) @@ -88,7 +90,9 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. - :param matching: if True, computes and returns the optimal matching between X and Y, encoded as... + :param matching: if True, computes and returns the optimal matching between X and Y, encoded as + a list of tuple [...(i,j)...], meaning the i-th point in X is matched to + the j-th point in Y, with the convention (-1) represents the diagonal. :param order: exponent for Wasserstein; Default value is 2. :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). 
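To make the new `matching` option concrete, a small usage sketch of the encoding documented above. The diagrams are made up, and the assumption that the function returns the cost together with the matching when `matching=True` is taken from this docstring, not from code shown in this series:

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    X = np.array([[2.7, 3.7], [9.6, 14.0]])  # hypothetical first diagram
    Y = np.array([[2.8, 4.45]])              # hypothetical second diagram

    cost, matching = wasserstein_distance(X, Y, matching=True, order=2., internal_p=2.)
    for i, j in matching:
        if j == -1:
            print("X[%d] is matched to the diagonal" % i)
        elif i == -1:
            print("Y[%d] is matched to the diagonal" % j)
        else:
            print("X[%d] is matched to Y[%d]" % (i, j))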
-- cgit v1.2.3


From 27d5a1dfdaeb2bc1840e9c39be4c58570d948d56 Mon Sep 17 00:00:00 2001
From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com>
Date: Thu, 5 Mar 2020 16:27:30 +0100
Subject: [skip ci] Replace travis badge with azure one

---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index f7e3d70c..279953e1 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-[![Build Status](https://travis-ci.org/GUDHI/gudhi-devel.svg?branch=master)](https://travis-ci.org/GUDHI/gudhi-devel)
-[![CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
-[![Build status](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
+[![OSx on Azure](https://dev.azure.com/GUDHI/gudhi-devel/_apis/build/status/GUDHI.gudhi-devel?branchName=master)](https://dev.azure.com/GUDHI/gudhi-devel/_build/latest?definitionId=1&branchName=master)
+[![Linux on CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
+[![Win on Appveyor](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
 [![Anaconda Cloud](https://anaconda.org/conda-forge/gudhi/badges/version.svg)](https://anaconda.org/conda-forge/gudhi)
 [![Anaconda downloads](https://anaconda.org/conda-forge/gudhi/badges/downloads.svg)](https://anaconda.org/conda-forge/gudhi)
-- cgit v1.2.3


From ae8c34ad96f421d480cbe6bff9c04ec6f0eff920 Mon Sep 17 00:00:00 2001
From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com>
Date: Thu, 5 Mar 2020 16:55:16 +0100
Subject: [skip ci] Install full LaTex matplotlib seems to fail to produce LaTeX-like plots

---
 Dockerfile_for_circleci_image | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
index cca93f0c..1eededb5 100644
--- a/Dockerfile_for_circleci_image
+++ b/Dockerfile_for_circleci_image
@@ -30,7 +30,7 @@ RUN apt-get install -y make \
     cmake \
     graphviz \
     perl \
-    texlive-bibtex-extra \
+    texlive-full \
     biber \
     doxygen \
     libboost-all-dev \
-- cgit v1.2.3


From 7789744d5d5972ef2d6218296a7b8a0a05337679 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Fri, 6 Mar 2020 13:31:05 +0100
Subject: set greyblock to False by default in density

---
 src/python/gudhi/persistence_graphical_tools.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 6fd854ff..8c38b684 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -361,7 +361,7 @@ def plot_persistence_density(
     legend=False,
     axes=None,
     fontsize=16,
-    greyblock=True
+    greyblock=False
 ):
     """This function plots the persistence density from persistence
     values list, np.array of shape (N x 2) representing a diagram
@@ -410,7 +410,7 @@ def plot_persistence_density(
     :param fontsize: Fontsize to use in axis.
     :type fontsize: int
     :param greyblock: if we want to plot a grey patch on the lower half plane
-        for nicer rendering. Default True.
+        for nicer rendering. Default False.
     :type greyblock: boolean
 
     :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
     """
-- cgit v1.2.3


From 78ccc10eb0034a4648df303f2913b6b4680b085e Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Fri, 6 Mar 2020 17:35:38 +0100
Subject: Generators for simplex tree

---
 src/Simplex_tree/include/gudhi/Simplex_tree.h      | 86 +++++++++++++++++++++-
 .../gudhi/Simplex_tree/Simplex_tree_iterators.h    | 12 +--
 src/Simplex_tree/test/simplex_tree_unit_test.cpp   | 36 +++++++++
 3 files changed, 122 insertions(+), 12 deletions(-)

diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 76608008..7315bf45 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -24,6 +24,7 @@
 #include <boost/iterator/transform_iterator.hpp>
 #include <boost/graph/adjacency_list.hpp>
 #include <boost/range/adaptor/reversed.hpp>
+#include <boost/container/static_vector.hpp>
 #ifdef GUDHI_USE_TBB
 #include <tbb/parallel_sort.h>
 #endif
@@ -246,8 +247,8 @@ class Simplex_tree {
    * which is consequenlty
    * equal to \f$(-1)^{\text{dim} \sigma}\f$ the canonical orientation on the simplex.
    */
-  Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) {
-    assert(sh != null_simplex());  // Empty simplex
+  Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) const {
+    GUDHI_CHECK(sh != null_simplex(), "empty simplex");
     return Simplex_vertex_range(Simplex_vertex_iterator(this, sh),
                                 Simplex_vertex_iterator(this));
   }
@@ -450,6 +451,15 @@ class Simplex_tree {
     return true;
   }
 
+  /** \brief Returns the filtration value of a simplex.
+   *
+   * Same as `filtration()`, but does not handle `null_simplex()`.
+   */
+  static Filtration_value filtration_(Simplex_handle sh) {
+    GUDHI_CHECK (sh != null_simplex(), "null simplex");
+    return sh->second.filtration();
+  }
+
 public:
   /** \brief Returns the key associated to a simplex.
    *
@@ -827,7 +837,7 @@ class Simplex_tree {
 
   /** Returns the Siblings containing a simplex.*/
   template<class SimplexHandle>
-  Siblings* self_siblings(SimplexHandle sh) {
+  static Siblings* self_siblings(SimplexHandle sh) {
     if (sh->second.children()->parent() == sh->first)
       return sh->second.children()->oncles();
     else
@@ -1465,6 +1475,76 @@ class Simplex_tree {
     }
   }
 
+  /** \brief Returns a vertex of `sh` that has the same filtration value as `sh` if it exists, and `null_vertex()` otherwise.
+   *
+   * For a lower-star filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which vertex had its filtration value propagated to `sh`.
+   * If several vertices have the same filtration value, the one it returns is arbitrary. */
+  Vertex_handle vertex_with_same_filtration(Simplex_handle sh) {
+    auto filt = filtration_(sh);
+    for(auto v : simplex_vertex_range(sh))
+      if(filtration_(find_vertex(v)) == filt)
+        return v;
+    return null_vertex();
+  }
+
+  /** \brief Returns an edge of `sh` that has the same filtration value as `sh` if it exists, and `null_simplex()` otherwise.
+   *
+   * For a flag-complex built with `expansion()`, this is a way to invert the process and find out which edge had its filtration value propagated to `sh`.
+   * If several edges have the same filtration value, the one it returns is arbitrary.
+   *
+   * \pre `sh` must have dimension at least 1. */
+  Simplex_handle edge_with_same_filtration(Simplex_handle sh) {
+#if 0
+    // FIXME: Only do this if dim >= 2, since we don't want to return a vertex...
+    // Test if we are lucky and the parent has the same filtration value.
+    Siblings* sib = self_siblings(sh);
+    Vertex_handle v_par = sib->parent();
+    sib = sib->oncles();
+    Simplex_handle par = sib->find(v_par);
+    if(filtration_(par) == filt) return edge_with_same_filtration(par);
+#endif
+    auto&& vertices = simplex_vertex_range(sh);
+    auto end = std::end(vertices);
+    auto vi = std::begin(vertices);
+    GUDHI_CHECK(vi != end, "empty simplex");
+    auto v0 = *vi;
+    ++vi;
+    GUDHI_CHECK(vi != end, "simplex of dimension 0");
+    if(std::next(vi) == end) return sh; // shortcut for dimension 1
+    boost::container::static_vector<Vertex_handle, 40> suffix;
+    suffix.push_back(v0);
+    auto filt = filtration_(sh);
+    do
+    {
+      Vertex_handle v = *vi;
+      auto&& children1 = find_vertex(v)->second.children()->members_;
+      for(auto w : suffix){
+        // Can we take advantage of the fact that suffix is ordered?
+        Simplex_handle s = children1.find(w);
+        if(filtration_(s) == filt)
+          return s;
+      }
+      suffix.push_back(v);
+    }
+    while(++vi != end);
+    return null_simplex();
+  }
+
+  /** \brief Returns an edge of `sh` that has the same filtration value as `sh` if it exists, and `null_simplex()` otherwise.
+   *
+   * For a flag-complex built with `expansion()`, this is a way to invert the process and find out which edge had its filtration value propagated to `sh`.
+   * If several edges have the same filtration value, the one it returns is arbitrary. */
+  Simplex_handle minimal_simplex_with_same_filtration(Simplex_handle sh) {
+    if(dimension(sh) == 0) // vertices are minimal
+      return sh;
+    auto filt = filtration_(sh);
+    // Naive implementation, it can be sped up.
+    for(auto b : boundary_simplex_range(sh))
+      if(filtration_(b) == filt)
+        return minimal_simplex_with_same_filtration(b);
+    return sh; // None of its faces has the same filtration.
+  }
+
 private:
   Vertex_handle null_vertex_;
   /** \brief Total number of simplices in the complex, without the empty simplex.*/
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index efccf2f2..9007b6bd 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -15,9 +15,7 @@
 #include <boost/iterator/iterator_facade.hpp>
 #include <boost/version.hpp>
-#if BOOST_VERSION >= 105600
-# include <boost/container/static_vector.hpp>
-#endif
+#include <boost/container/static_vector.hpp>
 
 #include <vector>
 
@@ -42,13 +40,13 @@ class Simplex_tree_simplex_vertex_iterator : public boost::iterator_facade<
   typedef typename SimplexTree::Siblings Siblings;
   typedef typename SimplexTree::Vertex_handle Vertex_handle;
 
-  explicit Simplex_tree_simplex_vertex_iterator(SimplexTree * st)
+  explicit Simplex_tree_simplex_vertex_iterator(SimplexTree const* st)
       :  // any end() iterator
         sib_(nullptr),
         v_(st->null_vertex()) {
   }
 
-  Simplex_tree_simplex_vertex_iterator(SimplexTree * st, Simplex_handle sh)
+  Simplex_tree_simplex_vertex_iterator(SimplexTree const* st, Simplex_handle sh)
       : sib_(st->self_siblings(sh)),
         v_(sh->first) {
   }
@@ -166,15 +164,11 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
   // Most of the storage should be moved to the range, iterators should be light.
   Vertex_handle last_;  // last vertex of the simplex
   Vertex_handle next_;  // next vertex to push in suffix_
-#if BOOST_VERSION >= 105600
   // 40 seems a conservative bound on the dimension of a Simplex_tree for now,
   // as it would not fit on the biggest hard-drive.
   boost::container::static_vector<Vertex_handle, 40> suffix_;
   // static_vector still has some overhead compared to a trivial hand-made
  // version using std::aligned_storage, or compared to making suffix_ static.
-#else
-  std::vector<Vertex_handle> suffix_;
-#endif
   Siblings * sib_;  // where the next search will start from
   Simplex_handle sh_;  // current Simplex_handle in the boundary
   SimplexTree * st_;  // simplex containing the simplicial complex
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 58bfa8db..2a2e2b25 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -986,5 +986,41 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested
             << " - num_simplices = " << st.num_simplices() << std::endl;
   BOOST_CHECK(st.dimension() == 1);
   BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1);
+}
 
+BOOST_AUTO_TEST_CASE_TEMPLATE(generators, typeST, list_of_tested_variants) {
+  std::cout << "********************************************************************" << std::endl;
+  std::cout << "TEST FIND GENERATORS" << std::endl;
+  {
+    typeST st;
+    st.insert_simplex_and_subfaces({0,1,2,3,4,5,6},0);
+    st.assign_filtration(st.find({0,2,4}), 10);
+    st.assign_filtration(st.find({1,5}), 20);
+    st.assign_filtration(st.find({1,2,4}), 30);
+    st.assign_filtration(st.find({3}), 5);
+    st.make_filtration_non_decreasing();
+    BOOST_CHECK(st.filtration(st.find({1,2}))==0);
+    BOOST_CHECK(st.filtration(st.find({0,1,2,3,4}))==30);
+    BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,1,2,3,4,5}))==st.find({1,2,4}));
+    BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,2,3}))==st.find({3}));
+    auto s=st.minimal_simplex_with_same_filtration(st.find({0,2,6}));
+    BOOST_CHECK(s==st.find({0})||s==st.find({2})||s==st.find({6}));
+    BOOST_CHECK(st.vertex_with_same_filtration(st.find({2}))==2);
+    BOOST_CHECK(st.vertex_with_same_filtration(st.find({1,5}))==st.null_vertex());
+    BOOST_CHECK(st.vertex_with_same_filtration(st.find({5,6}))>=5);
+  }
+  {
+    typeST st;
+    st.insert_simplex_and_subfaces({0,1}, 8);
+    st.insert_simplex_and_subfaces({0,2}, 10);
+    st.insert_simplex_and_subfaces({3,4}, 6);
+    st.insert_simplex_and_subfaces({1,2}, 5);
+    st.insert_simplex_and_subfaces({1,5}, 4);
+    st.insert_simplex_and_subfaces({0,5}, 3);
+    st.insert_simplex_and_subfaces({2,5}, 2);
+    st.insert_simplex_and_subfaces({1,3}, 9);
+    st.expansion(50);
+    BOOST_CHECK(st.edge_with_same_filtration(st.find({0,1,2,5}))==st.find({0,2}));
+    BOOST_CHECK(st.edge_with_same_filtration(st.find({1,5}))==st.find({1,5}));
+  }
 }
-- cgit v1.2.3
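For readers following along from Python, the lower-star inversion performed by the new `vertex_with_same_filtration()` member can be sketched against the existing Python `SimplexTree` API — an illustrative re-implementation only, not a binding of the C++ member added above:

    import gudhi

    def vertex_with_same_filtration(st, simplex):
        # Return a vertex of `simplex` whose filtration value equals that of
        # `simplex` (i.e. the vertex that propagated its value in a
        # lower-star filtration), or None if there is none.
        f = st.filtration(simplex)
        return next((v for v in simplex if st.filtration([v]) == f), None)

    st = gudhi.SimplexTree()
    st.insert([0], filtration=0.0)
    st.insert([1], filtration=2.0)
    st.insert([0, 1], filtration=2.0)  # lower-star: the edge takes the max vertex value
    print(vertex_with_same_filtration(st, [0, 1]))  # prints 1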
-#else - std::vector suffix_; -#endif Siblings * sib_; // where the next search will start from Simplex_handle sh_; // current Simplex_handle in the boundary SimplexTree * st_; // simplex containing the simplicial complex diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp index 58bfa8db..2a2e2b25 100644 --- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp +++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp @@ -986,5 +986,41 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested << " - num_simplices = " << st.num_simplices() << std::endl; BOOST_CHECK(st.dimension() == 1); BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1); +} +BOOST_AUTO_TEST_CASE_TEMPLATE(generators, typeST, list_of_tested_variants) { + std::cout << "********************************************************************" << std::endl; + std::cout << "TEST FIND GENERATORS" << std::endl; + { + typeST st; + st.insert_simplex_and_subfaces({0,1,2,3,4,5,6},0); + st.assign_filtration(st.find({0,2,4}), 10); + st.assign_filtration(st.find({1,5}), 20); + st.assign_filtration(st.find({1,2,4}), 30); + st.assign_filtration(st.find({3}), 5); + st.make_filtration_non_decreasing(); + BOOST_CHECK(st.filtration(st.find({1,2}))==0); + BOOST_CHECK(st.filtration(st.find({0,1,2,3,4}))==30); + BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,1,2,3,4,5}))==st.find({1,2,4})); + BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,2,3}))==st.find({3})); + auto s=st.minimal_simplex_with_same_filtration(st.find({0,2,6})); + BOOST_CHECK(s==st.find({0})||s==st.find({2})||s==st.find({6})); + BOOST_CHECK(st.vertex_with_same_filtration(st.find({2}))==2); + BOOST_CHECK(st.vertex_with_same_filtration(st.find({1,5}))==st.null_vertex()); + BOOST_CHECK(st.vertex_with_same_filtration(st.find({5,6}))>=5); + } + { + typeST st; + st.insert_simplex_and_subfaces({0,1}, 8); + st.insert_simplex_and_subfaces({0,2}, 10); + st.insert_simplex_and_subfaces({3,4}, 6); + st.insert_simplex_and_subfaces({1,2}, 5); + st.insert_simplex_and_subfaces({1,5}, 4); + st.insert_simplex_and_subfaces({0,5}, 3); + st.insert_simplex_and_subfaces({2,5}, 2); + st.insert_simplex_and_subfaces({1,3}, 9); + st.expansion(50); + BOOST_CHECK(st.edge_with_same_filtration(st.find({0,1,2,5}))==st.find({0,2})); + BOOST_CHECK(st.edge_with_same_filtration(st.find({1,5}))==st.find({1,5})); + } } -- cgit v1.2.3 From 5748647e9e89bb91e361b06c7c6cb053081618bf Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 6 Mar 2020 18:10:05 +0100 Subject: Fix doc of minimal_simplex_with_same_filtration --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 7315bf45..b6973756 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1530,13 +1530,11 @@ class Simplex_tree { return null_simplex(); } - /** \brief Returns an edge of `sh` that has the same filtration value as `sh` if it exists, and `null_simplex()` otherwise. + /** \brief Returns a minimal face of `sh` that has the same filtration value as `sh`. * - * For a flag-complex built with `expansion()`, this is a way to invert the process and find out which edge had its filtration value propagated to `sh`. - * If several edges have the same filtration value, the one it returns is arbitrary. 
*/ + * For a complex built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which simplex had its filtration value propagated to `sh`. + * If several minimal (for inclusion) simplices have the same filtration value, the one it returns is arbitrary, and it is not guaranteed to be the one with smallest dimension. */ Simplex_handle minimal_simplex_with_same_filtration(Simplex_handle sh) { - if(dimension(sh) == 0) // vertices are minimal - return sh; auto filt = filtration_(sh); // Naive implementation, it can be sped up. for(auto b : boundary_simplex_range(sh)) -- cgit v1.2.3 From 8bd39f74f69e8fcb662873e0e045c953b814f28f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 6 Mar 2020 18:21:40 +0100 Subject: Tweak doc. --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index b6973756..ad592a92 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1532,7 +1532,7 @@ class Simplex_tree { /** \brief Returns a minimal face of `sh` that has the same filtration value as `sh`. * - * For a complex built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which simplex had its filtration value propagated to `sh`. + * For a filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which simplex had its filtration value propagated to `sh`. * If several minimal (for inclusion) simplices have the same filtration value, the one it returns is arbitrary, and it is not guaranteed to be the one with smallest dimension. */ Simplex_handle minimal_simplex_with_same_filtration(Simplex_handle sh) { auto filt = filtration_(sh); -- cgit v1.2.3 From 64199fd8037556f135f90102ba8270cccf9d3e60 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 7 Mar 2020 01:08:10 +0100 Subject: persistence generators for lower-star and flag filtrations --- src/python/gudhi/simplex_tree.pxd | 2 + src/python/gudhi/simplex_tree.pyx | 55 ++++++++ .../include/Persistent_cohomology_interface.h | 138 ++++++++++++++++----- 3 files changed, 167 insertions(+), 28 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 96d14079..4e435c67 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -53,3 +53,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": vector[pair[double,double]] intervals_in_dimension(int dimension) void write_output_diagram(string diagram_file_name) vector[pair[vector[int], vector[int]]] persistence_pairs() + pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() + pair[vector[vector[int]], vector[vector[int]]] flag_generators() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index b18627c4..1c9b9cf1 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -514,3 +514,58 @@ cdef class SimplexTree: else: print("intervals_in_dim function requires persistence function" " to be launched first.") + + def lower_star_persistence_generators(self): + """Assuming this is a lower-star filtration, this function returns the persistence pairs, + where each simplex is replaced with the vertex that gave it its filtration value. 
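As an illustrative sketch (assuming a SimplexTree `st` carrying a lower-star filtration, with `persistence()` already computed), the returned vertices can be mapped back to birth and death values:

    # Hypothetical usage; st is a gudhi.SimplexTree, persistence() already called.
    regular, essential = st.lower_star_persistence_generators()
    for dim, pairs in enumerate(regular):
        for birth_v, death_v in pairs:        # one vertex per extremity
            birth = st.filtration([birth_v])  # filtration value of the birth vertex
            death = st.filtration([death_v])  # filtration value of the death vertex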
+ + :returns: first the regular persistence pairs, grouped by dimension, with one vertex per extremity, + and second the essential features, grouped by dimension, with one vertex each + :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]] + + :note: intervals_in_dim function requires + :func:`persistence()` + function to be launched first. + """ + if self.pcohptr != NULL: + gen = self.pcohptr.lower_star_generators() + normal = [np_array(d).reshape(-1,2) for d in gen.first] + infinite = [np_array(d) for d in gen.second] + return (normal, infinite) + else: + print("lower_star_persistence_generators() requires that persistence() be called first.") + + def flag_persistence_generators(self): + """Assuming this is a flag complex, this function returns the persistence pairs, + where each simplex is replaced with the vertices of the edges that gave it its filtration value. + + :returns: first the regular persistence pairs of dimension 0, with one vertex for birth and two for death; + then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity; + then the connected components, with one vertex each; + finally the other essential features, grouped by dimension, with 2 vertices for birth. + :rtype: Tuple[List[numpy.array[int] of shape (n,3)], List[numpy.array[int] of shape (m,4)], List[numpy.array[int] of shape (l,)], List[numpy.array[int] of shape (k,2)]] + + :note: intervals_in_dim function requires + :func:`persistence()` + function to be launched first. + """ + if self.pcohptr != NULL: + gen = self.pcohptr.flag_generators() + if len(gen.first) == 0: + normal0 = np_array([]) + normals = np_array([]) + else: + l = iter(gen.first) + normal0 = np_array(next(l)).reshape(-1,3) + normals = [np_array(d).reshape(-1,4) for d in l] + if len(gen.second) == 0: + infinite0 = np_array([]) + infinites = np_array([]) + else: + l = iter(gen.second) + infinite0 = np_array(next(l)) + infinites = [np_array(d).reshape(-1,3) for d in l] + + return (normal0, normals, infinite0, infinites) + else: + print("lower_star_persistence_generators() requires that persistence() be called first.") diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 8c79e6f3..6e9aac52 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -23,61 +23,55 @@ template class Persistent_cohomology_interface : public persistent_cohomology::Persistent_cohomology { private: + typedef persistent_cohomology::Persistent_cohomology Base; /* * Compare two intervals by dimension, then by length. */ struct cmp_intervals_by_dim_then_length { - explicit cmp_intervals_by_dim_then_length(FilteredComplex * sc) - : sc_(sc) { } - template bool operator()(const Persistent_interval & p1, const Persistent_interval & p2) { - if (sc_->dimension(get < 0 > (p1)) == sc_->dimension(get < 0 > (p2))) - return (sc_->filtration(get < 1 > (p1)) - sc_->filtration(get < 0 > (p1)) - > sc_->filtration(get < 1 > (p2)) - sc_->filtration(get < 0 > (p2))); + if (std::get<0>(p1) == std::get<0>(p2)) { + auto& i1 = std::get<1>(p1); + auto& i2 = std::get<1>(p2); + return std::get<1>(i1) - std::get<0>(i1) > std::get<1>(i2) - std::get<0>(i2); + } else - return (sc_->dimension(get < 0 > (p1)) > sc_->dimension(get < 0 > (p2))); + return (std::get<0>(p1) > std::get<0>(p2)); + // Why does this sort by decreasing dimension? 
} - FilteredComplex* sc_; }; public: Persistent_cohomology_interface(FilteredComplex* stptr) - : persistent_cohomology::Persistent_cohomology(*stptr), + : Base(*stptr), stptr_(stptr) { } Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max) - : persistent_cohomology::Persistent_cohomology(*stptr, persistence_dim_max), + : Base(*stptr, persistence_dim_max), stptr_(stptr) { } std::vector>> get_persistence(int homology_coeff_field, double min_persistence) { - persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); - persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); - - // Custom sort and output persistence - cmp_intervals_by_dim_then_length cmp(stptr_); - auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); - std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); + Base::init_coefficients(homology_coeff_field); + Base::compute_persistent_cohomology(min_persistence); + auto const& persistent_pairs = Base::get_persistent_pairs(); std::vector>> persistence; + persistence.reserve(persistent_pairs.size()); for (auto pair : persistent_pairs) { - persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)), - std::make_pair(stptr_->filtration(get<0>(pair)), - stptr_->filtration(get<1>(pair))))); + persistence.emplace_back(stptr_->dimension(get<0>(pair)), + std::make_pair(stptr_->filtration(get<0>(pair)), + stptr_->filtration(get<1>(pair)))); } + // Custom sort and output persistence + cmp_intervals_by_dim_then_length cmp; + std::sort(std::begin(persistence), std::end(persistence), cmp); return persistence; } std::vector, std::vector>> persistence_pairs() { - auto pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); - std::vector, std::vector>> persistence_pairs; + auto const& pairs = Base::get_persistent_pairs(); persistence_pairs.reserve(pairs.size()); for (auto pair : pairs) { std::vector birth; @@ -89,16 +83,104 @@ persistent_cohomology::Persistent_cohomology death; if (get<1>(pair) != stptr_->null_simplex()) { + death.reserve(birth.size()+1); for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))) { death.push_back(vertex); } } - persistence_pairs.push_back(std::make_pair(birth, death)); + persistence_pairs.emplace_back(std::move(birth), std::move(death)); } return persistence_pairs; } + // TODO: (possibly at the python level) + // - an option to ignore intervals of length 0? + // - an option to return only some of those vectors? 
+ typedef std::pair>, std::vector>> Generators; + + Generators lower_star_generators() { + Generators out; + // diags[i] should be interpreted as vector> + auto& diags = out.first; + // diagsinf[i] should be interpreted as vector + auto& diagsinf = out.second; + for (auto pair : Base::get_persistent_pairs()) { + auto s = std::get<0>(pair); + auto t = std::get<1>(pair); + int dim = stptr_->dimension(s); + auto v = stptr_->vertex_with_same_filtration(s); + if(t == stptr_->null_simplex()) { + while(diagsinf.size() < dim+1) diagsinf.emplace_back(); + diagsinf[dim].push_back(v); + } else { + while(diags.size() < dim+1) diags.emplace_back(); + auto w = stptr_->vertex_with_same_filtration(t); + diags[dim].push_back(v); + diags[dim].push_back(w); + } + } + return out; + } + + Generators flag_generators() { + Generators out; + // diags[0] should be interpreted as vector> and other diags[i] as vector> + auto& diags = out.first; + // diagsinf[0] should be interpreted as vector and other diagsinf[i] as vector> + auto& diagsinf = out.second; + for (auto pair : Base::get_persistent_pairs()) { + auto s = std::get<0>(pair); + auto t = std::get<1>(pair); + int dim = stptr_->dimension(s); + bool infinite = t == stptr_->null_simplex(); + if(infinite) { + if(dim == 0) { + auto v = *std::begin(stptr_->simplex_vertex_range(s)); + if(diagsinf.size()==0)diagsinf.emplace_back(); + diagsinf[0].push_back(v); + } else { + auto e = stptr_->edge_with_same_filtration(s); + auto&& e_vertices = stptr_->simplex_vertex_range(e); + auto i = std::begin(e_vertices); + auto v1 = *i; + auto v2 = *++i; + GUDHI_CHECK(++i==std::end(e_vertices), "must be an edge"); + while(diagsinf.size() < dim+1) diagsinf.emplace_back(); + diagsinf[dim].push_back(v1); + diagsinf[dim].push_back(v2); + } + } else { + auto et = stptr_->edge_with_same_filtration(t); + auto&& et_vertices = stptr_->simplex_vertex_range(et); + auto it = std::begin(et_vertices); + auto w1 = *it; + auto w2 = *++it; + GUDHI_CHECK(++it==std::end(et_vertices), "must be an edge"); + if(dim == 0) { + auto v = *std::begin(stptr_->simplex_vertex_range(s)); + if(diags.size()==0)diags.emplace_back(); + diags[0].push_back(v); + diags[0].push_back(w1); + diags[0].push_back(w2); + } else { + auto es = stptr_->edge_with_same_filtration(s); + auto&& es_vertices = stptr_->simplex_vertex_range(es); + auto is = std::begin(es_vertices); + auto v1 = *is; + auto v2 = *++is; + GUDHI_CHECK(++is==std::end(es_vertices), "must be an edge"); + while(diags.size() < dim+1) diags.emplace_back(); + diags[dim].push_back(v1); + diags[dim].push_back(v2); + diags[dim].push_back(w1); + diags[dim].push_back(w2); + } + } + } + return out; + } + private: // A copy FilteredComplex* stptr_; -- cgit v1.2.3 From 35e08b30836fb0c419c0377eaf51d2a3b16e7670 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 7 Mar 2020 14:05:05 +0100 Subject: min_persistence for generators --- src/python/gudhi/simplex_tree.pxd | 4 +-- src/python/gudhi/simplex_tree.pyx | 36 +++++++++++++--------- .../include/Persistent_cohomology_interface.h | 10 ++++-- 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 4e435c67..53e2bbc9 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -53,5 +53,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": vector[pair[double,double]] intervals_in_dimension(int dimension) void write_output_diagram(string diagram_file_name) vector[pair[vector[int], 
vector[int]]] persistence_pairs() - pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() - pair[vector[vector[int]], vector[vector[int]]] flag_generators() + pair[vector[vector[int]], vector[vector[int]]] lower_star_generators(double) + pair[vector[vector[int]], vector[vector[int]]] flag_generators(double) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 1c9b9cf1..3f582ac9 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -395,7 +395,7 @@ cdef class SimplexTree: :param min_persistence: The minimum persistence value to take into account (strictly greater than min_persistence). Default value is 0.0. - Sets min_persistence to -1.0 to see all values. + Set min_persistence to -1.0 to see all values. :type min_persistence: float. :param persistence_dim_max: If true, the persistent homology for the maximal dimension in the complex is computed. If false, it is @@ -515,42 +515,48 @@ cdef class SimplexTree: print("intervals_in_dim function requires persistence function" " to be launched first.") - def lower_star_persistence_generators(self): + def lower_star_persistence_generators(self, min_persistence=0.): """Assuming this is a lower-star filtration, this function returns the persistence pairs, where each simplex is replaced with the vertex that gave it its filtration value. - :returns: first the regular persistence pairs, grouped by dimension, with one vertex per extremity, + :param min_persistence: The minimum persistence value to take into + account (strictly greater than min_persistence). Default value is + 0.0. + Set min_persistence to -1.0 to see all values. + :type min_persistence: float. + :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity, and second the essential features, grouped by dimension, with one vertex each :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]] - :note: intervals_in_dim function requires - :func:`persistence()` - function to be launched first. + :note: lower_star_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.lower_star_generators() + gen = self.pcohptr.lower_star_generators(min_persistence) normal = [np_array(d).reshape(-1,2) for d in gen.first] infinite = [np_array(d) for d in gen.second] return (normal, infinite) else: print("lower_star_persistence_generators() requires that persistence() be called first.") - def flag_persistence_generators(self): + def flag_persistence_generators(self, min_persistence=0.): """Assuming this is a flag complex, this function returns the persistence pairs, where each simplex is replaced with the vertices of the edges that gave it its filtration value. - :returns: first the regular persistence pairs of dimension 0, with one vertex for birth and two for death; + :param min_persistence: The minimum persistence value to take into + account (strictly greater than min_persistence). Default value is + 0.0. + Set min_persistence to -1.0 to see all values. + :type min_persistence: float. + :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death; then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity; then the connected components, with one vertex each; finally the other essential features, grouped by dimension, with 2 vertices for birth. 
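A sketch of one way to decode this layout (hypothetical usage, assuming `st` is a flag complex built with `expansion()` and `persistence()` has been called):

    n0, ns, inf0, infs = st.flag_persistence_generators()
    for v, w1, w2 in n0:            # dimension-0 pairs: birth vertex, death edge
        birth = st.filtration([v])
        death = st.filtration([w1, w2])
    for k, rows in enumerate(ns):   # pairs of dimension k+1: birth edge, death edge
        for v1, v2, w1, w2 in rows:
            birth = st.filtration([v1, v2])
            death = st.filtration([w1, w2])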
- :rtype: Tuple[List[numpy.array[int] of shape (n,3)], List[numpy.array[int] of shape (m,4)], List[numpy.array[int] of shape (l,)], List[numpy.array[int] of shape (k,2)]] + :rtype: Tuple[numpy.array[int] of shape (n,3), List[numpy.array[int] of shape (m,4)], numpy.array[int] of shape (l,), List[numpy.array[int] of shape (k,2)]] - :note: intervals_in_dim function requires - :func:`persistence()` - function to be launched first. + :note: flag_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.flag_generators() + gen = self.pcohptr.flag_generators(min_persistence) if len(gen.first) == 0: normal0 = np_array([]) normals = np_array([]) @@ -568,4 +574,4 @@ cdef class SimplexTree: return (normal0, normals, infinite0, infinites) else: - print("lower_star_persistence_generators() requires that persistence() be called first.") + print("flag_persistence_generators() requires that persistence() be called first.") diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 6e9aac52..8e721fc0 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -95,11 +95,10 @@ persistent_cohomology::Persistent_cohomology>, std::vector>> Generators; - Generators lower_star_generators() { + Generators lower_star_generators(double min_persistence) { Generators out; // diags[i] should be interpreted as vector> auto& diags = out.first; @@ -108,6 +107,8 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); + if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) + continue; int dim = stptr_->dimension(s); auto v = stptr_->vertex_with_same_filtration(s); if(t == stptr_->null_simplex()) { @@ -123,7 +124,8 @@ persistent_cohomology::Persistent_cohomology> and other diags[i] as vector> auto& diags = out.first; @@ -132,6 +134,8 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); + if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) + continue; int dim = stptr_->dimension(s); bool infinite = t == stptr_->null_simplex(); if(infinite) { -- cgit v1.2.3 From 08be68c1fb3c05a35d738eab53712ec6cb4d1ad5 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 7 Mar 2020 14:14:45 +0100 Subject: [ci skip] Comment --- src/python/include/Persistent_cohomology_interface.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 8e721fc0..22d6f654 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -125,6 +125,7 @@ persistent_cohomology::Persistent_cohomology> and other diags[i] as vector> -- cgit v1.2.3 From 55c1385419edd4e152df219dfff596d2631367f1 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 8 Mar 2020 11:15:04 +0100 Subject: Typo in shape of array --- src/python/gudhi/simplex_tree.pyx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 3f582ac9..d5f642d1 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -8,6 +8,7 @@ # - YYYY/MM Author: Description of the modification from libc.stdint cimport intptr_t +import numpy from numpy import array as np_array cimport simplex_tree @@ -558,19 +559,19 @@ cdef class SimplexTree: if 
self.pcohptr != NULL: gen = self.pcohptr.flag_generators(min_persistence) if len(gen.first) == 0: - normal0 = np_array([]) - normals = np_array([]) + normal0 = numpy.empty((0,3)) + normals = [] else: l = iter(gen.first) normal0 = np_array(next(l)).reshape(-1,3) normals = [np_array(d).reshape(-1,4) for d in l] if len(gen.second) == 0: - infinite0 = np_array([]) - infinites = np_array([]) + infinite0 = numpy.empty(0) + infinites = [] else: l = iter(gen.second) infinite0 = np_array(next(l)) - infinites = [np_array(d).reshape(-1,3) for d in l] + infinites = [np_array(d).reshape(-1,2) for d in l] return (normal0, normals, infinite0, infinites) else: -- cgit v1.2.3 From 5bc96fdf837e4acb80b1333b9db63ddf5802edc8 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 9 Mar 2020 12:12:13 +0100 Subject: removed infty line plot in plot_diagram if no pts at infty --- src/python/gudhi/persistence_graphical_tools.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index 8c38b684..cc3db467 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -296,17 +296,6 @@ def plot_persistence_diagram( axis_end = max_death + delta / 2 axis_start = min_birth - delta - # infinity line and text - axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k") - axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha) - # Infinity label - yt = axes.get_yticks() - yt = yt[np.where(yt < axis_end)] # to avoid ploting ticklabel higher than infinity - yt = np.append(yt, infinity) - ytl = ["%.3f" % e for e in yt] # to avoid float precision error - ytl[-1] = r'$+\infty$' - axes.set_yticks(yt) - axes.set_yticklabels(ytl) # bootstrap band if band > 0.0: x = np.linspace(axis_start, infinity, 1000) @@ -315,6 +304,7 @@ def plot_persistence_diagram( if greyblock: axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey')) # Draw points in loop + pts_at_infty = False # Records presence of pts at infty for interval in reversed(persistence): if float(interval[1][1]) != float("inf"): # Finite death case @@ -325,10 +315,23 @@ def plot_persistence_diagram( color=colormap[interval[0]], ) else: + pts_at_infty = True # Infinite death case for diagram to be nicer axes.scatter( interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]] ) + if pts_at_infty: + # infinity line and text + axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k") + axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha) + # Infinity label + yt = axes.get_yticks() + yt = yt[np.where(yt < axis_end)] # to avoid ploting ticklabel higher than infinity + yt = np.append(yt, infinity) + ytl = ["%.3f" % e for e in yt] # to avoid float precision error + ytl[-1] = r'$+\infty$' + axes.set_yticks(yt) + axes.set_yticklabels(ytl) if legend: dimensions = list(set(item[0] for item in persistence)) -- cgit v1.2.3 From d1d25b4ae8d0f778f0e2b3f98449d7d13e466013 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 10 Mar 2020 09:04:45 +0100 Subject: Fix example - only fails on OSx --- src/python/example/alpha_complex_from_points_example.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/python/example/alpha_complex_from_points_example.py 
b/src/python/example/alpha_complex_from_points_example.py index 465632eb..73faf17c 100755 --- a/src/python/example/alpha_complex_from_points_example.py +++ b/src/python/example/alpha_complex_from_points_example.py @@ -46,6 +46,9 @@ if simplex_tree.find([4]): else: print("[4] Not found...") +# Some insertions, simplex_tree needs to initialize filtrations +simplex_tree.initialize_filtration() + print("dimension=", simplex_tree.dimension()) print("filtrations=") for simplex_with_filtration in simplex_tree.get_filtration(): -- cgit v1.2.3 From 2eca5c75b1fbd7157e2656b875e730dc5f00f373 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 15:45:45 +0100 Subject: removed P[P < 0.5] thresholding ; as it shouldn't happen anymore. --- src/python/gudhi/wasserstein.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index aab0cb3c..e28c63e6 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -130,7 +130,6 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): if matching: P = ot.emd(a=a,b=b,M=M, numItermax=2000000) ot_cost = np.sum(np.multiply(P,M)) - P[P < 0.5] = 0 # trick to avoid numerical issue, could it be improved? match = np.argwhere(P) # Now we turn to -1 points encoding the diagonal match = _clean_match(match, n, m) -- cgit v1.2.3 From 7a5b614aa3bd06897e0135f0cda4e61f16951b20 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Tue, 10 Mar 2020 16:46:11 +0100 Subject: Try without trigger --- azure-pipelines.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 77e0ac88..95b15db2 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,9 +1,3 @@ -trigger: - batch: true - branches: - include: - - '*' # All branches - jobs: - job: 'Test' -- cgit v1.2.3 From 967ceab26b09ad74e0cff0d84429a766af267f6b Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 16:47:09 +0100 Subject: removed _clean_match and changed matching format, it is now a (n x 2) numpy array --- src/python/gudhi/wasserstein.py | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index e28c63e6..9efa946e 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -64,34 +64,13 @@ def _perstot(X, order, internal_p): return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order) -def _clean_match(match, n, m): - ''' - :param match: a list of the form [(i,j) ...] - :param n: int, size of the first dgm - :param m: int, size of the second dgm - :return: a modified version of match where indices greater than n, m are replaced by -1, encoding the diagonal. - and (-1, -1) are removed - ''' - new_match = [] - for i,j in match: - if i >= n: - if j < m: - new_match.append((-1, j)) - elif j >= m: - if i < n: - new_match.append((i,-1)) - else: - new_match.append((i,j)) - return new_match - - def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): ''' :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. 
:param matching: if True, computes and returns the optimal matching between X and Y, encoded as - a list of tuple [...(i,j)...], meaning the i-th point in X is matched to + a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to the j-th point in Y, with the convention (-1) represents the diagonal. :param order: exponent for Wasserstein; Default value is 2. :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); @@ -114,12 +93,12 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): if not matching: return _perstot(Y, order, internal_p) else: - return _perstot(Y, order, internal_p), [(-1, j) for j in range(m)] + return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)]) elif Y.size == 0: if not matching: return _perstot(X, order, internal_p) else: - return _perstot(X, order, internal_p), [(i, -1) for i in range(n)] + return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)]) M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p) a = np.ones(n+1) # weight vector of the input diagram. Uniform here. @@ -130,9 +109,11 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): if matching: P = ot.emd(a=a,b=b,M=M, numItermax=2000000) ot_cost = np.sum(np.multiply(P,M)) + P[-1, -1] = 0 # Remove matching corresponding to the diagonal match = np.argwhere(P) # Now we turn to -1 points encoding the diagonal - match = _clean_match(match, n, m) + match[:,0][match[:,0] >= n] = -1 + match[:,1][match[:,1] >= m] = -1 return ot_cost ** (1./order) , match # Computation of the OT cost using the ot.emd2 library. -- cgit v1.2.3 From 4aea5deab6ce4cbb491f4c9c2b7e9f023efbbe01 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 17:41:38 +0100 Subject: changed output of matching as a (n x 2) array, adapted tests and doc --- src/python/doc/wasserstein_distance_user.rst | 2 +- src/python/gudhi/wasserstein.py | 2 +- src/python/test/test_wasserstein_distance.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index d3daa318..9519caa6 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -69,5 +69,5 @@ The output is: .. testoutput:: - Wasserstein distance value = 2.15, optimal matching: [(0, 0), (1, 2), (2, -1), (-1, 1)] + Wasserstein distance value = 2.15, optimal matching: [[0, 0], [1, 2], [2, -1], [-1, 1]] diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 9efa946e..9e4dc7d5 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -88,7 +88,7 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): if not matching: return 0.
else: - return 0., [] + return 0., np.array([]) else: if not matching: return _perstot(Y, order, internal_p) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index d0f0323c..ca9a4a61 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -61,15 +61,15 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat if test_matching: match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1] - assert match == [] + assert np.array_equal(match, np.array([])) match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] - assert match == [] + assert np.array_equal(match, np.array([])) match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1] - assert match == [(-1, 0), (-1, 1)] + assert np.array_equal(match , np.array([[-1, 0], [-1, 1]])) match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] - assert match == [(0, -1), (1, -1)] + assert np.array_equal(match , np.array([[0, -1], [1, -1]])) match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] - assert match == [(0, 0), (1, 1), (2, -1)] + assert np.array_equal(match, np.array_equal([[0, 0], [1, 1], [2, -1]])) -- cgit v1.2.3 From fc4e10863d103ee6bc22863f48548fe246a3ddd6 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 18:03:21 +0100 Subject: correction of typo in the doc --- src/python/gudhi/wasserstein.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 9e4dc7d5..12337780 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -30,10 +30,10 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): :param order: exponent for the Wasserstein metric. :param internal_p: Ground metric (i.e. norm L^p). :returns: (n+1) x (m+1) np.array encoding the cost matrix C. - For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], - while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) + For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j], + while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal. - note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal). + note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal). 
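A minimal numeric sketch of this layout, under the stated conventions (order = 2, internal_p = 2, so each entry is a squared Euclidean cost and the distance of a point (b, d) to the diagonal is |d - b|/sqrt(2)):

    import numpy as np
    X = np.array([[0., 1.], [2., 5.]])                     # n = 2 points
    Y = np.array([[0., 1.2]])                              # m = 1 point
    n, m = len(X), len(Y)
    C = np.zeros((n + 1, m + 1))
    C[:n, :m] = np.linalg.norm(X[:, None] - Y[None, :], axis=2)**2
    C[:n, m] = (X[:, 1] - X[:, 0])**2 / 2                  # X[i] to its diagonal projection
    C[n, :m] = (Y[:, 1] - Y[:, 0])**2 / 2                  # Y[j] to its diagonal projection
    # C[n, m] stays 0: moving from the diagonal to the diagonal is free.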
''' Xdiag = _proj_on_diag(X) Ydiag = _proj_on_diag(Y) -- cgit v1.2.3 From 6c369a6aa566dfcb8cdb501d0c39eafb32219669 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 18:08:15 +0100 Subject: fix typo in test_wasserstein_distance --- src/python/test/test_wasserstein_distance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index ca9a4a61..f92208c0 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -69,7 +69,7 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] assert np.array_equal(match , np.array([[0, -1], [1, -1]])) match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] - assert np.array_equal(match, np.array_equal([[0, 0], [1, 1], [2, -1]])) + assert np.array_equal(match, np.array([[0, 0], [1, 1], [2, -1]])) -- cgit v1.2.3 From 753290475ab6e95c2de1baad97ee6f755a0ce19a Mon Sep 17 00:00:00 2001 From: Théo Lacombe Date: Tue, 10 Mar 2020 18:25:10 +0100 Subject: Update src/python/gudhi/wasserstein.py Co-Authored-By: Marc Glisse --- src/python/gudhi/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 12337780..83a682df 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -32,7 +32,7 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): :returns: (n+1) x (m+1) np.array encoding the cost matrix C. For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j]) - and its orthogonal proj onto the diagonal. + and its orthogonal projection onto the diagonal. note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal). ''' Xdiag = _proj_on_diag(X) -- cgit v1.2.3 From c9d6e27495c8927d736d593afb0450360b46ccc9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 10 Mar 2020 18:55:19 +0100 Subject: fix indentation in wasserstein --- src/python/gudhi/wasserstein.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 83a682df..3dd993f9 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -70,13 +70,13 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): (i.e. with infinite coordinate). :param Y: (m x 2) numpy.array encoding the second diagram. :param matching: if True, computes and returns the optimal matching between X and Y, encoded as - a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to - the j-th point in Y, with the convention (-1) represents the diagonal. + a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to + the j-th point in Y, with the convention (-1) represents the diagonal. :param order: exponent for Wasserstein; Default value is 2. :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); - Default value is 2 (Euclidean norm). + Default value is 2 (Euclidean norm). :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with - respect to the internal_p-norm as ground metric. + respect to the internal_p-norm as ground metric. 
If matching is set to True, also returns the optimal matching between X and Y. ''' n = len(X) -- cgit v1.2.3 From 5a737eefc7abd690e8a174d2557d0157e77f5f4c Mon Sep 17 00:00:00 2001 From: mathieu Date: Tue, 10 Mar 2020 19:13:37 -0400 Subject: new fixes --- .../include/gudhi/Bitmap_cubical_complex.h | 1 - src/python/gudhi/cubical_complex.pyx | 32 +++++++++++----------- src/python/gudhi/periodic_cubical_complex.pyx | 28 +++++++++++++++++++ src/python/gudhi/simplex_tree.pyx | 2 +- .../include/Persistent_cohomology_interface.h | 3 +- src/python/test/test_cubical_complex.py | 5 ++++ 6 files changed, 52 insertions(+), 19 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h index bf09532e..37514dee 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h @@ -340,7 +340,6 @@ class Bitmap_cubical_complex : public T { * that provides ranges for the Boundary_simplex_iterator. **/ Boundary_simplex_range boundary_simplex_range(Simplex_handle sh) { return this->get_boundary_of_a_cell(sh); } - Boundary_simplex_range coboundary_simplex_range(Simplex_handle sh) { return this->get_coboundary_of_a_cell(sh); } /** * filtration_simplex_range creates an object of a Filtration_simplex_range class diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 8cf43539..9e701fe6 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -148,22 +148,22 @@ cdef class CubicalComplex: def cofaces_of_persistence_pairs(self): """A persistence interval is described by a pair of cells, one that creates the - feature and one that kills it. The filtration values of those 2 cells give coordinates - for a point in a persistence diagram, or a bar in a barcode. Structurally, in the - cubical complexes provided here, the filtration value of any cell is the minimum of the - filtration values of the maximal cells that contain it. Connecting persistence diagram - coordinates to the corresponding value in the input (i.e. the filtration values of - the top-dimensional cells) is useful for differentiation purposes. - - This function returns a list of pairs of top-dimensional cells corresponding to - the persistence birth and death cells of the filtration. The cells are represented by - their indices in the input list of top-dimensional cells (and not their indices in the - internal datastructure that includes non-maximal cells). Note that when two adjacent - top-dimensional cells have the same filtration value, we arbitrarily return one of the two - when calling the function on one of their common faces. - - :returns: The top-dimensional cells/cofaces of the positive and negative cells. - :rtype: list of pairs(index of positive top-dimensional cell, index of negative top-dimensional cell) + feature and one that kills it. The filtration values of those 2 cells give coordinates + for a point in a persistence diagram, or a bar in a barcode. Structurally, in the + cubical complexes provided here, the filtration value of any cell is the minimum of the + filtration values of the maximal cells that contain it. Connecting persistence diagram + coordinates to the corresponding value in the input (i.e. the filtration values of + the top-dimensional cells) is useful for differentiation purposes. 
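A sketch of how that connection can be used (a 1-D input keeps the index convention unambiguous; `persistence()` must be called first):

    import numpy as np
    import gudhi
    cells = np.array([1., 3., 0., 2., 4.])
    cc = gudhi.CubicalComplex(top_dimensional_cells=cells)
    cc.persistence()
    pairs = cc.cofaces_of_persistence_pairs()   # rows: (dim, birth index, death index)
    finite = pairs[pairs[:, 2] != -1]           # drop essential features (death index -1)
    births = cells[finite[:, 1]]                # diagram coordinates read off the input
    deaths = cells[finite[:, 2]]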
+ + This function returns a list of pairs of top-dimensional cells corresponding to + the persistence birth and death cells of the filtration. The cells are represented by + their indices in the input list of top-dimensional cells (and not their indices in the + internal datastructure that includes non-maximal cells). Note that when two adjacent + top-dimensional cells have the same filtration value, we arbitrarily return one of the two + when calling the function on one of their common faces. + + :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension. + :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of each row being: (homological dimension, index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1. """ cdef vector[vector[int]] persistence_result if self.pcohptr != NULL: diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index 37f76201..ba039e80 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -31,6 +31,7 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface>>": Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence) + vector[vector[int]] cofaces_of_cubical_persistence_pairs() vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) @@ -155,6 +156,33 @@ cdef class PeriodicCubicalComplex: persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence) return persistence_result + def cofaces_of_persistence_pairs(self): + """A persistence interval is described by a pair of cells, one that creates the + feature and one that kills it. The filtration values of those 2 cells give coordinates + for a point in a persistence diagram, or a bar in a barcode. Structurally, in the + cubical complexes provided here, the filtration value of any cell is the minimum of the + filtration values of the maximal cells that contain it. Connecting persistence diagram + coordinates to the corresponding value in the input (i.e. the filtration values of + the top-dimensional cells) is useful for differentiation purposes. + + This function returns a list of pairs of top-dimensional cells corresponding to + the persistence birth and death cells of the filtration. The cells are represented by + their indices in the input list of top-dimensional cells (and not their indices in the + internal datastructure that includes non-maximal cells). Note that when two adjacent + top-dimensional cells have the same filtration value, we arbitrarily return one of the two + when calling the function on one of their common faces. + + :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension.
+ :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of each row being: (homological dimension, index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1. + """ + cdef vector[vector[int]] persistence_result + if self.pcohptr != NULL: + persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() + else: + print("cofaces_of_persistence_pairs function requires persistence function" + " to be launched first.") + return np.array(persistence_result) + def betti_numbers(self): """This function returns the Betti numbers of the complex. diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 85d25492..b18627c4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -508,7 +508,7 @@ cdef class SimplexTree: """ if self.pcohptr != NULL: if persistence_file != '': - self.pcohptr.write_output_diagram(str.encode(persistence_file)) + self.pcohptr.write_output_diagram(persistence_file.encode('utf-8')) else: print("persistence_file must be specified") else: diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index e5accf50..defac88c 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -75,12 +75,13 @@ persistent_cohomology::Persistent_cohomology int top_dimensional_coface(int splx) { if (stptr_->dimension(splx) == stptr_->dimension()){return splx;} else{ - for (auto v : stptr_->coboundary_simplex_range(splx)){ + for (auto v : stptr_->get_coboundary_of_a_cell(splx)){ if(stptr_->filtration(v) == stptr_->filtration(splx)){ return top_dimensional_coface(v); } } } + return splx; } std::vector<std::vector<int>> cofaces_of_cubical_persistence_pairs() { diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py index 8c1b2600..8af63355 100755 --- a/src/python/test/test_cubical_complex.py +++ b/src/python/test/test_cubical_complex.py @@ -147,3 +147,8 @@ def test_connected_sublevel_sets(): periodic_dimensions = periodic_dimensions) assert cub.persistence() == [(0, (2.0, float("inf")))] assert cub.betti_numbers() == [1, 0, 0] + +def test_cofaces_of_persistence_pairs(): + cub = CubicalComplex(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + cub.persistence() + assert np.array_equal(cub.cofaces_of_persistence_pairs(), np.array([[1, 7, 4], [0, 8, -1]])) -- cgit v1.2.3 From a47ace987876cb52351ae9223d335629aedbd71e Mon Sep 17 00:00:00 2001 From: mathieu Date: Tue, 10 Mar 2020 19:44:57 -0400 Subject: new fixes --- ext/hera | 2 +- src/python/gudhi/representations/metrics.py | 27 ++++++++++++--------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/ext/hera b/ext/hera index cb1838e6..9a899718 160000 --- a/ext/hera +++ b/ext/hera @@ -1 +1 @@ -Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7 +Subproject commit 9a89971855acefe39dce0e2adadf53b88ca8f683 diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index c5439a67..0659b457 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -10,17 +10,9 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances -from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance from
gudhi.hera import wasserstein_distance as hera_wasserstein_distance from .preprocessing import Padding -try: - from .. import bottleneck_distance - USE_GUDHI = True -except ImportError: - USE_GUDHI = False - print("Gudhi built without CGAL: BottleneckDistance will return a null matrix") - ############################################# # Metrics ################################### ############################################# @@ -111,9 +103,13 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) if metric == "bottleneck": return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) - elif metric == "wasserstein" or metric == "pot_wasserstein": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) - elif metric == "hera_wasserstein": + elif metric == "pot_wasserstein": + try: + from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance + return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) + except ImportError: + print("Gudhi built without POT") + elif metric == "wasserstein" or metric == "hera_wasserstein": return pairwise_distances(XX, YY, metric=sklearn_wrapper(hera_wasserstein_distance, X, Y, **kwargs)) elif metric == "sliced_wasserstein": return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, X, Y, **kwargs)) @@ -192,16 +188,17 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances. """ - if not USE_GUDHI: - print("Gudhi built without CGAL: returning a null matrix") - Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) if USE_GUDHI else np.zeros((len(X), len(self.diagrams_))) + try: + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) + except ImportError: + print("Gudhi built without CGAL") return Xfit class WassersteinDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams. """ - def __init__(self, order=2, internal_p=2, mode="pot", delta=0.0001): + def __init__(self, order=2, internal_p=2, mode="pot", delta=0.01): """ Constructor for the WassersteinDistance class. 
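A hedged usage sketch for this class (the fit/transform pattern follows the scikit-learn style of these estimators; `diags1` and `diags2` are assumed lists of (n x 2) arrays):

    from gudhi.representations.metrics import WassersteinDistance
    WD = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.01)
    WD.fit(diags1)               # store the reference diagrams
    M = WD.transform(diags2)     # matrix of pairwise Wasserstein distances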
-- cgit v1.2.3 From a17a09a2c58bba79e897d0ba00aada05da556967 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 11 Mar 2020 10:41:53 +0100 Subject: clean test_wasserstein from useless np.array --- src/python/test/test_wasserstein_distance.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index f92208c0..0d70e11a 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -61,15 +61,15 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat if test_matching: match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1] - assert np.array_equal(match, np.array([])) + assert np.array_equal(match, []) match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] - assert np.array_equal(match, np.array([])) + assert np.array_equal(match, []) match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1] - assert np.array_equal(match , np.array([[-1, 0], [-1, 1]])) + assert np.array_equal(match , [[-1, 0], [-1, 1]]) match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1] - assert np.array_equal(match , np.array([[0, -1], [1, -1]])) + assert np.array_equal(match , [[0, -1], [1, -1]]) match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] - assert np.array_equal(match, np.array([[0, 0], [1, 1], [2, -1]])) + assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]]) -- cgit v1.2.3 From 45b918a17cfa26a0c58d7871b869aa13b0e45019 Mon Sep 17 00:00:00 2001 From: mathieu Date: Wed, 11 Mar 2020 12:05:15 -0400 Subject: moved location of top_dimensional_coface function --- ext/hera | 2 +- .../include/gudhi/Bitmap_cubical_complex_base.h | 21 +++++++++++++++++++++ src/python/gudhi/cubical_complex.pyx | 4 +++- src/python/gudhi/periodic_cubical_complex.pyx | 4 +++- .../include/Persistent_cohomology_interface.h | 16 ++-------------- 5 files changed, 30 insertions(+), 17 deletions(-) diff --git a/ext/hera b/ext/hera index cb1838e6..9a899718 160000 --- a/ext/hera +++ b/ext/hera @@ -1 +1 @@ -Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7 +Subproject commit 9a89971855acefe39dce0e2adadf53b88ca8f683 diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 0d6299d2..7496d74a 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -109,6 +109,14 @@ class Bitmap_cubical_complex_base { **/ virtual inline std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const; + /** + * This function computes the index of one of the top-dimensional cubes (chosen arbitrarily) associated + * to a given simplex handle. Note that the input parameter is not necessarily a cube, it might also + * be an edge or vertex of a cube. On the other hand, the output always indicates the position of + * a cube in the data structure. + **/ + inline int get_top_dimensional_coface_of_a_cell(int splx); + /** * This procedure computes incidence numbers between cubes.
For a cube \f$A\f$ of * dimension n and a cube \f$B \subset A\f$ of dimension n-1, an incidence @@ -602,6 +610,19 @@ void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells this->impose_lower_star_filtration(); } +template <typename T> +int Bitmap_cubical_complex_base<T>::get_top_dimensional_coface_of_a_cell(int splx) { + if (this->get_dimension_of_a_cell(splx) == this->dimension()){return splx;} + else{ + for (auto v : this->get_coboundary_of_a_cell(splx)){ + if(this->get_cell_data(v) == this->get_cell_data(splx)){ + return this->get_top_dimensional_coface_of_a_cell(v); + } + } + } + return splx; +} + template <typename T> Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes_in_following_directions, const std::vector<T>& top_dimensional_cells) { diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 9e701fe6..84fec60e 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -163,7 +163,9 @@ cdef class CubicalComplex: when calling the function on one of their common faces. :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension. - :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of each row being: (homological dimension, index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1. + :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of each row being: (homological dimension, + index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if + the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1.
""" cdef vector[vector[int]] persistence_result if self.pcohptr != NULL: diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index defac88c..77555349 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -72,18 +72,6 @@ persistent_cohomology::Persistent_cohomologydimension(splx) == stptr_->dimension()){return splx;} - else{ - for (auto v : stptr_->get_coboundary_of_a_cell(splx)){ - if(stptr_->filtration(v) == stptr_->filtration(splx)){ - return top_dimensional_coface(v); - } - } - } - return splx; - } - std::vector> cofaces_of_cubical_persistence_pairs() { // Warning: this function is meant to be used with CubicalComplex only!! @@ -104,14 +92,14 @@ persistent_cohomology::Persistent_cohomologydimension(get<0>(pair)); // Recursively get the top-dimensional cell / coface associated to the persistence generator - int face0 = top_dimensional_coface(get<0>(pair)); + int face0 = stptr_->get_top_dimensional_coface_of_a_cell(get<0>(pair)); // Retrieve the index of the corresponding top-dimensional cell in the input data int splx0 = order[face0]; int splx1 = -1; if (isfinite(stptr_->filtration(get<1>(pair)))){ // Recursively get the top-dimensional cell / coface associated to the persistence generator - int face1 = top_dimensional_coface(get<1>(pair)); + int face1 = stptr_->get_top_dimensional_coface_of_a_cell(get<1>(pair)); // Retrieve the index of the corresponding top-dimensional cell in the input data splx1 = order[face1]; } -- cgit v1.2.3 From 25e40a52ec7bc9e1bfe418fb1aa16e2a06994d1b Mon Sep 17 00:00:00 2001 From: mathieu Date: Wed, 11 Mar 2020 15:35:37 -0400 Subject: new fixes --- src/python/gudhi/representations/metrics.py | 63 +++++++++++++++++++++++------ 1 file changed, 50 insertions(+), 13 deletions(-) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 0659b457..f913f1fc 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -19,7 +19,7 @@ from .preprocessing import Padding def sliced_wasserstein_distance(D1, D2, num_directions): """ - This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. + This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). :param D2: (m x 2) numpy.array encoding the second diagram. :param num_directions: number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. 
@@ -39,6 +39,34 @@ def sliced_wasserstein_distance(D1, D2, num_directions): L1 = np.sum(np.abs(A-B), axis=0) return np.mean(L1) +def compute_persistence_diagram_projections(X, num_directions): + """ + This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams. + :param X: list of persistence diagrams. + :param num_directions: number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. + :returns: list of projected persistence diagrams. + :rtype: float + """ + thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] + lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) + XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X] + return XX + +def sliced_wasserstein_distance_on_projections(D1, D2): + """ + This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. + :param D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections. + :param D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections. + :returns: the sliced Wasserstein distance between the projected persistence diagrams. + :rtype: float + """ + lim1, lim2 = int(len(D1)/2), int(len(D2)/2) + approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:] + A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0) + B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0) + L1 = np.sum(np.abs(A-B), axis=0) + return np.mean(L1) + def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. @@ -90,31 +118,43 @@ def sklearn_wrapper(metric, X, Y, **kwargs): return metric(X[int(a[0])], Y[int(b[0])], **kwargs) return flat_metric +PAIRWISE_DISTANCE_FUNCTIONS = { + "wasserstein": hera_wasserstein_distance, + "hera_wasserstein": hera_wasserstein_distance, + "persistence_fisher": persistence_fisher_distance, +} + def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs): """ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). :param X: first list of persistence diagrams. :param Y: second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. - :param metric: distance to use. 
It can be either a string ("sliced_wasserstein", "wasserstein", "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + :param metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. :returns: distance matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) :rtype: float """ XX = np.reshape(np.arange(len(X)), [-1,1]) YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) if metric == "bottleneck": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) + try: + from .. import bottleneck_distance + return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) + except ImportError: + print("Gudhi built without CGAL") + raise elif metric == "pot_wasserstein": try: from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) except ImportError: - print("Gudhi built without POT") - elif metric == "wasserstein" or metric == "hera_wasserstein": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(hera_wasserstein_distance, X, Y, **kwargs)) + print("Gudhi built without POT. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'") + raise elif metric == "sliced_wasserstein": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance, X, Y, **kwargs)) - elif metric == "persistence_fisher": - return pairwise_distances(XX, YY, metric=sklearn_wrapper(persistence_fisher_distance, X, Y, **kwargs)) + Xproj = compute_persistence_diagram_projections(X, **kwargs) + Yproj = None if Y is None else compute_persistence_diagram_projections(Y, **kwargs) + return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance_on_projections, Xproj, Yproj)) + elif type(metric) == str: + return pairwise_distances(XX, YY, metric=sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs)) else: return pairwise_distances(XX, YY, metric=sklearn_wrapper(metric, X, Y, **kwargs)) @@ -188,10 +228,7 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances. 
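For context, a minimal sketch of the speed-up this commit introduces, assuming a GUDHI build where gudhi.representations.metrics is importable; since both code paths perform the same operations, the projected variant should agree with the direct one:

    import numpy as np
    from gudhi.representations.metrics import (
        sliced_wasserstein_distance,
        compute_persistence_diagram_projections,
        sliced_wasserstein_distance_on_projections,
    )

    D1 = np.array([[0.1, 0.5], [0.3, 0.8]])
    D2 = np.array([[0.2, 0.6]])

    d_direct = sliced_wasserstein_distance(D1, D2, num_directions=10)
    # Project each diagram once, then reuse the projections for every pair.
    P1, P2 = compute_persistence_diagram_projections([D1, D2], num_directions=10)
    d_fast = sliced_wasserstein_distance_on_projections(P1, P2)
    assert abs(d_direct - d_fast) < 1e-10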
""" - try: - Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) - except ImportError: - print("Gudhi built without CGAL") + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) return Xfit class WassersteinDistance(BaseEstimator, TransformerMixin): -- cgit v1.2.3 From 6552d09c3f290a25ee910e007084fe3809f8c8ed Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Thu, 12 Mar 2020 16:19:34 -0400 Subject: fixed error message --- src/python/gudhi/representations/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index f913f1fc..4070c321 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -147,7 +147,7 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) except ImportError: - print("Gudhi built without POT. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'") + print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'") raise elif metric == "sliced_wasserstein": Xproj = compute_persistence_diagram_projections(X, **kwargs) -- cgit v1.2.3 From 62eb0a311da737a58ec704c3c5ad93da871e57a0 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 12 Mar 2020 22:37:43 +0100 Subject: Point the hera submodule to github instead of bitbucket --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 6e8b3ab1..f70c570d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "ext/hera"] path = ext/hera - url = https://bitbucket.org/grey_narn/hera.git + url = https://github.com/grey-narn/hera.git -- cgit v1.2.3 From b0bd6dfc34a3c93073e9f326292047f8debb7fb3 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 12 Mar 2020 23:46:03 +0100 Subject: Update hera --- ext/hera | 2 +- src/cmake/modules/GUDHI_third_party_libraries.cmake | 2 +- src/cmake/modules/GUDHI_user_version_target.cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ext/hera b/ext/hera index cb1838e6..0019cae9 160000 --- a/ext/hera +++ b/ext/hera @@ -1 +1 @@ -Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7 +Subproject commit 0019cae9dc1e9d11aa03bc59681435ba7f21eea8 diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 6db2c76b..2d010483 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -68,7 +68,7 @@ if(CGAL_FOUND) endif() # For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera. 
-set(HERA_WASSERSTEIN_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include CACHE PATH "Directory where one can find Hera's wasserstein.h") +set(HERA_WASSERSTEIN_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include CACHE PATH "Directory where one can find Hera's wasserstein.h") option(WITH_GUDHI_USE_TBB "Build with Intel TBB parallelization" ON) diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake index 5047252f..257d1939 100644 --- a/src/cmake/modules/GUDHI_user_version_target.cmake +++ b/src/cmake/modules/GUDHI_user_version_target.cmake @@ -55,7 +55,7 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI) add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E - copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include ${GUDHI_USER_VERSION_DIR}/ext/hera/geom_matching/wasserstein/include) + copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include ${GUDHI_USER_VERSION_DIR}/ext/hera/wasserstein/include) set(GUDHI_DIRECTORIES "doc;example;concept;utilities") -- cgit v1.2.3 From ba24c58487f9a62e024138127c1b8375449334f9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 13 Mar 2020 09:17:25 +0100 Subject: Mention git submodule sync --- .github/how_to_use_github_to_contribute_to_gudhi.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md index 6ab05e36..747ca39b 100644 --- a/.github/how_to_use_github_to_contribute_to_gudhi.md +++ b/.github/how_to_use_github_to_contribute_to_gudhi.md @@ -25,10 +25,10 @@ This creates a directory gudhi-devel, which you are free to move around or renam cd gudhi-devel ``` -Everytime you clone the repository, you will have to download the *submodules*. +When you clone the repository, you also need to download the *submodules*. ## Submodules -An interface to Hera for Wasserstein distance is available on an external git repository. To download it: +Hera, used for Wasserstein distance, is available on an external git repository. To download it: ```bash git submodule update --init ``` @@ -60,8 +60,9 @@ This is a command you can run quite regularly. It tells git to check all that happened on github. It is safe, it will not mess with your files. 
-**Reminder:** Everytime you checkout master or merge from master, afterwards, if the version of one the submodule has changed, or if a submodule was added, you will have to:
+**Reminder:** If the version of a submodule has changed, or if a submodule was added, you may need to:
 ```bash
+git submodule sync
 git submodule update --init
 ```

-- cgit v1.2.3


From d239af744539572b485b09031f60121383fc1bc6 Mon Sep 17 00:00:00 2001
From: mathieu
Date: Fri, 13 Mar 2020 11:31:19 -0400
Subject: tried to fix hera's update

---
 ext/hera | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ext/hera b/ext/hera
index 9a899718..cb1838e6 160000
--- a/ext/hera
+++ b/ext/hera
@@ -1 +1 @@
-Subproject commit 9a89971855acefe39dce0e2adadf53b88ca8f683
+Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7

-- cgit v1.2.3


From 4fb8ab586088dd582b3949cecc11395c37b6f3e6 Mon Sep 17 00:00:00 2001
From: mathieu
Date: Fri, 13 Mar 2020 11:32:22 -0400
Subject: tried to fix hera's update

---
 ext/hera | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ext/hera b/ext/hera
index 0019cae9..cb1838e6 160000
--- a/ext/hera
+++ b/ext/hera
@@ -1 +1 @@
-Subproject commit 0019cae9dc1e9d11aa03bc59681435ba7f21eea8
+Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7

-- cgit v1.2.3


From e313e98661a54accafd6649ab274aa17cf7e4fb2 Mon Sep 17 00:00:00 2001
From: mathieu
Date: Fri, 13 Mar 2020 11:56:43 -0400
Subject: fix hera

---
 ext/hera | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ext/hera b/ext/hera
index cb1838e6..0019cae9 160000
--- a/ext/hera
+++ b/ext/hera
@@ -1 +1 @@
-Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7
+Subproject commit 0019cae9dc1e9d11aa03bc59681435ba7f21eea8

-- cgit v1.2.3


From 6410abe3788e17a24b1569bcd7f121d126e1c6cc Mon Sep 17 00:00:00 2001
From: mathieu
Date: Fri, 13 Mar 2020 11:58:25 -0400
Subject: fix hera

---
 ext/hera | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ext/hera b/ext/hera
index cb1838e6..0019cae9 160000
--- a/ext/hera
+++ b/ext/hera
@@ -1 +1 @@
-Subproject commit cb1838e682ec07f80720241cf9098400caeb83c7
+Subproject commit 0019cae9dc1e9d11aa03bc59681435ba7f21eea8

-- cgit v1.2.3


From 4b546a43fe14178dcfb2b327e27a580fc9811499 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Mon, 16 Mar 2020 13:16:04 +0100
Subject: update doc (indentation, mention of -1 for the diag) and added a few more tests

---
 src/python/gudhi/barycenter.py                 | 30 +++++++++++++-------------
 src/python/test/test_wasserstein_barycenter.py | 15 +++++++------
 2 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py
index a41b5906..3af12c14 100644
--- a/src/python/gudhi/barycenter.py
+++ b/src/python/gudhi/barycenter.py
@@ -96,9 +96,8 @@
 def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     '''
     :param pdiagset: a list of size m containing numpy.array of shape (n x 2)
-                    (n can variate), encoding a set of
-                    persistence diagrams with only finite coordinates.
-                    If empty, returns None.
+                        (n can vary), encoding a set of
+                        persistence diagrams with only finite coordinates.
     :param init: The initial value for barycenter estimate. If None,
                 init is made on a random diagram from the dataset.
                 Otherwise, it must be an int
                 (then we init with diagset[init])
                 or a (n x 2) numpy.array encoding a persistence diagram with n points.
:param verbose: if True, returns additional information about the - barycenter. + barycenter. :returns: If not verbose (default), a numpy.array encoding - the barycenter estimate + the barycenter estimate of pdiagset (local minima of the energy function). + If pdiagset is empty, returns None. If verbose, returns a couple (Y, log) where Y is the barycenter estimate, and log is a dict that contains additional informations: - groupings, a list of list of pairs (i,j), - That is, G[k] = [(i, j) ...], where (i,j) indicates - that X[i] is matched to Y[j] - if i > len(X) or j > len(Y), it means they - represent the diagonal. + That is, G[k] = [(i, j) ...], where (i,j) indicates + that X[i] is matched to Y[j] + if i = -1 or j = -1, it means they + represent the diagonal. - energy, a float representing the Frechet - energy value obtained, - that is the mean of squared distances - of observations to the output. + energy value obtained, + that is the mean of squared distances + of observations to the output. - nb_iter, integer representing the number of iterations - performed before convergence of the algorithm. + performed before convergence of the algorithm. ''' X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging @@ -136,7 +136,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): # Initialisation of barycenter if init is None: i0 = np.random.randint(m) # Index of first state for the barycenter - Y = X[i0].copy() #copy() ensure that we do not modify X[i0] + Y = X[i0].copy() else: if type(init)==int: Y = X[init].copy() @@ -149,7 +149,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): while not converged: nb_iter += 1 K = len(Y) # current nb of points in Y (some might be on diagonal) - G = np.zeros((K, m), dtype=int)-1 # will store for each j, the (index) + G = np.full((K, m), -1, dtype=int) # will store for each j, the (index) # point matched in each other diagram #(might be the diagonal). # that is G[j, i] = k <=> y_j is matched to diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index a58a4d62..5167cb84 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -27,19 +27,20 @@ def test_lagrangian_barycenter(): res = np.array([[0.27916667, 0.55416667], [0.7375, 0.7625], [0.2375, 0.2625]]) dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]]) - dg8 = np.array([[0., 4.]]) + dg8 = np.array([[0., 4.], [4, 8]]) # error crit. 
- eps = 0.000001 + eps = 1e-7 assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2))) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True) - assert np.linalg.norm(Y - np.array([[1,3]])) < eps - assert np.abs(log["energy"] - 2) < eps - assert np.array_equal(log["groupings"][0] , np.array([[0, -1]])) - assert np.array_equal(log["groupings"][1] , np.array([[0, 0]])) - assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3]])) < eps + assert np.linalg.norm(Y - np.array([[1,3], [5, 7]])) < eps + assert np.abs(log["energy"] - 4) < eps + assert np.array_equal(log["groupings"][0] , np.array([[0, -1], [1, -1]])) + assert np.array_equal(log["groupings"][1] , np.array([[0, 0], [1, 1]])) + assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3], [5, 7]])) < eps assert lagrangian_barycenter(pdiagset = []) is None + -- cgit v1.2.3 From aa93247860bb01e3fc15926658dd9e6a95198f3d Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Mar 2020 13:18:58 +0100 Subject: added mention that _optimal matching should be removed at some point --- src/python/gudhi/barycenter.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 3af12c14..517cdb2f 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -44,6 +44,9 @@ def _optimal_matching(X, Y, withcost=False): if i >= len(X) or j >= len(Y), it means they represent the diagonal. They will be encoded by -1 afterwards. + + NOTE : this code will be removed for final merge, + and wasserstein.optimal_matching will be used instead. 
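For background on the note above, a minimal sketch of the extended-mass trick that _optimal_matching builds on (toy cost matrix; the extra row and column stand for the diagonal, as in gudhi.wasserstein):

    import numpy as np
    import ot  # POT, Python Optimal Transport

    n, m = 3, 2                       # off-diagonal points in X and Y
    M = np.random.rand(n + 1, m + 1)  # toy cost matrix; last row/column = cost to the diagonal

    # Each point carries mass 1; the diagonal absorbs the unmatched mass of the other side.
    a = np.ones(n + 1); a[-1] = m
    b = np.ones(m + 1); b[-1] = n
    P = ot.emd(a=a, b=b, M=M)         # transport plan; P[i, j] > 0 reads "i is matched to j"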
''' n = len(X) -- cgit v1.2.3 From 6ed2a97421a223b4ebe31b91f48d779c2209f470 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 16 Mar 2020 13:38:18 +0100 Subject: Add get_simplices method - contrary to get_filtration method, sort is not performed --- src/Simplex_tree/example/simple_simplex_tree.cpp | 13 +++++++++++-- src/python/example/simplex_tree_example.py | 4 ++++ src/python/gudhi/simplex_tree.pxd | 8 ++++++++ src/python/gudhi/simplex_tree.pyx | 17 ++++++++++++++++- src/python/include/Simplex_tree_interface.h | 11 +++++++++++ src/python/test/test_simplex_tree.py | 12 ++++++++++++ 6 files changed, 62 insertions(+), 3 deletions(-) diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp index 4353939f..47ea7e36 100644 --- a/src/Simplex_tree/example/simple_simplex_tree.cpp +++ b/src/Simplex_tree/example/simple_simplex_tree.cpp @@ -166,10 +166,19 @@ int main(int argc, char* const argv[]) { // ++ GENERAL VARIABLE SET std::cout << "********************************************************************\n"; - // Display the Simplex_tree - Can not be done in the middle of 2 inserts std::cout << "* The complex contains " << simplexTree.num_simplices() << " simplices\n"; std::cout << " - dimension " << simplexTree.dimension() << "\n"; - std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n"; + std::cout << "* Iterator on simplices, with [filtration value]:\n"; + for (Simplex_tree::Simplex_handle f_simplex : simplexTree.complex_simplex_range()) { + std::cout << " " + << "[" << simplexTree.filtration(f_simplex) << "] "; + for (auto vertex : simplexTree.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")"; + std::cout << std::endl; + } + + std::cout << "********************************************************************\n"; + // Can not be done in the middle of 2 inserts + std::cout << "* Iterator on simplices sorted by filtration values, with [filtration value]:\n"; for (auto f_simplex : simplexTree.filtration_simplex_range()) { std::cout << " " << "[" << simplexTree.filtration(f_simplex) << "] "; diff --git a/src/python/example/simplex_tree_example.py b/src/python/example/simplex_tree_example.py index 7f20c389..34833899 100755 --- a/src/python/example/simplex_tree_example.py +++ b/src/python/example/simplex_tree_example.py @@ -38,6 +38,10 @@ else: print("dimension=", st.dimension()) +print("simplices=") +for simplex_with_filtration in st.get_simplices(): + print("(%s, %.2f)" % tuple(simplex_with_filtration)) + st.initialize_filtration() print("filtration=") for simplex_with_filtration in st.get_filtration(): diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 66c173a6..82f155de 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -24,6 +24,12 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface::Simplex_handle": pass + cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface::Complex_simplex_iterator": + Simplex_tree_simplices_iterator() + Simplex_tree_simplex_handle& operator*() + Simplex_tree_simplices_iterator operator++() + bint operator!=(Simplex_tree_simplices_iterator) + cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface::Skeleton_simplex_iterator": Simplex_tree_skeleton_iterator() Simplex_tree_simplex_handle& operator*() @@ -53,6 +59,8 @@ cdef extern from "Simplex_tree_interface.h" 
namespace "Gudhi": bool make_filtration_non_decreasing() # Iterators over Simplex tree pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) + Simplex_tree_simplices_iterator get_simplices_iterator_begin() + Simplex_tree_simplices_iterator get_simplices_iterator_end() vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index efac2d80..c01cc905 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -208,10 +208,25 @@ cdef class SimplexTree: return self.get_ptr().insert_simplex_and_subfaces(csimplex, filtration) - def get_filtration(self): + def get_simplices(self): """This function returns a generator with simplices and their given filtration values. + :returns: The simplices. + :rtype: generator with tuples(simplex, filtration) + """ + cdef Simplex_tree_simplices_iterator it = self.get_ptr().get_simplices_iterator_begin() + cdef Simplex_tree_simplices_iterator end = self.get_ptr().get_simplices_iterator_end() + cdef Simplex_tree_simplex_handle sh = dereference(it) + + while it != end: + yield self.get_ptr().get_simplex_and_filtration(dereference(it)) + preincrement(it) + + def get_filtration(self): + """This function returns a generator with simplices and their given + filtration values sorted by increasing filtration values. + :returns: The simplices sorted by increasing filtration values. :rtype: generator with tuples(simplex, filtration) """ diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 66ce5afd..4a7062d6 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -36,6 +36,7 @@ class Simplex_tree_interface : public Simplex_tree { using Simplex_and_filtration = std::pair; using Filtered_simplices = std::vector; using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator; + using Complex_simplex_iterator = typename Base::Complex_simplex_iterator; public: bool find_simplex(const Simplex& vh) { @@ -122,6 +123,16 @@ class Simplex_tree_interface : public Simplex_tree { } // Iterator over the simplex tree + Complex_simplex_iterator get_simplices_iterator_begin() { + // this specific case works because the range is just a pair of iterators - won't work if range was a vector + return Base::complex_simplex_range().begin(); + } + + Complex_simplex_iterator get_simplices_iterator_end() { + // this specific case works because the range is just a pair of iterators - won't work if range was a vector + return Base::complex_simplex_range().end(); + } + typename std::vector::const_iterator get_filtration_iterator_begin() { // Base::initialize_filtration(); already performed in filtration_simplex_range // this specific case works because the range is just a pair of iterators - won't work if range was a vector diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 04b26e92..f7848379 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -249,3 +249,15 @@ def test_make_filtration_non_decreasing(): assert st.filtration([3, 4, 5]) == 2.0 assert st.filtration([3, 4]) == 2.0 assert st.filtration([4, 5]) == 2.0 + +def test_simplices_iterator(): + st = SimplexTree() + + 
assert st.insert([0, 1, 2], filtration=4.0) == True + assert st.insert([2, 3, 4], filtration=2.0) == True + + for simplex in st.get_simplices(): + print("simplex is: ", simplex[0]) + assert st.find(simplex[0]) == True + print("filtration is: ", simplex[1]) + assert st.filtration(simplex[0]) == simplex[1] -- cgit v1.2.3 From 3099b2395fa143aa6c9b3df2c6087ccd017ff87c Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Mon, 16 Mar 2020 12:51:34 -0400 Subject: fixed doc --- src/python/gudhi/representations/kernel_methods.py | 45 +++++++++------- src/python/gudhi/representations/metrics.py | 63 +++++++++++++--------- 2 files changed, 66 insertions(+), 42 deletions(-) diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py index d89f69ab..50186d63 100644 --- a/src/python/gudhi/representations/kernel_methods.py +++ b/src/python/gudhi/representations/kernel_methods.py @@ -20,13 +20,16 @@ from .preprocessing import Padding def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details. - :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). - :param D2: (m x 2) numpy.array encoding the second diagram. - :param bandwidth: bandwidth of the Gaussian kernel with which persistence diagrams will be convolved - :param weight: weight function for the persistence diagram points. This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y]. - :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). - :returns: the persistence weighted Gaussian kernel value between persistence diagrams. - :rtype: float + + Parameters: + D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + D2: (m x 2) numpy.array encoding the second diagram. + bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved + weight: weight function for the persistence diagram points. This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y]. + kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + + Returns: + float: the persistence weighted Gaussian kernel value between persistence diagrams. """ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))]) ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))]) @@ -42,12 +45,15 @@ def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_appr def persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence scale space kernel value from two persistence diagrams. 
The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details. - :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). - :param D2: (m x 2) numpy.array encoding the second diagram. - :param bandwidth: bandwidth of the Gaussian kernel with which persistence diagrams will be convolved - :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). - :returns: the persistence scale space kernel value between persistence diagrams. - :rtype: float + + Parameters: + D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + D2: (m x 2) numpy.array encoding the second diagram. + bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved + kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + + Returns: + float: the persistence scale space kernel value between persistence diagrams. """ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0) DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0) @@ -57,11 +63,14 @@ def persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): def pairwise_persistence_diagram_kernels(X, Y=None, metric="sliced_wasserstein", **kwargs): """ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). - :param X: first list of persistence diagrams. - :param Y: second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only. - :param metric: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. - :returns: kernel matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) - :rtype: float + + Parameters: + X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. + Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only. + metric: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + + Returns: + numpy array of shape (nxm): kernel matrix. 
""" XX = np.reshape(np.arange(len(X)), [-1,1]) YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 4070c321..e2c30f8c 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -20,11 +20,14 @@ from .preprocessing import Padding def sliced_wasserstein_distance(D1, D2, num_directions): """ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. - :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). - :param D2: (m x 2) numpy.array encoding the second diagram. - :param num_directions: number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. - :returns: the sliced Wasserstein distance between persistence diagrams. - :rtype: float + + Parameters: + D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). + D2: (m x 2) numpy.array encoding the second diagram. + num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. + + Returns: + float: the sliced Wasserstein distance between persistence diagrams. """ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) @@ -42,10 +45,13 @@ def sliced_wasserstein_distance(D1, D2, num_directions): def compute_persistence_diagram_projections(X, num_directions): """ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams. - :param X: list of persistence diagrams. - :param num_directions: number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. - :returns: list of projected persistence diagrams. - :rtype: float + + Parameters: + X (list of n numpy arrays of shape (numx2)): list of persistence diagrams. + num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. + + Returns: + XX (list of n numpy arrays of shape (2*numx2)): list of projected persistence diagrams. """ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) @@ -55,10 +61,13 @@ def compute_persistence_diagram_projections(X, num_directions): def sliced_wasserstein_distance_on_projections(D1, D2): """ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. 
- :param D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections. - :param D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections. - :returns: the sliced Wasserstein distance between the projected persistence diagrams. - :rtype: float + + Parameters: + D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections. + D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections. + + Returns: + float: the sliced Wasserstein distance between the projected persistence diagrams. """ lim1, lim2 = int(len(D1)/2), int(len(D2)/2) approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:] @@ -70,12 +79,15 @@ def sliced_wasserstein_distance_on_projections(D1, D2): def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. - :param D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). - :param D2: (m x 2) numpy.array encoding the second diagram. - :param bandwidth: bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions. - :param kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). - :returns: the persistence Fisher distance between persistence diagrams. - :rtype: float + + Parameters: + D1: (n x 2) numpy.array encoding the (finite points of the) first diagram). Must not contain essential points (i.e. with infinite coordinate). + D2: (m x 2) numpy.array encoding the second diagram. + bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions. + kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). + + Returns: + float: the persistence Fisher distance between persistence diagrams. """ projection = (1./2) * np.ones((2,2)) diagonal_projections1 = np.matmul(D1, projection) @@ -127,11 +139,14 @@ PAIRWISE_DISTANCE_FUNCTIONS = { def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs): """ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). - :param X: first list of persistence diagrams. - :param Y: second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. - :param metric: distance to use. 
It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. - :returns: distance matrix, i.e., numpy array of shape (num diagrams 1 x num diagrams 2) - :rtype: float + + Parameters: + X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. + Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. + metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + + Returns: + numpy array of shape (nxm): distance matrix """ XX = np.reshape(np.arange(len(X)), [-1,1]) YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) -- cgit v1.2.3 From 5c55e976606b4dd020bd4e21c93ae22143ef5348 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Mar 2020 18:01:16 +0100 Subject: changed doc of matchings for a more explicit (and hopefully sphinx-valid) version --- src/python/doc/wasserstein_distance_user.rst | 29 ++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 9519caa6..4c3b53dd 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -58,16 +58,29 @@ An index of -1 represents the diagonal. import gudhi.wasserstein import numpy as np - diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) - diag2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]]) - cost, matching = gudhi.wasserstein.wasserstein_distance(diag1, diag2, matching=True, order=1., internal_p=2.) - - message = "Wasserstein distance value = %.2f, optimal matching: %s" %(cost, matching) - print(message) + dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) + dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]]) + cost, matchings = gudhi.wasserstein.wasserstein_distance(diag1, diag2, matching=True, order=1., internal_p=2.) + + message_cost = "Wasserstein distance value = %.2f" %cost + print(message_cost) + dgm1_to_diagonal = matchings[np.where(matchings[:,0] == -1)][:,1] + dgm2_to_diagonal = matchings[np.where(matchings[:,1] == -1)][:,0] + off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0) + + for i,j in off_diagonal_match: + print("point %s in dgm1 is matched to point %s in dgm2" %(i,j)) + for i in dgm1_to_diagonal: + print("point %s in dgm1 is matched to the diagonal" %i) + for j in dgm2_to_diagonal: + print("point %s in dgm2 is matched to the diagonal" %j) The output is: .. 
testoutput:: - Wasserstein distance value = 2.15, optimal matching: [[0, 0], [1, 2], [2, -1], [-1, 1]] - + Wasserstein distance value = 2.15 + point 0 in dgm1 is matched to point 0 in dgm2 + point 1 in dgm1 is matched to point 2 in dgm2 + point 2 in dgm1 is matched to the diagonal + point 1 in dgm2 is matched to the diagonal -- cgit v1.2.3 From 66f0b08a8f8d5006f8d29352c169525cc53a22e6 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Mar 2020 19:11:30 +0100 Subject: changed typo in doc (diag --> dgm), used integer for order and internal p, simplify th ecode --- src/python/doc/wasserstein_distance_user.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 4c3b53dd..f43b2217 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -36,10 +36,10 @@ Note that persistence diagrams must be submitted as (n x 2) numpy arrays and mus import gudhi.wasserstein import numpy as np - diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) - diag2 = np.array([[2.8, 4.45],[9.5, 14.1]]) + dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) + dgm2 = np.array([[2.8, 4.45],[9.5, 14.1]]) - message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(diag1, diag2, order=1., internal_p=2.) + message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2.) print(message) The output is: @@ -60,12 +60,12 @@ An index of -1 represents the diagonal. dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]]) dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]]) - cost, matchings = gudhi.wasserstein.wasserstein_distance(diag1, diag2, matching=True, order=1., internal_p=2.) + cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, matching=True, order=1, internal_p=2) message_cost = "Wasserstein distance value = %.2f" %cost print(message_cost) - dgm1_to_diagonal = matchings[np.where(matchings[:,0] == -1)][:,1] - dgm2_to_diagonal = matchings[np.where(matchings[:,1] == -1)][:,0] + dgm1_to_diagonal = matching[matching[:,0] == -1, 1] + dgm2_to_diagonal = matching[matching[:,1] == -1, 0] off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0) for i,j in off_diagonal_match: -- cgit v1.2.3 From a253c0c4f54a9a148740ed9c20457df0ea43c842 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Mar 2020 19:36:07 +0100 Subject: correction typo in usr.rst --- src/python/doc/wasserstein_distance_user.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index f43b2217..25e51d68 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -64,8 +64,8 @@ An index of -1 represents the diagonal. 
message_cost = "Wasserstein distance value = %.2f" %cost print(message_cost) - dgm1_to_diagonal = matching[matching[:,0] == -1, 1] - dgm2_to_diagonal = matching[matching[:,1] == -1, 0] + dgm1_to_diagonal = matchings[matchings[:,0] == -1, 1] + dgm2_to_diagonal = matchings[matchings[:,1] == -1, 0] off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0) for i,j in off_diagonal_match: -- cgit v1.2.3 From 60d11e3f06e08b66e49997f389c4dc01b00b793f Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Mar 2020 21:17:38 +0100 Subject: correction of typo in usr.rst --- src/python/doc/wasserstein_distance_user.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 25e51d68..a9b21fa5 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -64,8 +64,8 @@ An index of -1 represents the diagonal. message_cost = "Wasserstein distance value = %.2f" %cost print(message_cost) - dgm1_to_diagonal = matchings[matchings[:,0] == -1, 1] - dgm2_to_diagonal = matchings[matchings[:,1] == -1, 0] + dgm1_to_diagonal = matchings[matchings[:,1] == -1, 0] + dgm2_to_diagonal = matchings[matchings[:,0] == -1, 1] off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0) for i,j in off_diagonal_match: -- cgit v1.2.3 From 6e289999fab86bf06cd69c5b7b846c4f26e0a525 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 17 Mar 2020 00:13:32 -0400 Subject: fixes --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 74 +++++++++++++++------------ src/python/test/test_simplex_tree.py | 12 ++--- 2 files changed, 47 insertions(+), 39 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 7be14bce..02f2c7e9 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1354,6 +1354,7 @@ class Simplex_tree { // Replacing if(f=max)) would mean that if f is NaN, we replace it with the max of the children. // That seems more useful than keeping NaN. if (!(simplex.second.filtration() >= max_filt_border_value)) { + // Store the filtration modification information modified = true; simplex.second.assign_filtration(max_filt_border_value); @@ -1473,15 +1474,21 @@ class Simplex_tree { /** \brief Retrieve good values for extended persistence, and separate the * diagrams into the ordinary, relative, extended+ and extended- subdiagrams. - * Need extend_filtration to be called first! + * \post This function should be called only if extend_filtration has been called first! + * \post The coordinates of the persistence diagram points might be a little different than the + * original filtration values due to the internal transformation (scaling to [-2,-1]) that is + * performed on these values during the computation of extended persistence. * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration * and this->get_persistence. * @return A vector of four persistence diagrams. The first one is Ordinary, the * second one is Relative, the third one is Extended+ and the fourth one is Extended-. + * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. 
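For reference, a small sketch (in Python, mirroring the C++ formulas in this hunk) of the affine rescaling that makes the original filtration values recoverable; minval/maxval are the extrema computed in extend_filtration:

    def ascending(v, minval, maxval):
        # original vertex values are sent to [-2, -1]
        return -2 + (v - minval) / (maxval - minval)

    def descending(v, minval, maxval):
        # cone copies are sent to [1, 2], in reverse order
        return 2 - (v - minval) / (maxval - minval)

    def invert_ascending(x, minval, maxval):
        # compute_extended_persistence_subdiagrams undoes the maps this way
        return minval + (maxval - minval) * (x + 2)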
*/ std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ std::vector>>> new_dgm(4); double x, y; + double minval_ = this->minval_; + double maxval_ = this->maxval_; for(unsigned int i = 0; i < dgm.size(); i++){ int h = dgm[i].first; double px = dgm[i].second.first; @@ -1516,69 +1523,70 @@ class Simplex_tree { /** \brief Extend filtration for computing extended persistence. * This function only uses the filtration values at the 0-dimensional simplices, * and computes the extended persistence diagram induced by the lower-star filtration - * computed with these values. Note that after calling this function, the filtration + * computed with these values. + * \post Note that after calling this function, the filtration * values are actually modified. The function compute_extended_persistence_subdiagrams * retrieves the original values and separates the extended persistence diagram points * w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after * computing the persistent homology of the extended simplicial complex. + * \post Note that this code creates an extra vertex internally, so you should make sure that + * the Simplex tree does not contain a vertex with the largest Vertex_handle. */ void extend_filtration() { // Compute maximum and minimum of filtration values - int maxvert = -std::numeric_limits::infinity(); - std::vector filt; - for (auto sh : this->complex_simplex_range()) { - if (this->dimension(sh) == 0){ - filt.push_back(this->filtration(sh)); - maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert); - } + int maxvert = std::numeric_limits::min(); + this->minval_ = std::numeric_limits::max(); + this->maxval_ = std::numeric_limits::min(); + for (auto sh : this->skeleton_simplex_range(0)) { + double f = this->filtration(sh); + this->minval_ = std::min(this->minval_, f); + this->maxval_ = std::max(this->maxval_, f); + maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert); } - minval_ = *std::min_element(filt.begin(), filt.end()); - maxval_ = *std::max_element(filt.begin(), filt.end()); + + assert (maxvert < std::numeric_limits::max()); maxvert += 1; - // Compute vectors of integers corresponding to the Simplex handles - std::vector > splxs; - for (auto sh : this->complex_simplex_range()) { - std::vector vr; - for (auto vh : this->simplex_vertex_range(sh)){ - vr.push_back(vh); - } - splxs.push_back(vr); - } + Simplex_tree* st_copy = new Simplex_tree(*this); // Add point for coning the simplicial complex int count = this->num_simplices(); - std::vector cone; - cone.push_back(maxvert); - auto ins = this->insert_simplex(cone, -3); - this->assign_key(ins.first, count); + this->insert_simplex({maxvert}, -3); count++; // For each simplex - for (auto vr : splxs){ + for (auto sh_copy : st_copy->complex_simplex_range()){ + + // Locate simplex + std::vector vr; + for (auto vh : st_copy->simplex_vertex_range(sh_copy)){ + vr.push_back(vh); + } + auto sh = this->find(vr); + // Create cone on simplex - auto sh = this->find(vr); vr.push_back(maxvert); + vr.push_back(maxvert); if (this->dimension(sh) == 0){ // Assign ascending value between -2 and -1 to vertex double v = this->filtration(sh); - this->assign_filtration(sh, -2 + (v-minval_)/(maxval_-minval_)); + this->assign_filtration(sh, -2 + (v-this->minval_)/(this->maxval_-this->minval_)); // Assign descending value between 1 and 2 to cone on vertex - auto ins = this->insert_simplex(vr, 2 - (v-minval_)/(maxval_-minval_)); - this->assign_key(ins.first, count); + 
this->insert_simplex(vr, 2 - (v-this->minval_)/(this->maxval_-this->minval_)); } else{ // Assign value -3 to simplex and cone on simplex this->assign_filtration(sh, -3); - auto ins = this->insert_simplex(vr, -3); - this->assign_key(ins.first, count); + this->insert_simplex(vr, -3); } count++; } - this->make_filtration_non_decreasing(); - this->initialize_filtration(); + // Deallocate memory + delete st_copy; + // Automatically assign good values for simplices + this->make_filtration_non_decreasing(); } diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index caefeb9c..96ec4707 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -245,6 +245,10 @@ def test_make_filtration_non_decreasing(): assert st.filtration([0, 1, 6]) == 1.0 assert st.filtration([0, 1]) == 1.0 assert st.filtration([0]) == 1.0 + assert st.filtration([1]) == 1.0 + assert st.filtration([3, 4, 5]) == 2.0 + assert st.filtration([3, 4]) == 2.0 + assert st.filtration([4, 5]) == 2.0 def test_extend_filtration(): @@ -271,7 +275,7 @@ def test_extend_filtration(): st.assign_filtration([4], 5.) st.assign_filtration([5], 6.) - assert st.get_filtration() == [ + assert list(st.get_filtration()) == [ ([0, 2], 0.0), ([1, 2], 0.0), ([0, 3], 0.0), @@ -289,7 +293,7 @@ def test_extend_filtration(): st.extend_filtration() - assert st.get_filtration() == [ + assert list(st.get_filtration()) == [ ([6], -3.0), ([0], -2.0), ([1], -1.8), @@ -327,10 +331,6 @@ def test_extend_filtration(): [(1, (6.0, 1.0))] ] - assert st.filtration([1]) == 1.0 - assert st.filtration([3, 4, 5]) == 2.0 - assert st.filtration([3, 4]) == 2.0 - assert st.filtration([4, 5]) == 2.0 def test_simplices_iterator(): st = SimplexTree() -- cgit v1.2.3 From a52e84fdcdbf66f3542416499c26245d0435a8fb Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 17 Mar 2020 00:48:54 -0400 Subject: fix test --- src/python/test/test_simplex_tree.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 96ec4707..63eee9a5 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -292,6 +292,7 @@ def test_extend_filtration(): st.extend_filtration() + st.initialize_filtration() assert list(st.get_filtration()) == [ ([6], -3.0), -- cgit v1.2.3 From cdc57712ca159f3044453cef41e31ebc03617a1b Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 17 Mar 2020 10:55:14 +0100 Subject: removed _optimal_matching from barycenter as it is now handled by wasserstein_distance. --- src/python/gudhi/barycenter.py | 85 +++----------------------- src/python/test/test_wasserstein_barycenter.py | 2 +- 2 files changed, 9 insertions(+), 78 deletions(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 517cdb2f..0490fdd1 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -12,8 +12,7 @@ import ot import numpy as np import scipy.spatial.distance as sc -from gudhi.wasserstein import _build_dist_matrix, _perstot - +from gudhi.wasserstein import wasserstein_distance, _perstot def _mean(x, m): @@ -32,70 +31,6 @@ def _mean(x, m): return np.array([0, 0]) -def _optimal_matching(X, Y, withcost=False): - ''' - :param X: numpy.array of size (n x 2) - :param Y: numpy.array of size (m x 2) - :param withcost: returns also the cost corresponding to the optimal matching - :returns: numpy.array of shape (k x 2) encoding the list of edges - in the optimal matching. 
- That is, [[i, j] ...], where (i,j) indicates - that X[i] is matched to Y[j] - if i >= len(X) or j >= len(Y), it means they - represent the diagonal. - They will be encoded by -1 afterwards. - - NOTE : this code will be removed for final merge, - and wasserstein.optimal_matching will be used instead. - ''' - - n = len(X) - m = len(Y) - # Start by handling empty diagrams. Could it be shorten? - if X.size == 0: # X is empty - if Y.size == 0: # Y is empty - res = np.array([[0,0]]) # the diagonal is matched to the diagonal - if withcost: - return res, 0 - else: - return res - else: # X is empty but not Y - res = np.array([[0, i] for i in range(m)]) - cost = _perstot(Y, order=2, internal_p=2)**2 - if withcost: - return res, cost - else: - return res - elif Y.size == 0: # X is not empty but Y is empty - res = np.array([[i,0] for i in range(n)]) - cost = _perstot(X, order=2, internal_p=2)**2 - if withcost: - return res, cost - else: - return res - - # we know X, Y are not empty diags now - M = _build_dist_matrix(X, Y, order=2, internal_p=2) - - a = np.ones(n+1) - a[-1] = m - b = np.ones(m+1) - b[-1] = n - P = ot.emd(a=a, b=b, M=M) - # Note : it seems POT returns a permutation matrix in this situation, - # ie a vertex of the constraint set (generically true). - if withcost: - cost = np.sum(np.multiply(P, M)) - P[P < 0.5] = 0 # dirty trick to avoid some numerical issues... to improve. - res = np.argwhere(P) - - # return the list of (i,j) such that P[i,j] > 0, - #i.e. x_i is matched to y_j (should it be the diag). - if withcost: - return res, cost - return res - - def lagrangian_barycenter(pdiagset, init=None, verbose=False): ''' :param pdiagset: a list of size m containing numpy.array of shape (n x 2) @@ -166,16 +101,15 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): # Step 1 : compute optimal matching (Y, X_i) for each X_i # and create new points in Y if needed for i in range(m): - indices = _optimal_matching(Y, X[i]) + _, indices = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.) for y_j, x_i_j in indices: - if y_j < K: # we matched an off diagonal point to x_i_j... - # ...which is also an off-diagonal point. - if x_i_j < nb_off_diag[i]: + if y_j >= 0: # we matched an off diagonal point to x_i_j... + if x_i_j >= 0: # ...which is also an off-diagonal point. G[y_j, i] = x_i_j else: # ...which is a diagonal point G[y_j, i] = -1 # -1 stands for the diagonal (mask) else: # We matched a diagonal point to x_i_j... - if x_i_j < nb_off_diag[i]: # which is a off-diag point ! + if x_i_j >= 0: # which is a off-diag point ! # need to create new point in Y new_y = _mean(np.array([X[i][x_i_j]]), m) # Average this point with (m-1) copies of Delta @@ -209,15 +143,12 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): log = {} n_y = len(Y) for i in range(m): - edges, cost = _optimal_matching(Y, X[i], withcost=True) - n_x = len(X[i]) - G = edges[np.where(edges[:,0]= n_x) - G[idx,1] = -1 # -1 will encode the diagonal - groupings.append(G) + cost, edges = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.) 
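# Illustrative sketch (an aside, with hypothetical toy diagrams): how a
# matching returned by wasserstein_distance(..., matching=True) is read.
import numpy as np
from gudhi.wasserstein import wasserstein_distance

dg_a = np.array([[0.0, 1.0]])
dg_b = np.array([[0.0, 1.1], [2.0, 2.1]])
cost, matching = wasserstein_distance(dg_a, dg_b, matching=True, order=2., internal_p=2.)
# Each row of `matching` is a pair [i, j]: dg_a[i] is matched to dg_b[j],
# and an index equal to -1 encodes the diagonal -- the same convention the
# barycenter code relies on when it tests `y_j >= 0` and `x_i_j >= 0`.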
+ groupings.append(edges) energy += cost log["groupings"] = groupings energy = energy/m + print(energy) log["energy"] = energy log["nb_iter"] = nb_iter diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index 5167cb84..4d18616b 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -38,7 +38,7 @@ def test_lagrangian_barycenter(): assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True) assert np.linalg.norm(Y - np.array([[1,3], [5, 7]])) < eps - assert np.abs(log["energy"] - 4) < eps + assert np.abs(log["energy"] - 2) < eps assert np.array_equal(log["groupings"][0] , np.array([[0, -1], [1, -1]])) assert np.array_equal(log["groupings"][1] , np.array([[0, 0], [1, 1]])) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3], [5, 7]])) < eps -- cgit v1.2.3 From 47e72c7250bff568735df829a40bcbea0a48f7c2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 17 Mar 2020 13:28:35 +0100 Subject: Remove code that was commented out. --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index ad592a92..af711075 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1494,16 +1494,8 @@ class Simplex_tree { * * \pre `sh` must have dimension at least 1. */ Simplex_handle edge_with_same_filtration(Simplex_handle sh) { -#if 0 - // FIXME: Only do this if dim >= 2, since we don't want to return a vertex... - // Test if we are lucky and the parent has the same filtration value. - Siblings* sib = self_siblings(sh); - Vertex_handle v_par = sib->parent(); - sib = sib->oncles(); - Simplex_handle par = sib->find(v_par); - if(filtration_(par) == filt) return edge_with_same_filtration(par); -#endif - auto&& vertices = simplex_vertex_range(sh); + // See issue #251 for potential speed improvements. 
+ auto&& vertices = simplex_vertex_range(sh); // vertices in decreasing order auto end = std::end(vertices); auto vi = std::begin(vertices); GUDHI_CHECK(vi != end, "empty simplex"); -- cgit v1.2.3 From e16ca3a14de72e304d87a4c11b6115e18df899fa Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 17 Mar 2020 16:27:58 +0100 Subject: Fix #249 --- .github/test-requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt index bd03f98e..18882792 100644 --- a/.github/test-requirements.txt +++ b/.github/test-requirements.txt @@ -5,4 +5,5 @@ sphinx-paramlinks matplotlib scipy scikit-learn -POT \ No newline at end of file +POT +tensorflow \ No newline at end of file -- cgit v1.2.3 From 58d923b13afb9b18a2d5b028c6575baee691d182 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 17 Mar 2020 12:14:49 -0400 Subject: update python doc --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 8 +++---- src/python/gudhi/simplex_tree.pyx | 34 +++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 02f2c7e9..f661f687 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1478,8 +1478,8 @@ class Simplex_tree { * \post The coordinates of the persistence diagram points might be a little different than the * original filtration values due to the internal transformation (scaling to [-2,-1]) that is * performed on these values during the computation of extended persistence. - * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration - * and this->get_persistence. + * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration, + * this->initialize_filtration, and this->compute_persistent_cohomology. * @return A vector of four persistence diagrams. The first one is Ordinary, the * second one is Relative, the third one is Extended+ and the fourth one is Extended-. * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. @@ -1538,14 +1538,14 @@ class Simplex_tree { int maxvert = std::numeric_limits::min(); this->minval_ = std::numeric_limits::max(); this->maxval_ = std::numeric_limits::min(); - for (auto sh : this->skeleton_simplex_range(0)) { + for (auto sh = root_.members().begin(); sh != root_.members().end(); ++sh){ double f = this->filtration(sh); this->minval_ = std::min(this->minval_, f); this->maxval_ = std::max(this->maxval_, f); maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert); } - assert (maxvert < std::numeric_limits::max()); + GUDHI_CHECK(maxvert < std::numeric_limits::max(), std::invalid_argument("Simplex_tree contains a vertex with the largest Vertex_handle")); maxvert += 1; Simplex_tree* st_copy = new Simplex_tree(*this); diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 733ecb97..7af44683 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -397,19 +397,43 @@ cdef class SimplexTree: return self.get_ptr().make_filtration_non_decreasing() def extend_filtration(self): - """ Extend filtration for computing extended persistence. This function only uses the filtration values at the 0-dimensional simplices, and computes the extended persistence diagram induced by the lower-star filtration computed with these values. 
Note that after calling this function, the filtration values are actually modified. The function :func:`compute_extended_persistence_subdiagrams()` retrieves the original values and separates the extended persistence diagram points w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after computing the persistent homology of the extended simplicial complex. + """ Extend filtration for computing extended persistence. This function only uses the + filtration values at the 0-dimensional simplices, and computes the extended persistence + diagram induced by the lower-star filtration computed with these values. + + .. note:: + + Note that after calling this function, the filtration + values are actually modified within the Simplex_tree. + The function :func:`compute_extended_persistence_subdiagrams()` + retrieves the original values. + + .. note:: + + Note that this code creates an extra vertex internally, so you should make sure that + the Simplex_tree does not contain a vertex with the largest Vertex_handle. """ return self.get_ptr().extend_filtration() def compute_extended_persistence_subdiagrams(self, dgm): - """This function retrieves good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. + """This function retrieves good values for extended persistence, and separate the diagrams + into the ordinary, relative, extended+ and extended- subdiagrams. + + :param dgm: Persistence diagram obtained after calling :func:`extend_filtration()`, :func:`initialize_filtration()`, and :func:`persistence()`. + + :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + + .. note:: - :param dgm: Persistence diagram obtained after calling :func:`extend_filtration()` and :func:`persistence()`. - :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. + This function should be called only if :func:`extend_filtration()`, + :func:`initialize_filtration()`, + and :func:`persistence()` have been called first! .. note:: - This function should be called only after calling :func:`extend_filtration()` and :func:`persistence()`. + The coordinates of the persistence diagram points might be a little different than the + original filtration values due to the internal transformation (scaling to [-2,-1]) that is + performed on these values during the computation of extended persistence. 
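        A typical call sequence, as an illustrative sketch (assuming ``st`` is a
        :class:`SimplexTree` whose vertices already carry filtration values)::

            st.extend_filtration()
            st.initialize_filtration()
            dgm = st.persistence()
            subdiagrams = st.compute_extended_persistence_subdiagrams(dgm)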
""" return self.get_ptr().compute_extended_persistence_subdiagrams(dgm) -- cgit v1.2.3 From 18a0eb17d9370eca6dde7c0cada0624302ded002 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 17 Mar 2020 12:31:18 -0400 Subject: implement Marc's suggestions --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 1c06e7cb..5b36cc1c 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1552,13 +1552,13 @@ class Simplex_tree { double f = this->filtration(sh); this->minval_ = std::min(this->minval_, f); this->maxval_ = std::max(this->maxval_, f); - maxvert = std::max(*this->simplex_vertex_range(sh).begin(), maxvert); + maxvert = std::max(sh->first, maxvert); } GUDHI_CHECK(maxvert < std::numeric_limits::max(), std::invalid_argument("Simplex_tree contains a vertex with the largest Vertex_handle")); maxvert += 1; - Simplex_tree* st_copy = new Simplex_tree(*this); + Simplex_tree st_copy = *this; // Add point for coning the simplicial complex int count = this->num_simplices(); @@ -1566,11 +1566,11 @@ class Simplex_tree { count++; // For each simplex - for (auto sh_copy : st_copy->complex_simplex_range()){ + for (auto sh_copy : st_copy.complex_simplex_range()){ // Locate simplex std::vector vr; - for (auto vh : st_copy->simplex_vertex_range(sh_copy)){ + for (auto vh : st_copy.simplex_vertex_range(sh_copy)){ vr.push_back(vh); } auto sh = this->find(vr); @@ -1592,9 +1592,6 @@ class Simplex_tree { count++; } - // Deallocate memory - delete st_copy; - // Automatically assign good values for simplices this->make_filtration_non_decreasing(); } -- cgit v1.2.3 From 513f15705668c4da0b44506052d78a9721ef1b64 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 17 Mar 2020 17:55:43 +0100 Subject: Fix #224 --- .../include/gudhi/Persistent_cohomology.h | 24 ++++++---------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index 0f1876d0..b1ded5ae 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -566,15 +566,9 @@ class Persistent_cohomology { std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); bool has_infinity = std::numeric_limits::has_infinity; for (auto pair : persistent_pairs_) { - // Special case on windows, inf is "1.#INF" (cf. 
unitary tests and R package TDA) - if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits::infinity()) { - ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " " - << cpx_->filtration(get<0>(pair)) << " inf " << std::endl; - } else { - ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " " - << cpx_->filtration(get<0>(pair)) << " " - << cpx_->filtration(get<1>(pair)) << " " << std::endl; - } + ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " " + << cpx_->filtration(get<0>(pair)) << " " + << cpx_->filtration(get<1>(pair)) << " " << std::endl; } } @@ -584,15 +578,9 @@ class Persistent_cohomology { std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); bool has_infinity = std::numeric_limits::has_infinity; for (auto pair : persistent_pairs_) { - // Special case on windows, inf is "1.#INF" - if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits::infinity()) { - diagram_out << cpx_->dimension(get<0>(pair)) << " " - << cpx_->filtration(get<0>(pair)) << " inf" << std::endl; - } else { - diagram_out << cpx_->dimension(get<0>(pair)) << " " - << cpx_->filtration(get<0>(pair)) << " " - << cpx_->filtration(get<1>(pair)) << std::endl; - } + diagram_out << cpx_->dimension(get<0>(pair)) << " " + << cpx_->filtration(get<0>(pair)) << " " + << cpx_->filtration(get<1>(pair)) << std::endl; } } -- cgit v1.2.3 From b262406b0a75e39276c11f70ef1174981aa31b51 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 17 Mar 2020 17:57:17 +0100 Subject: Remove thread_local workaround --- src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 5 +-- src/Nerve_GIC/include/gudhi/GIC.h | 14 +------- .../include/gudhi/Persistent_cohomology.h | 5 +-- src/Simplex_tree/include/gudhi/Simplex_tree.h | 12 ++----- src/cmake/modules/GUDHI_compilation_flags.cmake | 37 ---------------------- src/python/CMakeLists.txt | 10 ------ 6 files changed, 5 insertions(+), 78 deletions(-) diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h index 7f96c94c..1486cefd 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h @@ -61,10 +61,7 @@ namespace Gudhi { namespace alpha_complex { -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL -thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001; +thread_local double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001; // Value_from_iterator returns the filtration value from an iterator on alpha shapes values // diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h index 2a6d4788..9a4c813d 100644 --- a/src/Nerve_GIC/include/gudhi/GIC.h +++ b/src/Nerve_GIC/include/gudhi/GIC.h @@ -139,19 +139,9 @@ class Cover_complex { for (boost::tie(ei, ei_end) = boost::edges(G); ei != ei_end; ++ei) boost::remove_edge(*ei, G); } - // Thread local is not available on XCode version < V.8 - // If not available, random engine is a class member. -#ifndef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::default_random_engine re; -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - // Find random number in [0,1]. double GetUniform() { - // Thread local is not available on XCode version < V.8 - // If available, random engine is defined for each thread. 
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL thread_local std::default_random_engine re; -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL std::uniform_real_distribution Dist(0, 1); return Dist(re); } @@ -456,9 +446,7 @@ class Cover_complex { if (distances.size() == 0) compute_pairwise_distances(distance); - // This cannot be parallelized if thread_local is not defined - // thread_local is not defined for XCode < v.8 - #if defined(GUDHI_USE_TBB) && defined(GUDHI_CAN_USE_CXX11_THREAD_LOCAL) + #ifdef GUDHI_USE_TBB std::mutex deltamutex; tbb::parallel_for(0, N, [&](int i){ std::vector samples(m); diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index b1ded5ae..ca4bc10d 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -288,10 +288,7 @@ class Persistent_cohomology { // with multiplicity. We used to sum the coefficients directly in // annotations_in_boundary by using a map, we now do it later. typedef std::pair annotation_t; -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector annotations_in_boundary; + thread_local std::vector annotations_in_boundary; annotations_in_boundary.clear(); int sign = 1 - 2 * (dim_sigma % 2); // \in {-1,1} provides the sign in the // alternate sum in the boundary. diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index b7fb9002..2adc8354 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -765,12 +765,7 @@ class Simplex_tree { if (first == last) return { null_simplex(), true }; // FIXME: false would make more sense to me. - // Copy before sorting - // Thread local is not available on XCode version < V.8 - It will slow down computation -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector copy; + thread_local std::vector copy; copy.clear(); copy.insert(copy.end(), first, last); std::sort(copy.begin(), copy.end()); @@ -1133,10 +1128,7 @@ class Simplex_tree { Dictionary_it next = siblings->members().begin(); ++next; -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector > inter; + thread_local std::vector > inter; for (Dictionary_it s_h = siblings->members().begin(); s_h != siblings->members().end(); ++s_h, ++next) { Simplex_handle root_sh = find_vertex(s_h->first); diff --git a/src/cmake/modules/GUDHI_compilation_flags.cmake b/src/cmake/modules/GUDHI_compilation_flags.cmake index 34c2e065..567fbc40 100644 --- a/src/cmake/modules/GUDHI_compilation_flags.cmake +++ b/src/cmake/modules/GUDHI_compilation_flags.cmake @@ -1,7 +1,6 @@ # This files manage compilation flags required by GUDHI include(TestCXXAcceptsFlag) -include(CheckCXXSourceCompiles) # add a compiler flag only if it is accepted macro(add_cxx_compiler_flag _flag) @@ -12,32 +11,6 @@ macro(add_cxx_compiler_flag _flag) endif() endmacro() -function(can_cgal_use_cxx11_thread_local) - # This is because of https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/tss.h - # CGAL is using boost thread if thread_local is not ready (requires XCode 8 for Mac). 
- # The test in https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/config.h - # #if __has_feature(cxx_thread_local) || \ - # ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \ - # ( _MSC_VER >= 1900 ) - # #define CGAL_CAN_USE_CXX11_THREAD_LOCAL - # #endif - set(CGAL_CAN_USE_CXX11_THREAD_LOCAL " - int main() { - #ifndef __has_feature - #define __has_feature(x) 0 // Compatibility with non-clang compilers. - #endif - #if __has_feature(cxx_thread_local) || \ - ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \ - ( _MSC_VER >= 1900 ) - bool has_feature_thread_local = true; - #else - // Explicit error of compilation for CMake test purpose - has_feature_thread_local is not defined - #endif - bool result = has_feature_thread_local; - } ") - check_cxx_source_compiles("${CGAL_CAN_USE_CXX11_THREAD_LOCAL}" CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT) -endfunction() - set (CMAKE_CXX_STANDARD 14) enable_testing() @@ -58,16 +31,6 @@ if (DEBUG_TRACES) add_definitions(-DDEBUG_TRACES) endif() -set(GUDHI_CAN_USE_CXX11_THREAD_LOCAL " - int main() { - thread_local int result = 0; - return result; - } ") -check_cxx_source_compiles("${GUDHI_CAN_USE_CXX11_THREAD_LOCAL}" GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT) -if (GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT) - add_definitions(-DGUDHI_CAN_USE_CXX11_THREAD_LOCAL) -endif() - if(CMAKE_BUILD_TYPE MATCHES Debug) message("++ Debug compilation flags are: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") else() diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 22af3ec9..f00966a5 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -128,16 +128,6 @@ if(PYTHONINTERP_FOUND) endif () if(CGAL_FOUND) - can_cgal_use_cxx11_thread_local() - if (NOT CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT) - if(CMAKE_BUILD_TYPE MATCHES Debug) - add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_DEBUG}") - else() - add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_RELEASE}") - endif() - message("** Add Boost ${Boost_LIBRARY_DIRS}") - set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${Boost_LIBRARY_DIRS}', ") - endif() # Add CGAL compilation args if(CGAL_HEADER_ONLY) add_gudhi_debug_info("CGAL header only version ${CGAL_VERSION}") -- cgit v1.2.3 From a4bf8306d3926428a7d5087d96fbf8033d3bd932 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 17 Mar 2020 16:17:57 -0400 Subject: fix Marc's comments --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 53 +++++++++++++-------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 5b36cc1c..6c837042 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -126,8 +126,8 @@ class Simplex_tree { private: typedef typename Dictionary::iterator Dictionary_it; typedef typename Dictionary_it::value_type Dit_value_t; - double minval_; - double maxval_; + Filtration_value minval_; + Filtration_value maxval_; struct return_first { Vertex_handle operator()(const Dit_value_t& p_sh) const { @@ -1484,25 +1484,25 @@ class Simplex_tree { /** \brief Retrieve good values for extended persistence, and separate the * diagrams into the ordinary, relative, extended+ and extended- subdiagrams. - * \post This function should be called only if extend_filtration has been called first! + * \pre This function should be called only if this->extend_filtration() has been called first! 
* \post The coordinates of the persistence diagram points might be a little different than the * original filtration values due to the internal transformation (scaling to [-2,-1]) that is * performed on these values during the computation of extended persistence. - * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration, - * this->initialize_filtration, and this->compute_persistent_cohomology. + * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration(), + * this->initialize_filtration(), and Gudhi::persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(). * @return A vector of four persistence diagrams. The first one is Ordinary, the * second one is Relative, the third one is Extended+ and the fourth one is Extended-. * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. */ - std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ - std::vector>>> new_dgm(4); - double x, y; - double minval_ = this->minval_; - double maxval_ = this->maxval_; + std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ + std::vector>>> new_dgm(4); + Filtration_value x, y; + Filtration_value minval_ = this->minval_; + Filtration_value maxval_ = this->maxval_; for(unsigned int i = 0; i < dgm.size(); i++){ int h = dgm[i].first; - double px = dgm[i].second.first; - double py = dgm[i].second.second; + Filtration_value px = dgm[i].second.first; + Filtration_value py = dgm[i].second.second; if(std::isinf(py)) continue; else{ if ((px <= -1) & (py <= -1)){ @@ -1510,12 +1510,12 @@ class Simplex_tree { y = minval_ + (maxval_-minval_)*(py + 2); new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y))); } - if ((px >= 1) & (py >= 1)){ + else if ((px >= 1) & (py >= 1)){ x = minval_ - (maxval_-minval_)*(px - 2); y = minval_ - (maxval_-minval_)*(py - 2); new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y))); } - if ((px <= -1) & (py >= 1)){ + else { x = minval_ + (maxval_-minval_)*(px + 2); y = minval_ - (maxval_-minval_)*(py - 2); if (x <= y){ @@ -1539,37 +1539,36 @@ class Simplex_tree { * retrieves the original values and separates the extended persistence diagram points * w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after * computing the persistent homology of the extended simplicial complex. - * \post Note that this code creates an extra vertex internally, so you should make sure that + * \pre Note that this code creates an extra vertex internally, so you should make sure that * the Simplex tree does not contain a vertex with the largest Vertex_handle. 
*/ void extend_filtration() { // Compute maximum and minimum of filtration values - int maxvert = std::numeric_limits::min(); - this->minval_ = std::numeric_limits::max(); - this->maxval_ = std::numeric_limits::min(); + Vertex_handle maxvert = std::numeric_limits::min(); + this->minval_ = std::numeric_limits::infinity(); + this->maxval_ = -std::numeric_limits::infinity(); for (auto sh = root_.members().begin(); sh != root_.members().end(); ++sh){ - double f = this->filtration(sh); + Filtration_value f = this->filtration(sh); this->minval_ = std::min(this->minval_, f); this->maxval_ = std::max(this->maxval_, f); maxvert = std::max(sh->first, maxvert); } - GUDHI_CHECK(maxvert < std::numeric_limits::max(), std::invalid_argument("Simplex_tree contains a vertex with the largest Vertex_handle")); + GUDHI_CHECK(maxvert < std::numeric_limits::max(), std::invalid_argument("Simplex_tree contains a vertex with the largest Vertex_handle")); maxvert += 1; Simplex_tree st_copy = *this; // Add point for coning the simplicial complex - int count = this->num_simplices(); this->insert_simplex({maxvert}, -3); - count++; // For each simplex + std::vector vr; for (auto sh_copy : st_copy.complex_simplex_range()){ // Locate simplex - std::vector vr; + vr.clear(); for (auto vh : st_copy.simplex_vertex_range(sh_copy)){ vr.push_back(vh); } @@ -1578,18 +1577,18 @@ class Simplex_tree { // Create cone on simplex vr.push_back(maxvert); if (this->dimension(sh) == 0){ + Filtration_value v = this->filtration(sh); + Filtration_value scaled_v = (v-this->minval_)/(this->maxval_-this->minval_); // Assign ascending value between -2 and -1 to vertex - double v = this->filtration(sh); - this->assign_filtration(sh, -2 + (v-this->minval_)/(this->maxval_-this->minval_)); + this->assign_filtration(sh, -2 + scaled_v); // Assign descending value between 1 and 2 to cone on vertex - this->insert_simplex(vr, 2 - (v-this->minval_)/(this->maxval_-this->minval_)); + this->insert_simplex(vr, 2 - scaled_v); } else{ // Assign value -3 to simplex and cone on simplex this->assign_filtration(sh, -3); this->insert_simplex(vr, -3); } - count++; } // Automatically assign good values for simplices -- cgit v1.2.3 From e1c8edc4b148331083f53c7c3d34766190bb6d99 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 17 Mar 2020 22:16:23 +0100 Subject: Another proposal to fix #248 --- src/python/doc/alpha_complex_sum.inc | 2 +- src/python/doc/bottleneck_distance_sum.inc | 2 +- src/python/doc/cubical_complex_sum.inc | 2 +- src/python/doc/nerve_gic_complex_sum.inc | 2 +- src/python/doc/persistence_graphical_tools_sum.inc | 2 +- src/python/doc/persistent_cohomology_sum.inc | 2 +- src/python/doc/point_cloud_sum.inc | 2 +- src/python/doc/representations_sum.inc | 2 +- src/python/doc/rips_complex_sum.inc | 2 +- src/python/doc/simplex_tree_sum.inc | 2 +- src/python/doc/tangential_complex_sum.inc | 2 +- src/python/doc/wasserstein_distance_sum.inc | 2 +- src/python/doc/witness_complex_sum.inc | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc index b5af0d27..00c35155 100644 --- a/src/python/doc/alpha_complex_sum.inc +++ b/src/python/doc/alpha_complex_sum.inc @@ -1,5 +1,5 @@ .. 
table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | diff --git a/src/python/doc/bottleneck_distance_sum.inc b/src/python/doc/bottleneck_distance_sum.inc index 6eb0ac19..a01e7f04 100644 --- a/src/python/doc/bottleneck_distance_sum.inc +++ b/src/python/doc/bottleneck_distance_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi | diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc index f200e695..ab6388e5 100644 --- a/src/python/doc/cubical_complex_sum.inc +++ b/src/python/doc/cubical_complex_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+ | .. figure:: | The cubical complex is an example of a structured complex useful in | :Author: Pawel Dlotko | diff --git a/src/python/doc/nerve_gic_complex_sum.inc b/src/python/doc/nerve_gic_complex_sum.inc index d633c4ff..d5356eca 100644 --- a/src/python/doc/nerve_gic_complex_sum.inc +++ b/src/python/doc/nerve_gic_complex_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière | diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc index ef376802..723c0f78 100644 --- a/src/python/doc/persistence_graphical_tools_sum.inc +++ b/src/python/doc/persistence_graphical_tools_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | diff --git a/src/python/doc/persistent_cohomology_sum.inc b/src/python/doc/persistent_cohomology_sum.inc index 4d7b077e..9c29bfaa 100644 --- a/src/python/doc/persistent_cohomology_sum.inc +++ b/src/python/doc/persistent_cohomology_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ | .. 
figure:: | The theory of homology consists in attaching to a topological space | :Author: Clément Maria | diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc index 85d52de7..77245e86 100644 --- a/src/python/doc/point_cloud_sum.inc +++ b/src/python/doc/point_cloud_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, etc. | :Author: Vincent Rouvreau | diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index 700828f1..edb8a448 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc index 857c6893..a1f0e469 100644 --- a/src/python/doc/rips_complex_sum.inc +++ b/src/python/doc/rips_complex_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+ | .. figure:: | Rips complex is a simplicial complex constructed from a one skeleton | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse | diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc index 5ba58d2b..3c637b8c 100644 --- a/src/python/doc/simplex_tree_sum.inc +++ b/src/python/doc/simplex_tree_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+ | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria | diff --git a/src/python/doc/tangential_complex_sum.inc b/src/python/doc/tangential_complex_sum.inc index d84aa433..ddc3e609 100644 --- a/src/python/doc/tangential_complex_sum.inc +++ b/src/python/doc/tangential_complex_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin | diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc index a97f428d..1632befa 100644 --- a/src/python/doc/wasserstein_distance_sum.inc +++ b/src/python/doc/wasserstein_distance_sum.inc @@ -1,5 +1,5 @@ .. 
table:: - :widths: 30 50 20 + :widths: 30 40 30 +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe | diff --git a/src/python/doc/witness_complex_sum.inc b/src/python/doc/witness_complex_sum.inc index 71b65a71..f9c009ab 100644 --- a/src/python/doc/witness_complex_sum.inc +++ b/src/python/doc/witness_complex_sum.inc @@ -1,5 +1,5 @@ .. table:: - :widths: 30 50 20 + :widths: 30 40 30 +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich | -- cgit v1.2.3 From 6f445b7e2bdb8481198f8c0f0e076d4fea081d62 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Wed, 18 Mar 2020 12:37:40 -0400 Subject: fix doc --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 6c837042..697afe26 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1484,12 +1484,12 @@ class Simplex_tree { /** \brief Retrieve good values for extended persistence, and separate the * diagrams into the ordinary, relative, extended+ and extended- subdiagrams. - * \pre This function should be called only if this->extend_filtration() has been called first! + * \pre This function should be called only if `extend_filtration()` has been called first! * \post The coordinates of the persistence diagram points might be a little different than the * original filtration values due to the internal transformation (scaling to [-2,-1]) that is * performed on these values during the computation of extended persistence. - * @param[in] dgm Persistence diagram obtained after calling this->extend_filtration(), - * this->initialize_filtration(), and Gudhi::persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(). + * @param[in] dgm Persistence diagram obtained after calling `extend_filtration()`, + * `initialize_filtration()`, and `Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, CoefficientField >::compute_persistent_cohomology()`. * @return A vector of four persistence diagrams. The first one is Ordinary, the * second one is Relative, the third one is Extended+ and the fourth one is Extended-. * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. @@ -1535,7 +1535,7 @@ class Simplex_tree { * and computes the extended persistence diagram induced by the lower-star filtration * computed with these values. * \post Note that after calling this function, the filtration - * values are actually modified. The function compute_extended_persistence_subdiagrams + * values are actually modified. The function `compute_extended_persistence_subdiagrams()` * retrieves the original values and separates the extended persistence diagram points * w.r.t. 
their types (Ord, Rel, Ext+, Ext-) and should always be called after * computing the persistent homology of the extended simplicial complex. @@ -1545,7 +1545,7 @@ class Simplex_tree { void extend_filtration() { // Compute maximum and minimum of filtration values - Vertex_handle maxvert = std::numeric_limits::min(); + Vertex_handle maxvert = std::numeric_limits::min(); this->minval_ = std::numeric_limits::infinity(); this->maxval_ = -std::numeric_limits::infinity(); for (auto sh = root_.members().begin(); sh != root_.members().end(); ++sh){ -- cgit v1.2.3 From 61691b0081cb868645335c0b1433ddcc0bcbf9e3 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Thu, 19 Mar 2020 13:09:59 -0400 Subject: new fixes --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 45 ++++++++++++++++----------- src/python/gudhi/simplex_tree.pxd | 4 +-- src/python/gudhi/simplex_tree.pyx | 32 ++++++++++++++----- src/python/include/Simplex_tree_interface.h | 13 ++++++++ src/python/test/test_simplex_tree.py | 18 ++++++----- 5 files changed, 77 insertions(+), 35 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 697afe26..50b8e582 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -100,6 +100,12 @@ class Simplex_tree { void assign_key(Simplex_key); Simplex_key key() const; }; + struct Extended_filtration_data { + Filtration_value minval; + Filtration_value maxval; + Extended_filtration_data(){} + Extended_filtration_data(Filtration_value vmin, Filtration_value vmax){ minval = vmin; maxval = vmax; } + }; typedef typename std::conditional::type Key_simplex_base; @@ -126,8 +132,6 @@ class Simplex_tree { private: typedef typename Dictionary::iterator Dictionary_it; typedef typename Dictionary_it::value_type Dit_value_t; - Filtration_value minval_; - Filtration_value maxval_; struct return_first { Vertex_handle operator()(const Dit_value_t& p_sh) const { @@ -1490,15 +1494,16 @@ class Simplex_tree { * performed on these values during the computation of extended persistence. * @param[in] dgm Persistence diagram obtained after calling `extend_filtration()`, * `initialize_filtration()`, and `Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, CoefficientField >::compute_persistent_cohomology()`. + * @param[in] efd Structure containing the minimum and maximum values of the original filtration * @return A vector of four persistence diagrams. The first one is Ordinary, the * second one is Relative, the third one is Extended+ and the fourth one is Extended-. * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. 
*/ - std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ + std::vector>>> extended_persistence_subdiagrams(const std::vector>>& dgm, const Extended_filtration_data& efd){ std::vector>>> new_dgm(4); Filtration_value x, y; - Filtration_value minval_ = this->minval_; - Filtration_value maxval_ = this->maxval_; + Filtration_value minval = efd.minval; + Filtration_value maxval = efd.maxval; for(unsigned int i = 0; i < dgm.size(); i++){ int h = dgm[i].first; Filtration_value px = dgm[i].second.first; @@ -1506,18 +1511,18 @@ class Simplex_tree { if(std::isinf(py)) continue; else{ if ((px <= -1) & (py <= -1)){ - x = minval_ + (maxval_-minval_)*(px + 2); - y = minval_ + (maxval_-minval_)*(py + 2); + x = minval + (maxval-minval)*(px + 2); + y = minval + (maxval-minval)*(py + 2); new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y))); } else if ((px >= 1) & (py >= 1)){ - x = minval_ - (maxval_-minval_)*(px - 2); - y = minval_ - (maxval_-minval_)*(py - 2); + x = minval - (maxval-minval)*(px - 2); + y = minval - (maxval-minval)*(py - 2); new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y))); } else { - x = minval_ + (maxval_-minval_)*(px + 2); - y = minval_ - (maxval_-minval_)*(py - 2); + x = minval + (maxval-minval)*(px + 2); + y = minval - (maxval-minval)*(py - 2); if (x <= y){ new_dgm[2].push_back(std::make_pair(h, std::make_pair(x,y))); } @@ -1535,23 +1540,23 @@ class Simplex_tree { * and computes the extended persistence diagram induced by the lower-star filtration * computed with these values. * \post Note that after calling this function, the filtration - * values are actually modified. The function `compute_extended_persistence_subdiagrams()` + * values are actually modified. The function `extended_persistence_subdiagrams()` * retrieves the original values and separates the extended persistence diagram points * w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after * computing the persistent homology of the extended simplicial complex. * \pre Note that this code creates an extra vertex internally, so you should make sure that * the Simplex tree does not contain a vertex with the largest Vertex_handle. 
*/ - void extend_filtration() { + Extended_filtration_data extend_filtration() { // Compute maximum and minimum of filtration values Vertex_handle maxvert = std::numeric_limits::min(); - this->minval_ = std::numeric_limits::infinity(); - this->maxval_ = -std::numeric_limits::infinity(); + Filtration_value minval = std::numeric_limits::infinity(); + Filtration_value maxval = -std::numeric_limits::infinity(); for (auto sh = root_.members().begin(); sh != root_.members().end(); ++sh){ Filtration_value f = this->filtration(sh); - this->minval_ = std::min(this->minval_, f); - this->maxval_ = std::max(this->maxval_, f); + minval = std::min(minval, f); + maxval = std::max(maxval, f); maxvert = std::max(sh->first, maxvert); } @@ -1578,7 +1583,7 @@ class Simplex_tree { vr.push_back(maxvert); if (this->dimension(sh) == 0){ Filtration_value v = this->filtration(sh); - Filtration_value scaled_v = (v-this->minval_)/(this->maxval_-this->minval_); + Filtration_value scaled_v = (v-minval)/(maxval-minval); // Assign ascending value between -2 and -1 to vertex this->assign_filtration(sh, -2 + scaled_v); // Assign descending value between 1 and 2 to cone on vertex @@ -1593,6 +1598,10 @@ class Simplex_tree { // Automatically assign good values for simplices this->make_filtration_non_decreasing(); + + // Return the filtration data + Extended_filtration_data efd(minval, maxval); + return efd; } /** \brief Returns a vertex of `sh` that has the same filtration value as `sh` if it exists, and `null_vertex()` otherwise. diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index ae32eb82..b6284af4 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -57,8 +57,8 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": void remove_maximal_simplex(vector[int] simplex) bool prune_above_filtration(double filtration) bool make_filtration_non_decreasing() - void extend_filtration() - vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]]) + void compute_extended_filtration() + vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm) # Iterators over Simplex tree pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) Simplex_tree_simplices_iterator get_simplices_iterator_begin() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 7af44683..3502000a 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -405,7 +405,7 @@ cdef class SimplexTree: Note that after calling this function, the filtration values are actually modified within the Simplex_tree. - The function :func:`compute_extended_persistence_subdiagrams()` + The function :func:`extended_persistence()` retrieves the original values. .. note:: @@ -413,21 +413,31 @@ cdef class SimplexTree: Note that this code creates an extra vertex internally, so you should make sure that the Simplex_tree does not contain a vertex with the largest Vertex_handle. 
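        For intuition, the rescaling applied internally to an original vertex
        value ``v`` follows the formulas below (a sketch; the numbers are a
        made-up example)::

            minval, maxval = 1.0, 6.0   # extrema of the original vertex values
            v = 4.0
            ascending  = -2 + (v - minval) / (maxval - minval)  # lands in [-2, -1]
            descending =  2 - (v - minval) / (maxval - minval)  # cone copy, lands in [1, 2]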
""" - return self.get_ptr().extend_filtration() + return self.get_ptr().compute_extended_filtration() - def compute_extended_persistence_subdiagrams(self, dgm): + def extended_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): """This function retrieves good values for extended persistence, and separate the diagrams into the ordinary, relative, extended+ and extended- subdiagrams. - :param dgm: Persistence diagram obtained after calling :func:`extend_filtration()`, :func:`initialize_filtration()`, and :func:`persistence()`. - + :param homology_coeff_field: The homology coefficient field. Must be a + prime number. Default value is 11. + :type homology_coeff_field: int. + :param min_persistence: The minimum persistence value to take into + account (strictly greater than min_persistence). Default value is + 0.0. + Sets min_persistence to -1.0 to see all values. + :type min_persistence: float. + :param persistence_dim_max: If true, the persistent homology for the + maximal dimension in the complex is computed. If false, it is + ignored. Default is false. + :type persistence_dim_max: bool :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. .. note:: This function should be called only if :func:`extend_filtration()`, :func:`initialize_filtration()`, - and :func:`persistence()` have been called first! + and (optionally) :func:`persistence()` have been called first! .. note:: @@ -435,7 +445,15 @@ cdef class SimplexTree: original filtration values due to the internal transformation (scaling to [-2,-1]) that is performed on these values during the computation of extended persistence. 
""" - return self.get_ptr().compute_extended_persistence_subdiagrams(dgm) + cdef vector[pair[int, pair[double, double]]] persistence_result + if self.pcohptr == NULL: + self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max) + if self.pcohptr != NULL: + self.pcohptr.get_persistence(homology_coeff_field, min_persistence) + if self.pcohptr != NULL: + pairs = self.pcohptr.persistence_pairs() + persistence_result = [(len(splx1)-1, [self.filtration(splx1), self.filtration(splx2)]) for [splx1, splx2] in pairs] + return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result) def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 4a7062d6..50ed58d0 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -37,8 +37,12 @@ class Simplex_tree_interface : public Simplex_tree { using Filtered_simplices = std::vector; using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator; using Complex_simplex_iterator = typename Base::Complex_simplex_iterator; + using Extended_filtration_data = typename Base::Extended_filtration_data; public: + + Extended_filtration_data efd; + bool find_simplex(const Simplex& vh) { return (Base::find(vh) != Base::null_simplex()); } @@ -117,6 +121,15 @@ class Simplex_tree_interface : public Simplex_tree { return cofaces; } + void compute_extended_filtration() { + this->efd = this->extend_filtration(); + return; + } + + std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ + return this->extended_persistence_subdiagrams(dgm, this->efd); + } + void create_persistence(Gudhi::Persistent_cohomology_interface* pcoh) { Base::initialize_filtration(); pcoh = new Gudhi::Persistent_cohomology_interface(*this); diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 63eee9a5..20f6aabf 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -9,6 +9,7 @@ """ from gudhi import SimplexTree +import pytest __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" @@ -322,15 +323,16 @@ def test_extend_filtration(): ([0, 3, 6], 2.0) ] + dgms = st.extended_persistence() - dgm = st.persistence() - L = st.compute_extended_persistence_subdiagrams(dgm) - assert L == [ - [(0, (1.9999999999999998, 2.9999999999999996))], - [(1, (5.0, 4.0))], - [(0, (1.0, 6.0))], - [(1, (6.0, 1.0))] - ] + assert dgms[0][0][1][0] == pytest.approx(2.) + assert dgms[0][0][1][1] == pytest.approx(3.) + assert dgms[1][0][1][0] == pytest.approx(5.) + assert dgms[1][0][1][1] == pytest.approx(4.) + assert dgms[2][0][1][0] == pytest.approx(1.) + assert dgms[2][0][1][1] == pytest.approx(6.) + assert dgms[3][0][1][0] == pytest.approx(6.) + assert dgms[3][0][1][1] == pytest.approx(1.) 
def test_simplices_iterator(): -- cgit v1.2.3 From 361abfcfa9ec18c76837f847f8e2e3a060cf7db7 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Thu, 19 Mar 2020 17:02:55 -0400 Subject: added decoding function --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 82 +++++++++++---------------- src/python/gudhi/simplex_tree.pyx | 10 +--- src/python/include/Simplex_tree_interface.h | 27 ++++++++- 3 files changed, 63 insertions(+), 56 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 50b8e582..9008c5f2 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -87,6 +87,8 @@ class Simplex_tree { /* \brief Set of nodes sharing a same parent in the simplex tree. */ typedef Simplex_tree_siblings Siblings; + enum Extended_simplex_type {UP, DOWN, EXTRA}; + struct Key_simplex_base_real { Key_simplex_base_real() : key_(-1) {} void assign_key(Simplex_key k) { key_ = k; } @@ -1486,66 +1488,50 @@ class Simplex_tree { } } - /** \brief Retrieve good values for extended persistence, and separate the - * diagrams into the ordinary, relative, extended+ and extended- subdiagrams. + /** \brief Retrieve the original filtration value for a given simplex in the Simplex_tree. Since the + * computation of extended persistence requires modifying the filtration values, this function can be used + * to recover the original values. Moreover, computing extended persistence requires adding new simplices + * in the Simplex_tree. Hence, this function also outputs the type of each simplex. It can be either UP (which means + * that the simplex was present originally, and is thus part of the ascending extended filtration), DOWN (which means + * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or + * EXTRA (which means the simplex is the cone point). Note that if the simplex type is DOWN, the original filtration value + * is set to be the original filtration value of the corresponding (not coned) original simplex. * \pre This function should be called only if `extend_filtration()` has been called first! - * \post The coordinates of the persistence diagram points might be a little different than the - * original filtration values due to the internal transformation (scaling to [-2,-1]) that is - * performed on these values during the computation of extended persistence. - * @param[in] dgm Persistence diagram obtained after calling `extend_filtration()`, - * `initialize_filtration()`, and `Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, CoefficientField >::compute_persistent_cohomology()`. - * @param[in] efd Structure containing the minimum and maximum values of the original filtration - * @return A vector of four persistence diagrams. The first one is Ordinary, the - * second one is Relative, the third one is Extended+ and the fourth one is Extended-. - * See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + * \post The output filtration value is supposed to be the same, but might be a little different, than the + * original filtration value, due to the internal transformation (scaling to [-2,-1]) that is + * performed on the original filtration values during the computation of extended persistence. + * @param[in] f Filtration value of the simplex in the extended (i.e., modified) filtration. 
+   * @param[in] efd Structure containing the minimum and maximum values of the original filtration. This is the output of `extend_filtration()`.
+   * @return A pair containing the original filtration value of the simplex as well as the simplex type.
    */
-  std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> extended_persistence_subdiagrams(const std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>& dgm, const Extended_filtration_data& efd){
-    std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> new_dgm(4);
-    Filtration_value x, y;
+  std::pair<Filtration_value, Extended_simplex_type> decode_extended_filtration(Filtration_value f, const Extended_filtration_data& efd){
+    std::pair<Filtration_value, Extended_simplex_type> p;
     Filtration_value minval = efd.minval;
     Filtration_value maxval = efd.maxval;
-    for(unsigned int i = 0; i < dgm.size(); i++){
-      int h = dgm[i].first;
-      Filtration_value px = dgm[i].second.first;
-      Filtration_value py = dgm[i].second.second;
-      if(std::isinf(py)) continue;
-      else{
-        if ((px <= -1) & (py <= -1)){
-          x = minval + (maxval-minval)*(px + 2);
-          y = minval + (maxval-minval)*(py + 2);
-          new_dgm[0].push_back(std::make_pair(h, std::make_pair(x,y)));
-        }
-        else if ((px >= 1) & (py >= 1)){
-          x = minval - (maxval-minval)*(px - 2);
-          y = minval - (maxval-minval)*(py - 2);
-          new_dgm[1].push_back(std::make_pair(h, std::make_pair(x,y)));
-        }
-        else {
-          x = minval + (maxval-minval)*(px + 2);
-          y = minval - (maxval-minval)*(py - 2);
-          if (x <= y){
-            new_dgm[2].push_back(std::make_pair(h, std::make_pair(x,y)));
-          }
-          else{
-            new_dgm[3].push_back(std::make_pair(h, std::make_pair(x,y)));
-          }
-        }
-      }
+    if (f >= -2 && f <= -1){
+      p.first = minval + (maxval-minval)*(f + 2); p.second = UP;
     }
-    return new_dgm;
-  }
+    else if (f >= 1 && f <= 2){
+      p.first = minval - (maxval-minval)*(f - 2); p.second = DOWN;
+    }
+    else{
+      p.first = -3; p.second = EXTRA;
+    }
+    return p;
+  };

   /** \brief Extend filtration for computing extended persistence.
    * This function only uses the filtration values at the 0-dimensional simplices,
    * and computes the extended persistence diagram induced by the lower-star filtration
    * computed with these values.
    * \post Note that after calling this function, the filtration
-   * values are actually modified. The function `extended_persistence_subdiagrams()`
-   * retrieves the original values and separates the extended persistence diagram points
-   * w.r.t. their types (Ord, Rel, Ext+, Ext-) and should always be called after
-   * computing the persistent homology of the extended simplicial complex.
+   * values are actually modified. The function `decode_extended_filtration()`
+   * retrieves the original values and outputs the extended simplex type.
    * \pre Note that this code creates an extra vertex internally, so you should make sure that
-   * the Simplex tree does not contain a vertex with the largest Vertex_handle.
+   * the Simplex tree does not contain a vertex with the largest Vertex_handle.
+   * @return A data structure containing the maximum and minimum values of the original filtration.
+   * It is meant to be provided as input to `decode_extended_filtration()` in order to retrieve
+   * the original filtration values for each simplex.
   */
  Extended_filtration_data extend_filtration() {

diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 3502000a..2cd81c14 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -415,9 +415,9 @@ cdef class SimplexTree:
         """
         return self.get_ptr().compute_extended_filtration()

-    def extended_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+    def extended_persistence(self, homology_coeff_field=11, min_persistence=0):
         """This function retrieves good values for extended persistence, and separates the diagrams
-        into the ordinary, relative, extended+ and extended- subdiagrams.
+        into the Ordinary, Relative, Extended+ and Extended- subdiagrams.

         :param homology_coeff_field: The homology coefficient field. Must be a
             prime number. Default value is 11.
         :type homology_coeff_field: int.
@@ -427,10 +427,6 @@ cdef class SimplexTree:
             0.0. Set min_persistence to -1.0 to see all values.
         :type min_persistence: float.
-        :param persistence_dim_max: If true, the persistent homology for the
-            maximal dimension in the complex is computed. If false, it is
-            ignored. Default is false.
-        :type persistence_dim_max: bool
         :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes.

         .. note::
@@ -447,7 +443,7 @@
         """
         cdef vector[pair[int, pair[double, double]]] persistence_result
         if self.pcohptr == NULL:
-            self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
+            self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), True)
         if self.pcohptr != NULL:
             self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         if self.pcohptr != NULL:

diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 50ed58d0..a6b1a06e 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -38,6 +38,7 @@ class Simplex_tree_interface : public Simplex_tree {
   using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator;
   using Complex_simplex_iterator = typename Base::Complex_simplex_iterator;
   using Extended_filtration_data = typename Base::Extended_filtration_data;
+  using Extended_simplex_type = typename Base::Extended_simplex_type;

 public:

@@ -127,7 +128,31 @@ class Simplex_tree_interface : public Simplex_tree {
   }

   std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> compute_extended_persistence_subdiagrams(const std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>& dgm){
-    return this->extended_persistence_subdiagrams(dgm, this->efd);
+    std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> new_dgm(4);
+    for (unsigned int i = 0; i < dgm.size(); i++){
+      std::pair<Filtration_value, Extended_simplex_type> px = this->decode_extended_filtration(dgm[i].second.first, this->efd);
+      std::pair<Filtration_value, Extended_simplex_type> py = this->decode_extended_filtration(dgm[i].second.second, this->efd);
+      std::pair<int, std::pair<Filtration_value, Filtration_value>> pd_point = std::make_pair(dgm[i].first, std::make_pair(px.first, py.first));
+      //Ordinary
+      if (px.second == Base::UP && py.second == Base::UP){
+        new_dgm[0].push_back(pd_point);
+      }
+      // Relative
+      else if (px.second == Base::DOWN && py.second == Base::DOWN){
+        new_dgm[1].push_back(pd_point);
+      }
+      else{
+        // Extended+
+        if (px.first < py.first){
+          new_dgm[2].push_back(pd_point);
+        }
+        //Extended-
+        else{
+          new_dgm[3].push_back(pd_point);
+        }
+      }
+    }
+    return new_dgm;
   }

   void create_persistence(Gudhi::Persistent_cohomology_interface* pcoh) {
--
cgit
v1.2.3

From 1e0e378ab442672ef569e93c4114b0e99ea70f6e Mon Sep 17 00:00:00 2001
From: MathieuCarriere
Date: Fri, 20 Mar 2020 12:47:13 -0400
Subject: small fix

---
 src/python/gudhi/simplex_tree.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 2cd81c14..5b850462 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -443,7 +443,7 @@ cdef class SimplexTree:
         """
         cdef vector[pair[int, pair[double, double]]] persistence_result
         if self.pcohptr == NULL:
-            self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), True)
+            self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False)
         if self.pcohptr != NULL:
             self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
         if self.pcohptr != NULL:
--
cgit v1.2.3


From 59585b27d83f3835f46a47c90c732bdb2a19d376 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Mon, 23 Mar 2020 12:14:07 +0100
Subject: Doc review: document what the filtration value is when not computed

---
 src/Alpha_complex/include/gudhi/Alpha_complex.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index 65c969d2..1b5d6997 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -259,7 +259,8 @@ class Alpha_complex {
    * `true`.
    * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not CGAL::Epeck_d.
-   * @param[in] default_filtration_value Set this value to `true` if filtration values are not needed to be computed.
+   * @param[in] default_filtration_value Set this value to `true` if filtration values do not need to be computed
+   * (they will be set to `NaN`).
    * Default value is `false` (which means compute the filtration values).
    *
    * @return true if creation succeeds, false otherwise.
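A minimal sketch of what this documented `NaN` behaviour means for client code. The construction below assumes, purely as an illustration, a binding that exposes the same `default_filtration_value` switch as the C++ `create_complex()` above; only the `isnan` check is the point.

    import math
    import gudhi

    ac = gudhi.AlphaComplex(points=[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    # Hypothetical mirror of the C++ `default_filtration_value` flag:
    st = ac.create_simplex_tree(default_filtration_value=True)
    for simplex, filtration in st.get_simplices():
        assert math.isnan(filtration)  # values were never computed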
-- cgit v1.2.3 From 24b751dd555236a522a40c07e25cf4a2e291f194 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 23 Mar 2020 12:14:51 +0100 Subject: Code review: use complex_simplex_range instead of the filtration one --- src/Alpha_complex/test/Delaunay_complex_unit_test.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp index 71164705..fa97f249 100644 --- a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp +++ b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp @@ -51,14 +51,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of // Alpha complex Gudhi::Simplex_tree<> stree_from_alpha_complex; BOOST_CHECK(alpha_complex.create_complex(stree_from_alpha_complex)); - stree_from_alpha_complex.initialize_filtration(); // Delaunay complex Gudhi::Simplex_tree<> stree_from_delaunay_complex; BOOST_CHECK(alpha_complex.create_complex(stree_from_delaunay_complex, 0., false, true)); // Check all the simplices from alpha complex are in the Delaunay complex - for (auto f_simplex : stree_from_alpha_complex.filtration_simplex_range()) { + for (auto f_simplex : stree_from_alpha_complex.complex_simplex_range()) { std::vector::Vertex_handle> simplex; for (Gudhi::Simplex_tree<>::Vertex_handle vertex : stree_from_alpha_complex.simplex_vertex_range(f_simplex)) { std::cout << "(" << vertex << ")"; -- cgit v1.2.3 From fb4608ba9e1ccf2edfc28359a1632c6bd4c567b0 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 23 Mar 2020 16:36:52 +0100 Subject: Code review: remove useless vector --- src/Alpha_complex/test/Delaunay_complex_unit_test.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp index fa97f249..c1cc1fab 100644 --- a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp +++ b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp @@ -39,6 +39,9 @@ typedef CGAL::Epick_d< CGAL::Dimension_tag<5> > Inexact_kernel_s; typedef boost::mpl::list list_of_kernel_variants; +using Simplex_tree = Gudhi::Simplex_tree<>; +using Simplex_handle = Simplex_tree::Simplex_handle; + BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of_kernel_variants) { std::cout << "*****************************************************************************************************"; using Point = typename TestedKernel::Point_d; @@ -49,22 +52,16 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of Gudhi::alpha_complex::Alpha_complex alpha_complex(points); // Alpha complex - Gudhi::Simplex_tree<> stree_from_alpha_complex; + Simplex_tree stree_from_alpha_complex; BOOST_CHECK(alpha_complex.create_complex(stree_from_alpha_complex)); // Delaunay complex - Gudhi::Simplex_tree<> stree_from_delaunay_complex; + Simplex_tree stree_from_delaunay_complex; BOOST_CHECK(alpha_complex.create_complex(stree_from_delaunay_complex, 0., false, true)); // Check all the simplices from alpha complex are in the Delaunay complex for (auto f_simplex : stree_from_alpha_complex.complex_simplex_range()) { - std::vector::Vertex_handle> simplex; - for (Gudhi::Simplex_tree<>::Vertex_handle vertex : stree_from_alpha_complex.simplex_vertex_range(f_simplex)) { - std::cout << "(" << vertex << ")"; - simplex.push_back(vertex); - } - std::cout << std::endl; - Gudhi::Simplex_tree<>::Simplex_handle sh = 
stree_from_delaunay_complex.find(simplex); + Simplex_handle sh = stree_from_delaunay_complex.find(stree_from_alpha_complex.simplex_vertex_range(f_simplex)); BOOST_CHECK(std::isnan(stree_from_delaunay_complex.filtration(sh))); BOOST_CHECK(sh != stree_from_delaunay_complex.null_simplex()); } -- cgit v1.2.3 From cf29f4a485d06469d17c6d12d306901fa3c5ab36 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 23 Mar 2020 18:11:15 +0100 Subject: Shorter headers in sphinx: Introduced in -> Since and Copyright -> License --- src/python/doc/alpha_complex_sum.inc | 4 ++-- src/python/doc/bottleneck_distance_sum.inc | 4 ++-- src/python/doc/cubical_complex_sum.inc | 4 ++-- src/python/doc/cubical_complex_user.rst | 2 +- src/python/doc/nerve_gic_complex_sum.inc | 4 ++-- src/python/doc/persistence_graphical_tools_sum.inc | 4 ++-- src/python/doc/persistent_cohomology_sum.inc | 4 ++-- src/python/doc/persistent_cohomology_user.rst | 2 +- src/python/doc/point_cloud_sum.inc | 4 ++-- src/python/doc/representations_sum.inc | 4 ++-- src/python/doc/rips_complex_sum.inc | 4 ++-- src/python/doc/rips_complex_user.rst | 2 +- src/python/doc/simplex_tree_sum.inc | 4 ++-- src/python/doc/tangential_complex_sum.inc | 4 ++-- src/python/doc/wasserstein_distance_sum.inc | 4 ++-- src/python/doc/witness_complex_sum.inc | 4 ++-- 16 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc index 00c35155..9e6414d0 100644 --- a/src/python/doc/alpha_complex_sum.inc +++ b/src/python/doc/alpha_complex_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | | - | :alt: Alpha complex representation | | :Introduced in: GUDHI 2.0.0 | + | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 | | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | | - | | the circumradius of the simplex if the circumsphere is empty (the | :Copyright: MIT (`GPL v3 `_) | + | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 `_) | | | simplex is then said to be Gabriel), and as the minimum of the | | | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | | | Gabriel otherwise. | | diff --git a/src/python/doc/bottleneck_distance_sum.inc b/src/python/doc/bottleneck_distance_sum.inc index a01e7f04..0de4625c 100644 --- a/src/python/doc/bottleneck_distance_sum.inc +++ b/src/python/doc/bottleneck_distance_sum.inc @@ -4,9 +4,9 @@ +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi | | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. 
It's the shortest distance b for which there exists a | | - | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Introduced in: GUDHI 2.0.0 | + | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 | | | diagonal points) such that any couple of matched points are at | | - | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :Copyright: MIT (`GPL v3 `_) | + | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 `_) | | the longest edge | norm in :math:`\mathbb{R}^2`. | | | | | :Requires: `CGAL `__ :math:`\geq` 4.11.0 | +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc index ab6388e5..28bf8e94 100644 --- a/src/python/doc/cubical_complex_sum.inc +++ b/src/python/doc/cubical_complex_sum.inc @@ -4,9 +4,9 @@ +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+ | .. figure:: | The cubical complex is an example of a structured complex useful in | :Author: Pawel Dlotko | | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | computational mathematics (specially rigorous numerics) and image | | - | :alt: Cubical complex representation | analysis. | :Introduced in: GUDHI 2.0.0 | + | :alt: Cubical complex representation | analysis. | :Since: GUDHI 2.0.0 | | :figclass: align-center | | | - | | | :Copyright: MIT | + | | | :License: MIT | | | | | +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+ | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` | diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index 56cf0170..93ca6b24 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -8,7 +8,7 @@ Definition ---------- ===================================== ===================================== ===================================== -:Author: Pawel Dlotko :Introduced in: GUDHI PYTHON 2.0.0 :Copyright: GPL v3 +:Author: Pawel Dlotko :Since: GUDHI PYTHON 2.0.0 :License: GPL v3 ===================================== ===================================== ===================================== +---------------------------------------------+----------------------------------------------------------------------+ diff --git a/src/python/doc/nerve_gic_complex_sum.inc b/src/python/doc/nerve_gic_complex_sum.inc index d5356eca..7fe55aff 100644 --- a/src/python/doc/nerve_gic_complex_sum.inc +++ b/src/python/doc/nerve_gic_complex_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. 
| :Author: Mathieu Carrière | | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | | - | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Introduced in: GUDHI 2.3.0 | + | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 | | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | | - | | the image of a scalar-valued function defined on the data. | :Copyright: MIT (`GPL v3 `_) | + | | the image of a scalar-valued function defined on the data. | :License: MIT (`GPL v3 `_) | | | | | | | | :Requires: `CGAL `__ :math:`\geq` 4.11.0 | | | | | diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc index 723c0f78..b68d3d7e 100644 --- a/src/python/doc/persistence_graphical_tools_sum.inc +++ b/src/python/doc/persistence_graphical_tools_sum.inc @@ -4,9 +4,9 @@ +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | - | | | :Introduced in: GUDHI 2.0.0 | + | | | :Since: GUDHI 2.0.0 | | | Note that these functions return the matplotlib axis, allowing | | - | | for further modifications (title, aspect, etc.) | :Copyright: MIT | + | | for further modifications (title, aspect, etc.) | :License: MIT | | | | | | | | :Requires: matplotlib, numpy and scipy | +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ diff --git a/src/python/doc/persistent_cohomology_sum.inc b/src/python/doc/persistent_cohomology_sum.inc index 9c29bfaa..0effb50f 100644 --- a/src/python/doc/persistent_cohomology_sum.inc +++ b/src/python/doc/persistent_cohomology_sum.inc @@ -4,9 +4,9 @@ +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | The theory of homology consists in attaching to a topological space | :Author: Clément Maria | | ../../doc/Persistent_cohomology/3DTorus_poch.png | a sequence of (homology) groups, capturing global topological | | - | :figclass: align-center | features like connected components, holes, cavities, etc. Persistent | :Introduced in: GUDHI 2.0.0 | + | :figclass: align-center | features like connected components, holes, cavities, etc. Persistent | :Since: GUDHI 2.0.0 | | | homology studies the evolution -- birth, life and death -- of these | | - | Rips Persistent Cohomology on a 3D | features when the topological space is changing. Consequently, the | :Copyright: MIT | + | Rips Persistent Cohomology on a 3D | features when the topological space is changing. Consequently, the | :License: MIT | | Torus | theory is essentially composed of three elements: topological spaces, | | | | their homology groups and an evolution scheme. 
| | | | | | diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index de83cda1..5f931b3a 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -7,7 +7,7 @@ Persistent cohomology user manual Definition ---------- ===================================== ===================================== ===================================== -:Author: Clément Maria :Introduced in: GUDHI PYTHON 2.0.0 :Copyright: GPL v3 +:Author: Clément Maria :Since: GUDHI PYTHON 2.0.0 :License: GPL v3 ===================================== ===================================== ===================================== +-----------------------------------------------------------------+-----------------------------------------------------------------------+ diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc index 77245e86..0a159680 100644 --- a/src/python/doc/point_cloud_sum.inc +++ b/src/python/doc/point_cloud_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, etc. | :Author: Vincent Rouvreau | | | :math:`(y_1, y_2, \ldots, y_d)` | | | - | | | :Introduced in: GUDHI 2.0.0 | + | | | :Since: GUDHI 2.0.0 | | | | | - | | | :Copyright: MIT (`GPL v3 `_) | + | | | :License: MIT (`GPL v3 `_) | | | Parts of this package require CGAL. | | | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | | | | | diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index edb8a448..eac89b9d 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -4,9 +4,9 @@ +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | | - | | | :Introduced in: GUDHI 3.1.0 | + | | | :Since: GUDHI 3.1.0 | | | | | - | | | :Copyright: MIT | + | | | :License: MIT | | | | | | | | :Requires: scikit-learn | +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+ diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc index a1f0e469..6feb74cd 100644 --- a/src/python/doc/rips_complex_sum.inc +++ b/src/python/doc/rips_complex_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+ | .. figure:: | Rips complex is a simplicial complex constructed from a one skeleton | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse | | ../../doc/Rips_complex/rips_complex_representation.png | graph. 
| | - | :figclass: align-center | | :Introduced in: GUDHI 2.0.0 | + | :figclass: align-center | | :Since: GUDHI 2.0.0 | | | The filtration value of each edge is computed from a user-given | | - | | distance function and is inserted until a user-given threshold | :Copyright: MIT | + | | distance function and is inserted until a user-given threshold | :License: MIT | | | value. | | | | | | | | This complex can be built from a point cloud and a distance function, | | diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index a27573e8..8efb12e6 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -8,7 +8,7 @@ Definition ---------- ==================================================================== ================================ ====================== -:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse :Introduced in: GUDHI 2.0.0 :Copyright: GPL v3 +:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse :Since: GUDHI 2.0.0 :License: GPL v3 ==================================================================== ================================ ====================== +-------------------------------------------+----------------------------------------------------------------------+ diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc index 3c637b8c..a8858f16 100644 --- a/src/python/doc/simplex_tree_sum.inc +++ b/src/python/doc/simplex_tree_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+ | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria | | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | | - | :alt: Simplex tree representation | | :Introduced in: GUDHI 2.0.0 | + | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 | | :figclass: align-center | The data structure is described in | | - | | :cite:`boissonnatmariasimplextreealgorithmica` | :Copyright: MIT | + | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT | | | | | +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+ | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` | diff --git a/src/python/doc/tangential_complex_sum.inc b/src/python/doc/tangential_complex_sum.inc index ddc3e609..45ce2a66 100644 --- a/src/python/doc/tangential_complex_sum.inc +++ b/src/python/doc/tangential_complex_sum.inc @@ -4,9 +4,9 @@ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin | | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | | - | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Introduced in: GUDHI 2.0.0 | + | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Since: GUDHI 2.0.0 | | | an unknown manifold. 
The running time depends only linearly on the | | - | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :Copyright: MIT (`GPL v3 `_) | + | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :License: MIT (`GPL v3 `_) | | | dimension :math:`k`. | | | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc index 1632befa..0ff22035 100644 --- a/src/python/doc/wasserstein_distance_sum.inc +++ b/src/python/doc/wasserstein_distance_sum.inc @@ -4,9 +4,9 @@ +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe | | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams. It's the minimum value c that can be achieved | | - | :figclass: align-center | by a perfect matching between the points of the two diagrams (+ all | :Introduced in: GUDHI 3.1.0 | + | :figclass: align-center | by a perfect matching between the points of the two diagrams (+ all | :Since: GUDHI 3.1.0 | | | diagonal points), where the value of a matching is defined as the | | - | Wasserstein distance is the q-th root of the sum of the | q-th root of the sum of all edge lengths to the power q. Edge lengths| :Copyright: MIT | + | Wasserstein distance is the q-th root of the sum of the | q-th root of the sum of all edge lengths to the power q. Edge lengths| :License: MIT | | edge lengths to the power q. | are measured in norm p, for :math:`1 \leq p \leq \infty`. | | | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ diff --git a/src/python/doc/witness_complex_sum.inc b/src/python/doc/witness_complex_sum.inc index f9c009ab..34d4df4a 100644 --- a/src/python/doc/witness_complex_sum.inc +++ b/src/python/doc/witness_complex_sum.inc @@ -4,9 +4,9 @@ +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich | | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | | - | :alt: Witness complex representation | | :Introduced in: GUDHI 2.0.0 | + | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 | | :figclass: align-center | The data structure is described in | | - | | :cite:`boissonnatmariasimplextreealgorithmica`. | :Copyright: MIT (`GPL v3 `_ for Euclidean versions only) | + | | :cite:`boissonnatmariasimplextreealgorithmica`. 
| :License: MIT (`GPL v3 `_ for Euclidean versions only) | | | | | | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 for Euclidean versions only | +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ -- cgit v1.2.3 From 0b4eddeb0d53d465016d5eb913b382123bc5b891 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 23 Mar 2020 18:35:07 +0100 Subject: Avoid consecutive push_back --- src/python/include/Persistent_cohomology_interface.h | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 22d6f654..89ff5137 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -117,8 +117,8 @@ persistent_cohomology::Persistent_cohomologyvertex_with_same_filtration(t); - diags[dim].push_back(v); - diags[dim].push_back(w); + auto& d = diags[dim]; + d.insert(d.end(), { v, w }); } } return out; @@ -152,8 +152,8 @@ persistent_cohomology::Persistent_cohomologyedge_with_same_filtration(t); @@ -165,9 +165,8 @@ persistent_cohomology::Persistent_cohomologysimplex_vertex_range(s)); if(diags.size()==0)diags.emplace_back(); - diags[0].push_back(v); - diags[0].push_back(w1); - diags[0].push_back(w2); + auto& d = diags[0]; + d.insert(d.end(), { v, w1, w2 }); } else { auto es = stptr_->edge_with_same_filtration(s); auto&& es_vertices = stptr_->simplex_vertex_range(es); @@ -176,10 +175,8 @@ persistent_cohomology::Persistent_cohomology Date: Mon, 23 Mar 2020 18:52:49 +0100 Subject: Reuse vector Reuse + copy should be slightly faster than regrowing each time (and moving) --- src/python/include/Persistent_cohomology_interface.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 89ff5137..3ce40af5 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -73,15 +73,17 @@ persistent_cohomology::Persistent_cohomology, std::vector>> persistence_pairs; auto const& pairs = Base::get_persistent_pairs(); persistence_pairs.reserve(pairs.size()); + std::vector birth; + std::vector death; for (auto pair : pairs) { - std::vector birth; + birth.clear(); if (get<0>(pair) != stptr_->null_simplex()) { for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))) { birth.push_back(vertex); } } - std::vector death; + death.clear(); if (get<1>(pair) != stptr_->null_simplex()) { death.reserve(birth.size()+1); for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))) { @@ -89,7 +91,7 @@ persistent_cohomology::Persistent_cohomology Date: Mon, 23 Mar 2020 21:54:56 +0100 Subject: Add test --- src/python/test/test_simplex_generators.py | 57 ++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100755 src/python/test/test_simplex_generators.py diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py new file mode 100755 index 00000000..efb5f8e3 --- /dev/null +++ b/src/python/test/test_simplex_generators.py @@ -0,0 +1,57 @@ +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - 
which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + Author(s): Marc Glisse + + Copyright (C) 2020 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +import gudhi +import numpy as np + + +def test_flag_generators(): + pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03], [100, 0], [100, 3.01], [103, 0], [103.02, 3.03]]) + r = gudhi.RipsComplex(pts, max_edge_length=4) + st = r.create_simplex_tree(max_dimension=50) + st.persistence() + g = st.flag_persistence_generators() + assert np.array_equal(g[0], [[2, 2, 0], [1, 1, 0], [3, 3, 1], [6, 6, 4], [5, 5, 4], [7, 7, 5]]) + assert len(g[1]) == 1 + assert np.array_equal(g[1][0], [[3, 2, 2, 1]]) + assert np.array_equal(g[2], [0, 4]) + assert len(g[3]) == 1 + assert np.array_equal(g[3][0], [[7, 6]]) + + +def test_lower_star_generators(): + st = gudhi.SimplexTree() + st.insert([0, 1, 2], -10) + st.insert([0, 3], -10) + st.insert([1, 3], -10) + st.assign_filtration([2], -1) + st.assign_filtration([3], 0) + st.assign_filtration([0], 1) + st.assign_filtration([1], 2) + st.make_filtration_non_decreasing() + st.persistence(min_persistence=-1) + g = st.lower_star_persistence_generators(min_persistence=-1) + assert len(g[0]) == 2 + assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]]) + assert np.array_equal(g[0][1], [[1, 1]]) + assert len(g[1]) == 2 + assert np.array_equal(g[1][0], [2]) + assert np.array_equal(g[1][1], [1]) + + +def test_empty(): + st = gudhi.SimplexTree() + st.persistence() + assert st.lower_star_persistence_generators() == ([], []) + g = st.flag_persistence_generators() + assert np.array_equal(g[0], np.empty((0, 3))) + assert g[1] == [] + assert np.array_equal(g[2], []) + assert g[3] == [] -- cgit v1.2.3 From bc223c3cc7cb9e9c0bb3573af720fce9c5380b94 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Mon, 23 Mar 2020 21:22:16 -0400 Subject: new fixes --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 25 +++++++++++++++----- src/python/gudhi/simplex_tree.pxd | 2 +- src/python/gudhi/simplex_tree.pyx | 21 +++++++---------- src/python/include/Simplex_tree_interface.h | 34 ++++++++++++++------------- src/python/test/test_simplex_tree.py | 7 ++---- 5 files changed, 48 insertions(+), 41 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 9008c5f2..de97d6f2 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -42,6 +42,20 @@ namespace Gudhi { +/** + * \class Extended_simplex_type Simplex_tree.h gudhi/Simplex_tree.h + * \brief Extended simplex type data structure for representing the type of simplices in an extended filtration. + * + * \details The extended simplex type can be either UP (which means + * that the simplex was present originally, and is thus part of the ascending extended filtration), DOWN (which means + * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or + * EXTRA (which means the simplex is the cone point). + * + * Details may be found in section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z. + * + */ +enum class Extended_simplex_type {UP, DOWN, EXTRA}; + struct Simplex_tree_options_full_featured; /** @@ -87,7 +101,7 @@ class Simplex_tree { /* \brief Set of nodes sharing a same parent in the simplex tree. 
*/ typedef Simplex_tree_siblings Siblings; - enum Extended_simplex_type {UP, DOWN, EXTRA}; + struct Key_simplex_base_real { Key_simplex_base_real() : key_(-1) {} @@ -106,7 +120,7 @@ class Simplex_tree { Filtration_value minval; Filtration_value maxval; Extended_filtration_data(){} - Extended_filtration_data(Filtration_value vmin, Filtration_value vmax){ minval = vmin; maxval = vmax; } + Extended_filtration_data(Filtration_value vmin, Filtration_value vmax): minval(vmin), maxval(vmax) {} }; typedef typename std::conditional::type Key_simplex_base; @@ -1370,7 +1384,6 @@ class Simplex_tree { // Replacing if(f=max)) would mean that if f is NaN, we replace it with the max of the children. // That seems more useful than keeping NaN. if (!(simplex.second.filtration() >= max_filt_border_value)) { - // Store the filtration modification information modified = true; simplex.second.assign_filtration(max_filt_border_value); @@ -1509,13 +1522,13 @@ class Simplex_tree { Filtration_value minval = efd.minval; Filtration_value maxval = efd.maxval; if (f >= -2 && f <= -1){ - p.first = minval + (maxval-minval)*(f + 2); p.second = UP; + p.first = minval + (maxval-minval)*(f + 2); p.second = Extended_simplex_type::UP; } else if (f >= 1 && f <= 2){ - p.first = minval - (maxval-minval)*(f - 2); p.second = DOWN; + p.first = minval - (maxval-minval)*(f - 2); p.second = Extended_simplex_type::DOWN; } else{ - p.first = -3; p.second = EXTRA; + p.first = std::numeric_limits::quiet_NaN(); p.second = Extended_simplex_type::EXTRA; } return p; }; diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index b6284af4..595f22bb 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -58,7 +58,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": bool prune_above_filtration(double filtration) bool make_filtration_non_decreasing() void compute_extended_filtration() - vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm) + vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) # Iterators over Simplex tree pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) Simplex_tree_simplices_iterator get_simplices_iterator_begin() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 5b850462..bcb1578d 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -411,7 +411,7 @@ cdef class SimplexTree: .. note:: Note that this code creates an extra vertex internally, so you should make sure that - the Simplex_tree does not contain a vertex with the largest Vertex_handle. + the Simplex_tree does not contain a vertex with the largest possible value (i.e., 4294967295). """ return self.get_ptr().compute_extended_filtration() @@ -422,18 +422,16 @@ cdef class SimplexTree: :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11. :type homology_coeff_field: int. - :param min_persistence: The minimum persistence value to take into + :param min_persistence: The minimum persistence value (i.e., the absolute value of the difference between the persistence diagram point coordinates) to take into account (strictly greater than min_persistence). Default value is 0.0. Sets min_persistence to -1.0 to see all values. :type min_persistence: float. 
- :returns: A vector of four persistence diagrams. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. .. note:: - This function should be called only if :func:`extend_filtration()`, - :func:`initialize_filtration()`, - and (optionally) :func:`persistence()` have been called first! + This function should be called only if :func:`extend_filtration()` has been called first! .. note:: @@ -442,14 +440,11 @@ cdef class SimplexTree: performed on these values during the computation of extended persistence. """ cdef vector[pair[int, pair[double, double]]] persistence_result - if self.pcohptr == NULL: - self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False) - if self.pcohptr != NULL: - self.pcohptr.get_persistence(homology_coeff_field, min_persistence) if self.pcohptr != NULL: - pairs = self.pcohptr.persistence_pairs() - persistence_result = [(len(splx1)-1, [self.filtration(splx1), self.filtration(splx2)]) for [splx1, splx2] in pairs] - return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result) + del self.pcohptr + self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False) + persistence_result = self.pcohptr.get_persistence(homology_coeff_field, -1.) + return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence) def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index a6b1a06e..1a18aed6 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -38,7 +38,6 @@ class Simplex_tree_interface : public Simplex_tree { using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator; using Complex_simplex_iterator = typename Base::Complex_simplex_iterator; using Extended_filtration_data = typename Base::Extended_filtration_data; - using Extended_simplex_type = typename Base::Extended_simplex_type; public: @@ -124,31 +123,34 @@ class Simplex_tree_interface : public Simplex_tree { void compute_extended_filtration() { this->efd = this->extend_filtration(); + this->initialize_filtration(); return; } - std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm){ + std::vector>>> compute_extended_persistence_subdiagrams(const std::vector>>& dgm, Filtration_value min_persistence){ std::vector>>> new_dgm(4); for (unsigned int i = 0; i < dgm.size(); i++){ std::pair px = this->decode_extended_filtration(dgm[i].second.first, this->efd); std::pair py = this->decode_extended_filtration(dgm[i].second.second, this->efd); std::pair> pd_point = std::make_pair(dgm[i].first, std::make_pair(px.first, py.first)); - //Ordinary - if (px.second == Base::UP && py.second == Base::UP){ - new_dgm[0].push_back(pd_point); - } - // Relative - else if (px.second == Base::DOWN && py.second == Base::DOWN){ - new_dgm[1].push_back(pd_point); - } - else{ - // Extended+ - if (px.first < py.first){ - 
new_dgm[2].push_back(pd_point); + if(std::abs(px.first - py.first) > min_persistence){ + //Ordinary + if (px.second == Extended_simplex_type::UP && py.second == Extended_simplex_type::UP){ + new_dgm[0].push_back(pd_point); + } + // Relative + else if (px.second == Extended_simplex_type::DOWN && py.second == Extended_simplex_type::DOWN){ + new_dgm[1].push_back(pd_point); } - //Extended- else{ - new_dgm[3].push_back(pd_point); + // Extended+ + if (px.first < py.first){ + new_dgm[2].push_back(pd_point); + } + //Extended- + else{ + new_dgm[3].push_back(pd_point); + } } } } diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 20f6aabf..70b26e97 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -291,10 +291,8 @@ def test_extend_filtration(): ([5], 6.0) ] - st.extend_filtration() - st.initialize_filtration() - + assert list(st.get_filtration()) == [ ([6], -3.0), ([0], -2.0), @@ -323,7 +321,7 @@ def test_extend_filtration(): ([0, 3, 6], 2.0) ] - dgms = st.extended_persistence() + dgms = st.extended_persistence(min_persistence=-1.) assert dgms[0][0][1][0] == pytest.approx(2.) assert dgms[0][0][1][1] == pytest.approx(3.) @@ -334,7 +332,6 @@ def test_extend_filtration(): assert dgms[3][0][1][0] == pytest.approx(6.) assert dgms[3][0][1][1] == pytest.approx(1.) - def test_simplices_iterator(): st = SimplexTree() -- cgit v1.2.3 From cb838b2ea4a4db9c54f71103001bdafb90766306 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 24 Mar 2020 06:37:00 +0100 Subject: merge https://github.com/mglisse/gudhi-devel/tree/alpha-cache and fix conflicts --- src/Alpha_complex/include/gudhi/Alpha_complex.h | 89 ++++++++++--------------- 1 file changed, 37 insertions(+), 52 deletions(-) diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h index 1b5d6997..eb4ef427 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h @@ -132,6 +132,8 @@ class Alpha_complex { Delaunay_triangulation* triangulation_; /** \brief Kernel for triangulation_ functions access.*/ Kernel kernel_; + /** \brief Cache for geometric constructions: circumcenter and squared radius of a simplex.*/ + std::vector> cache_; public: /** \brief Alpha_complex constructor from an OFF file name. @@ -246,6 +248,24 @@ class Alpha_complex { } } + template + auto& get_cache(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) { + auto k = cplx.key(s); + if(k==cplx.null_key()){ + k = cache_.size(); + cplx.assign_key(s, k); + // Use a transform_range? Check the impact on perf. + thread_local std::vector v; + v.clear(); + for (auto vertex : cplx.simplex_vertex_range(s)) + v.push_back(get_point(vertex)); + Point_d c = kernel_.construct_circumcenter_d_object()(v.cbegin(), v.cend()); + typename Kernel::FT r = kernel_.squared_distance_d_object()(c, v[0]); + cache_.emplace_back(std::move(c), std::move(r)); + } + return cache_[k]; + } + public: /** \brief Inserts all Delaunay triangulation into the simplicial complex. 
* It also computes the filtration values accordingly to the \ref createcomplexalgorithm if default_filtration_value @@ -324,46 +344,28 @@ class Alpha_complex { if (!default_filtration_value) { // -------------------------------------------------------------------------------------------- - // Will be re-used many times - Vector_of_CGAL_points pointVector; // ### For i : d -> 0 for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) { // ### Foreach Sigma of dim i for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) { int f_simplex_dim = complex.dimension(f_simplex); if (decr_dim == f_simplex_dim) { - pointVector.clear(); - #ifdef DEBUG_TRACES - std::clog << "Sigma of dim " << decr_dim << " is"; - #endif // DEBUG_TRACES - for (auto vertex : complex.simplex_vertex_range(f_simplex)) { - pointVector.push_back(get_point(vertex)); - #ifdef DEBUG_TRACES - std::clog << " " << vertex; - #endif // DEBUG_TRACES - } - #ifdef DEBUG_TRACES - std::clog << std::endl; - #endif // DEBUG_TRACES // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma) if (std::isnan(complex.filtration(f_simplex))) { Filtration_value alpha_complex_filtration = 0.0; // No need to compute squared_radius on a single point - alpha is 0.0 if (f_simplex_dim > 0) { - // squared_radius function initialization - Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object(); - - CGAL::NT_converter cv; - auto sqrad = squared_radius(pointVector.begin(), pointVector.end()); - #if CGAL_VERSION_NR >= 1050000000 + auto const& sqrad = get_cache(complex, f_simplex).second; +#if CGAL_VERSION_NR >= 1050000000 if(exact) CGAL::exact(sqrad); - #endif +#endif + CGAL::NT_converter cv; alpha_complex_filtration = cv(sqrad); } complex.assign_filtration(f_simplex, alpha_complex_filtration); - #ifdef DEBUG_TRACES +#ifdef DEBUG_TRACES std::clog << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl; - #endif // DEBUG_TRACES +#endif // DEBUG_TRACES } // No need to propagate further, unweighted points all have value 0 if (decr_dim > 1) @@ -388,9 +390,7 @@ class Alpha_complex { void propagate_alpha_filtration(SimplicialComplexForAlpha& complex, Simplex_handle f_simplex) { // From SimplicialComplexForAlpha type required to assign filtration values. 
typedef typename SimplicialComplexForAlpha::Filtration_value Filtration_value; -#ifdef DEBUG_TRACES typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle; -#endif // DEBUG_TRACES // ### Foreach Tau face of Sigma for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) { @@ -414,33 +414,18 @@ class Alpha_complex { #endif // DEBUG_TRACES // ### Else } else { - // insert the Tau points in a vector for is_gabriel function - Vector_of_CGAL_points pointVector; -#ifdef DEBUG_TRACES - Vertex_handle vertexForGabriel = Vertex_handle(); -#endif // DEBUG_TRACES - for (auto vertex : complex.simplex_vertex_range(f_boundary)) { - pointVector.push_back(get_point(vertex)); - } - // Retrieve the Sigma point that is not part of Tau - parameter for is_gabriel function - Point_d point_for_gabriel; - for (auto vertex : complex.simplex_vertex_range(f_simplex)) { - point_for_gabriel = get_point(vertex); - if (std::find(pointVector.begin(), pointVector.end(), point_for_gabriel) == pointVector.end()) { -#ifdef DEBUG_TRACES - // vertex is not found in Tau - vertexForGabriel = vertex; -#endif // DEBUG_TRACES - // No need to continue loop - break; - } - } - // is_gabriel function initialization - Is_Gabriel is_gabriel = kernel_.side_of_bounded_sphere_d_object(); - bool is_gab = is_gabriel(pointVector.begin(), pointVector.end(), point_for_gabriel) - != CGAL::ON_BOUNDED_SIDE; + // Find which vertex of f_simplex is missing in f_boundary. We could actually write a variant of boundary_simplex_range that gives pairs (f_boundary, vertex). We rely on the fact that simplex_vertex_range is sorted. + auto longlist = complex.simplex_vertex_range(f_simplex); + auto shortlist = complex.simplex_vertex_range(f_boundary); + auto longiter = std::begin(longlist); + auto shortiter = std::begin(shortlist); + auto enditer = std::end(shortlist); + while(shortiter != enditer && *longiter == *shortiter) { ++longiter; ++shortiter; } + Vertex_handle extra = *longiter; + auto const& cache=get_cache(complex, f_boundary); + bool is_gab = kernel_.squared_distance_d_object()(cache.first, get_point(extra)) >= cache.second; #ifdef DEBUG_TRACES - std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl; + std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << extra << std::endl; #endif // DEBUG_TRACES // ### If Tau is not Gabriel of Sigma if (false == is_gab) { -- cgit v1.2.3 From ec4a9583adaa73c01b05a4b30425581ed7256379 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 24 Mar 2020 14:50:53 +0100 Subject: Remove min_persistence from generators It is supposed to be handled in persistence() already. 
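With that change, the intended division of labour reads as in this minimal sketch (the complex below is made up; output shapes follow the docstrings updated in this patch): any filtering by persistence is configured once in `persistence()`, and the generator queries simply mirror the pairs it kept.

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1, 2], 0.0)            # made-up lower-star complex
    st.assign_filtration([2], -1.0)
    st.make_filtration_non_decreasing()
    st.persistence(min_persistence=-1)   # filtering happens here, once
    regular, essential = st.lower_star_persistence_generators()
    # regular[d]: (n, 2) array of vertex pairs in dimension d
    # essential[d]: (m,) array of vertices of essential features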
--- src/python/CMakeLists.txt | 1 + src/python/gudhi/simplex_tree.pxd | 4 ++-- src/python/gudhi/simplex_tree.pyx | 18 ++++-------------- src/python/include/Persistent_cohomology_interface.h | 8 ++------ src/python/test/test_simplex_generators.py | 2 +- 5 files changed, 10 insertions(+), 23 deletions(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index f00966a5..fb219884 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -374,6 +374,7 @@ if(PYTHONINTERP_FOUND) ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py) add_gudhi_py_test(test_simplex_tree) + add_gudhi_py_test(test_simplex_generators) # Witness add_test(NAME witness_complex_from_nearest_landmark_table_py_test diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 44789365..4038b41d 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -75,5 +75,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": vector[pair[double,double]] intervals_in_dimension(int dimension) void write_output_diagram(string diagram_file_name) vector[pair[vector[int], vector[int]]] persistence_pairs() - pair[vector[vector[int]], vector[vector[int]]] lower_star_generators(double) - pair[vector[vector[int]], vector[vector[int]]] flag_generators(double) + pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() + pair[vector[vector[int]], vector[vector[int]]] flag_generators() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index faa9f9d8..beb40bc4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -526,15 +526,10 @@ cdef class SimplexTree: print("intervals_in_dim function requires persistence function" " to be launched first.") - def lower_star_persistence_generators(self, min_persistence=0.): + def lower_star_persistence_generators(self): """Assuming this is a lower-star filtration, this function returns the persistence pairs, where each simplex is replaced with the vertex that gave it its filtration value. - :param min_persistence: The minimum persistence value to take into - account (strictly greater than min_persistence). Default value is - 0.0. - Set min_persistence to -1.0 to see all values. - :type min_persistence: float. :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity, and second the essential features, grouped by dimension, with one vertex each :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]] @@ -542,22 +537,17 @@ cdef class SimplexTree: :note: lower_star_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.lower_star_generators(min_persistence) + gen = self.pcohptr.lower_star_generators() normal = [np_array(d).reshape(-1,2) for d in gen.first] infinite = [np_array(d) for d in gen.second] return (normal, infinite) else: print("lower_star_persistence_generators() requires that persistence() be called first.") - def flag_persistence_generators(self, min_persistence=0.): + def flag_persistence_generators(self): """Assuming this is a flag complex, this function returns the persistence pairs, where each simplex is replaced with the vertices of the edges that gave it its filtration value. - :param min_persistence: The minimum persistence value to take into - account (strictly greater than min_persistence). Default value is - 0.0. 
- Set min_persistence to -1.0 to see all values. - :type min_persistence: float. :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death; then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity; then the connected components, with one vertex each; @@ -567,7 +557,7 @@ cdef class SimplexTree: :note: flag_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.flag_generators(min_persistence) + gen = self.pcohptr.flag_generators() if len(gen.first) == 0: normal0 = numpy.empty((0,3)) normals = [] diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 3ce40af5..3074389c 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -100,7 +100,7 @@ persistent_cohomology::Persistent_cohomology>, std::vector>> Generators; - Generators lower_star_generators(double min_persistence) { + Generators lower_star_generators() { Generators out; // diags[i] should be interpreted as vector> auto& diags = out.first; @@ -109,8 +109,6 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); - if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) - continue; int dim = stptr_->dimension(s); auto v = stptr_->vertex_with_same_filtration(s); if(t == stptr_->null_simplex()) { @@ -128,7 +126,7 @@ persistent_cohomology::Persistent_cohomology> and other diags[i] as vector> auto& diags = out.first; @@ -137,8 +135,6 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); - if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) - continue; int dim = stptr_->dimension(s); bool infinite = t == stptr_->null_simplex(); if(infinite) { diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py index efb5f8e3..e3bdc094 100755 --- a/src/python/test/test_simplex_generators.py +++ b/src/python/test/test_simplex_generators.py @@ -37,7 +37,7 @@ def test_lower_star_generators(): st.assign_filtration([1], 2) st.make_filtration_non_decreasing() st.persistence(min_persistence=-1) - g = st.lower_star_persistence_generators(min_persistence=-1) + g = st.lower_star_persistence_generators() assert len(g[0]) == 2 assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]]) assert np.array_equal(g[0][1], [[1, 1]]) -- cgit v1.2.3 From d5c8dc1ba4d00ead5875b97e164d07f6180526b0 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 24 Mar 2020 20:31:05 +0100 Subject: print -> assert --- src/python/gudhi/simplex_tree.pyx | 47 +++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index beb40bc4..dcf1b46e 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -536,13 +536,11 @@ cdef class SimplexTree: :note: lower_star_persistence_generators requires that `persistence()` be called first. 
""" - if self.pcohptr != NULL: - gen = self.pcohptr.lower_star_generators() - normal = [np_array(d).reshape(-1,2) for d in gen.first] - infinite = [np_array(d) for d in gen.second] - return (normal, infinite) - else: - print("lower_star_persistence_generators() requires that persistence() be called first.") + assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first." + gen = self.pcohptr.lower_star_generators() + normal = [np_array(d).reshape(-1,2) for d in gen.first] + infinite = [np_array(d) for d in gen.second] + return (normal, infinite) def flag_persistence_generators(self): """Assuming this is a flag complex, this function returns the persistence pairs, @@ -556,23 +554,20 @@ cdef class SimplexTree: :note: flag_persistence_generators requires that `persistence()` be called first. """ - if self.pcohptr != NULL: - gen = self.pcohptr.flag_generators() - if len(gen.first) == 0: - normal0 = numpy.empty((0,3)) - normals = [] - else: - l = iter(gen.first) - normal0 = np_array(next(l)).reshape(-1,3) - normals = [np_array(d).reshape(-1,4) for d in l] - if len(gen.second) == 0: - infinite0 = numpy.empty(0) - infinites = [] - else: - l = iter(gen.second) - infinite0 = np_array(next(l)) - infinites = [np_array(d).reshape(-1,2) for d in l] - - return (normal0, normals, infinite0, infinites) + assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first." + gen = self.pcohptr.flag_generators() + if len(gen.first) == 0: + normal0 = numpy.empty((0,3)) + normals = [] + else: + l = iter(gen.first) + normal0 = np_array(next(l)).reshape(-1,3) + normals = [np_array(d).reshape(-1,4) for d in l] + if len(gen.second) == 0: + infinite0 = numpy.empty(0) + infinites = [] else: - print("flag_persistence_generators() requires that persistence() be called first.") + l = iter(gen.second) + infinite0 = np_array(next(l)) + infinites = [np_array(d).reshape(-1,2) for d in l] + return (normal0, normals, infinite0, infinites) -- cgit v1.2.3 From 20ba972d2a7fd14e564ce4adb3921f3f8190fc71 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Wed, 25 Mar 2020 13:00:58 -0400 Subject: update biblio --- biblio/bibliography.bib | 36 +++++++++++++++++++-------- src/Simplex_tree/include/gudhi/Simplex_tree.h | 4 +-- src/python/gudhi/simplex_tree.pyx | 2 +- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index 3bbe7960..b017a07e 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -7,11 +7,13 @@ } @article{Carriere17c, - author = {Carri\`ere, Mathieu and Michel, Bertrand and Oudot, Steve}, - title = {{Statistical Analysis and Parameter Selection for Mapper}}, - journal = {CoRR}, - volume = {abs/1706.00204}, - year = {2017} +author = {Carri{\`{e}}re, Mathieu and Michel, Bertrand and Oudot, Steve}, +journal = {Journal of Machine Learning Research}, +pages = {1--39}, +publisher = {JMLR.org}, +title = {{Statistical analysis and parameter selection for Mapper}}, +volume = {19}, +year = {2018} } @inproceedings{Dey13, @@ -23,11 +25,14 @@ } @article{Carriere16, - title={{Structure and Stability of the 1-Dimensional Mapper}}, - author={Carri\`ere, Mathieu and Oudot, Steve}, - journal={CoRR}, - volume= {abs/1511.05823}, - year={2015} +author = {Carri{\`{e}}re, Mathieu and Oudot, Steve}, +journal = {Foundations of Computational Mathematics}, +number = {6}, +pages = {1333--1396}, +publisher = {Springer-Verlag}, +title = {{Structure and stability of the one-dimensional 
Mapper}}, +volume = {18}, +year = {2017} } @inproceedings{zigzag_reflection, @@ -36,6 +41,17 @@ year = {2014 $\ \ \ \ \ \ \ \ \ \ \ $ \emph{In Preparation}}, } +@article{Cohen-Steiner2009, +author = {Cohen-Steiner, David and Edelsbrunner, Herbert and Harer, John}, +journal = {Foundations of Computational Mathematics}, +number = {1}, +pages = {79--103}, +publisher = {Springer-Verlag}, +title = {{Extending persistence using Poincar{\'{e}} and Lefschetz duality}}, +volume = {9}, +year = {2009} +} + @misc{gudhi_stpcoh, author = {Cl\'ement Maria}, title = "\textsc{Gudhi}, Simplex Tree and Persistent Cohomology Packages", diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index de97d6f2..60720567 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -51,7 +51,7 @@ namespace Gudhi { * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or * EXTRA (which means the simplex is the cone point). * - * Details may be found in section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z. + * Details may be found in \cite Cohen-Steiner2009 and section 2.2 in \cite Carriere16. * */ enum class Extended_simplex_type {UP, DOWN, EXTRA}; @@ -1507,7 +1507,7 @@ class Simplex_tree { * in the Simplex_tree. Hence, this function also outputs the type of each simplex. It can be either UP (which means * that the simplex was present originally, and is thus part of the ascending extended filtration), DOWN (which means * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or - * EXTRA (which means the simplex is the cone point). Note that if the simplex type is DOWN, the original filtration value + * EXTRA (which means the simplex is the cone point). See the definition of Extended_simplex_type. Note that if the simplex type is DOWN, the original filtration value * is set to be the original filtration value of the corresponding (not coned) original simplex. * \pre This function should be called only if `extend_filtration()` has been called first! * \post The output filtration value is supposed to be the same, but might be a little different, than the diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index bcb1578d..6bb22171 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -427,7 +427,7 @@ cdef class SimplexTree: 0.0. Sets min_persistence to -1.0 to see all values. :type min_persistence: float. - :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See and https://link.springer.com/article/10.1007/s10208-008-9027-z and section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. .. 
note:: -- cgit v1.2.3 From b2a549c055c2796fe4eb1e4e4265cdd718753416 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Wed, 25 Mar 2020 15:10:35 -0400 Subject: fix biblio --- src/python/gudhi/simplex_tree.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 6bb22171..cc3753e1 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -427,7 +427,7 @@ cdef class SimplexTree: 0.0. Sets min_persistence to -1.0 to see all values. :type min_persistence: float. - :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See and https://link.springer.com/article/10.1007/s10208-008-9027-z and section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. .. note:: -- cgit v1.2.3 From c8c942c43643131a7ef9899826a7095e497150fe Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:10:26 +0100 Subject: cmake --- .../modules/GUDHI_third_party_libraries.cmake | 3 + src/python/CMakeLists.txt | 14 ++ src/python/gudhi/point_cloud/dtm.py | 40 +++++ src/python/gudhi/point_cloud/knn.py | 193 +++++++++++++++++++++ src/python/test/test_dtm.py | 32 ++++ 5 files changed, 282 insertions(+) create mode 100644 src/python/gudhi/point_cloud/dtm.py create mode 100644 src/python/gudhi/point_cloud/knn.py create mode 100755 src/python/test/test_dtm.py diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 2d010483..c2039674 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -160,6 +160,9 @@ if( PYTHONINTERP_FOUND ) find_python_module("sklearn") find_python_module("ot") find_python_module("pybind11") + find_python_module("torch") + find_python_module("hnswlib") + find_python_module("pykeops") endif() if(NOT GUDHI_PYTHON_PATH) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index f00966a5..d26d3e6e 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -78,6 +78,15 @@ if(PYTHONINTERP_FOUND) if(OT_FOUND) add_gudhi_debug_info("POT version ${OT_VERSION}") endif() + if(HNSWLIB_FOUND) + add_gudhi_debug_info("HNSWlib version ${OT_VERSION}") + endif() + if(TORCH_FOUND) + add_gudhi_debug_info("PyTorch version ${OT_VERSION}") + endif() + if(PYKEOPS_FOUND) + add_gudhi_debug_info("PyKeOps version ${OT_VERSION}") + endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ") @@ -399,6 +408,11 @@ if(PYTHONINTERP_FOUND) # Time Delay add_gudhi_py_test(test_time_delay) + # DTM + if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + add_gudhi_py_test(test_dtm) + endif() + # Documentation generation is available through sphinx - requires all modules 
if(SPHINX_PATH) if(MATPLOTLIB_FOUND) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py new file mode 100644 index 00000000..08f9ea60 --- /dev/null +++ b/src/python/gudhi/point_cloud/dtm.py @@ -0,0 +1,40 @@ +from .knn import KNN + + +class DTM: + def __init__(self, k, q=2, **kwargs): + """ + Args: + q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if input_type is 'distance_matrix'. + kwargs: Same parameters as KNN, except that metric="neighbors" means that transform() expects an array with the distances to the k nearest neighbors. + """ + self.k = k + self.q = q + self.params = kwargs + + def fit_transform(self, X, y=None): + return self.fit(X).transform(X) + + def fit(self, X, y=None): + """ + Args: + X (numpy.array): coordinates for mass points + """ + if self.params.setdefault("metric", "euclidean") != "neighbors": + self.knn = KNN(self.k, return_index=False, return_distance=True, **self.params) + self.knn.fit(X) + return self + + def transform(self, X): + """ + Args: + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", or distances to the k nearest neighbors if metric is "neighbors" (if the array has more than k columns, the remaining ones are ignored). + """ + if self.params["metric"] == "neighbors": + distances = X[:, : self.k] + else: + distances = self.knn.transform(X) + distances = distances ** self.q + dtm = distances.sum(-1) / self.k + dtm = dtm ** (1.0 / self.q) + return dtm diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py new file mode 100644 index 00000000..57078f1e --- /dev/null +++ b/src/python/gudhi/point_cloud/knn.py @@ -0,0 +1,193 @@ +import numpy + + +class KNN: + def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs): + """ + Args: + k (int): number of neighbors (including the point itself). + return_index (bool): if True, return the index of each neighbor. + return_distance (bool): if True, return the distance to each neighbor. + implementation (str): Choice of the library that does the real work. + + * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes + large (10+) but the number of points remains low (less than a million). + Only "minkowski" and its aliases are supported. + * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported. + * 'sklearn' for scikit-learn's NearestNeighbors. + Note that this provides in particular an option algorithm="brute". + * 'hnsw' for hnswlib.Index. It is very fast but does not provide guarantees. + Only supports "euclidean" for now. + * None will try to select a sensible one (scipy if possible, scikit-learn otherwise). + metric (str): see `sklearn.neighbors.NearestNeighbors`. + eps (float): relative error when computing nearest neighbors with the cKDTree. + p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2. + n_jobs (int): Number of jobs to schedule for parallel processing of nearest neighbors on the CPU. + If -1 is given all processors are used. Default: 1. + + Additional parameters are forwarded to the backends. 
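+
+        A toy usage sketch (illustrative only; `base` and `query` stand for any
+        coordinate arrays of shape (n, d)):
+
+            knn = KNN(k=2, return_distance=True)  # return_index is True by default
+            neighbors, distances = knn.fit(base).transform(query)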
+ """ + self.k = k + self.return_index = return_index + self.return_distance = return_distance + self.metric = metric + self.params = kwargs + # canonicalize + if metric == "euclidean": + self.params["p"] = 2 + self.metric = "minkowski" + elif metric == "manhattan": + self.params["p"] = 1 + self.metric = "minkowski" + elif metric == "chebyshev": + self.params["p"] = numpy.inf + self.metric = "minkowski" + elif metric == "minkowski": + self.params["p"] = kwargs.get("p", 2) + if self.params.get("implementation") in {"keops", "ckdtree"}: + assert self.metric == "minkowski" + if self.params.get("implementation") == "hnsw": + assert self.metric == "minkowski" and self.params["p"] == 2 + if not self.params.get("implementation"): + if self.metric == "minkowski": + self.params["implementation"] = "ckdtree" + else: + self.params["implementation"] = "sklearn" + + def fit_transform(self, X, y=None): + return self.fit(X).transform(X) + + def fit(self, X, y=None): + """ + Args: + X (numpy.array): coordinates for reference points + """ + self.ref_points = X + if self.params.get("implementation") == "ckdtree": + # sklearn could handle this, but it is much slower + from scipy.spatial import cKDTree + self.kdtree = cKDTree(X) + + if self.params.get("implementation") == "sklearn" and self.metric != "precomputed": + # FIXME: sklearn badly handles "precomputed" + from sklearn.neighbors import NearestNeighbors + + nargs = {k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}} + self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs) + self.nn.fit(X) + + if self.params.get("implementation") == "hnsw": + import hnswlib + self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances + self.graph.init_index(len(X), **{k:v for k,v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}) + n = self.params.get("num_threads") + if n is None: + n = self.params.get("n_jobs", 1) + self.params["num_threads"] = n + self.graph.add_items(X, num_threads=n) + + return self + + def transform(self, X): + """ + Args: + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed" + """ + metric = self.metric + k = self.k + + if metric == "precomputed": + # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural. + X = numpy.array(X) + if self.return_index: + neighbors = numpy.argpartition(X, k - 1)[:, 0:k] + distances = numpy.take_along_axis(X, neighbors, axis=-1) + ngb_order = numpy.argsort(distances, axis=-1) + neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + if self.return_distance: + distances = numpy.take_along_axis(distances, ngb_order, axis=-1) + return neighbors, distances + else: + return neighbors + if self.return_distance: + distances = numpy.partition(X, k - 1)[:, 0:k] + # partition is not guaranteed to sort the lower half, although it often does + distances.sort(axis=-1) + return distances + return None + + if self.params.get("implementation") == "hnsw": + ef = self.params.get("ef") + if ef is not None: + self.graph.set_ef(ef) + neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"]) + # The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn, + # which returns a priority_queue, and then fills the return array backwards with top/pop on the queue. 
+ if self.return_index: + if self.return_distance: + return neighbors, numpy.sqrt(distances) + else: + return neighbors + if self.return_distance: + return numpy.sqrt(distances) + return None + + if self.params.get("implementation") == "keops": + import torch + from pykeops.torch import LazyTensor + + # 'float64' is slow except on super expensive GPUs. Allow it with some param? + XX = torch.tensor(X, dtype=torch.float32) + if X is self.ref_points: + YY = XX + else: + YY = torch.tensor(self.ref_points, dtype=torch.float32) + + p = self.params["p"] + if p == numpy.inf: + # Requires a version of pykeops strictly more recent than 1.3 + mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1) + elif p == 2: # Any even integer? + mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1) + else: + mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1) + + if self.return_index: + if self.return_distance: + distances, neighbors = mat.Kmin_argKmin(k, dim=1) + if p != numpy.inf: + distances = distances ** (1.0 / p) + return neighbors, distances + else: + neighbors = mat.argKmin(k, dim=1) + return neighbors + if self.return_distance: + distances = mat.Kmin(k, dim=1) + if p != numpy.inf: + distances = distances ** (1.0 / p) + return distances + return None + # FIXME: convert everything back to numpy arrays or not? + + if hasattr(self, "kdtree"): + qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}} + distances, neighbors = self.kdtree.query(X, k=self.k, **qargs) + if self.return_index: + if self.return_distance: + return neighbors, distances + else: + return neighbors + if self.return_distance: + return distances + return None + + if self.return_distance: + distances, neighbors = self.nn.kneighbors(X, return_distance=True) + if self.return_index: + return neighbors, distances + else: + return distances + if self.return_index: + neighbors = self.nn.kneighbors(X, return_distance=False) + return neighbors + return None diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py new file mode 100755 index 00000000..57fdd131 --- /dev/null +++ b/src/python/test/test_dtm.py @@ -0,0 +1,32 @@ +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ Author(s): Marc Glisse + + Copyright (C) 2020 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +from gudhi.point_cloud.dtm import DTM +import numpy + + +def test_dtm_euclidean(): + pts = numpy.random.rand(1000,4) + k = 3 + dtm = DTM(k,implementation="ckdtree") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="sklearn") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="sklearn",algorithm="brute") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="hnsw") + print(dtm.fit_transform(pts)) + from scipy.spatial.distance import cdist + d = cdist(pts,pts) + dtm = DTM(k,metric="precomputed") + print(dtm.fit_transform(d)) + dtm = DTM(k,implementation="keops") + print(dtm.fit_transform(pts)) + -- cgit v1.2.3 From 5c4c398b99fe1b157d64cd43a4977ce1504ca795 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:25:28 +0100 Subject: HNSWlib doesn't define __version__ --- src/cmake/modules/GUDHI_third_party_libraries.cmake | 21 ++++++++++++++++++++- src/python/CMakeLists.txt | 7 ++++--- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index c2039674..a931b3a1 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -150,6 +150,25 @@ function( find_python_module PYTHON_MODULE_NAME ) endif() endfunction( find_python_module ) +# For modules that do not define module.__version__ +function( find_python_module_no_version PYTHON_MODULE_NAME ) + string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}" + RESULT_VARIABLE PYTHON_MODULE_RESULT + ERROR_VARIABLE PYTHON_MODULE_ERROR) + if(PYTHON_MODULE_RESULT EQUAL 0) + # Remove carriage return + message ("++ Python module ${PYTHON_MODULE_NAME} found") + set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE) + else() + message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME} + - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT} + - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}") + set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE) + endif() +endfunction( find_python_module_no_version ) + if( PYTHONINTERP_FOUND ) find_python_module("cython") find_python_module("pytest") @@ -161,8 +180,8 @@ if( PYTHONINTERP_FOUND ) find_python_module("ot") find_python_module("pybind11") find_python_module("torch") - find_python_module("hnswlib") find_python_module("pykeops") + find_python_module_no_version("hnswlib") endif() if(NOT GUDHI_PYTHON_PATH) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d26d3e6e..ec0ab1ca 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -79,13 +79,14 @@ if(PYTHONINTERP_FOUND) add_gudhi_debug_info("POT version ${OT_VERSION}") endif() if(HNSWLIB_FOUND) - add_gudhi_debug_info("HNSWlib version ${OT_VERSION}") + # Does not have a version number... 
+ add_gudhi_debug_info("HNSWlib found") endif() if(TORCH_FOUND) - add_gudhi_debug_info("PyTorch version ${OT_VERSION}") + add_gudhi_debug_info("PyTorch version ${TORCH_VERSION}") endif() if(PYKEOPS_FOUND) - add_gudhi_debug_info("PyKeOps version ${OT_VERSION}") + add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}") endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") -- cgit v1.2.3 From 7f5bd151220162a54096a7839634ce19fb786f6f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:27:31 +0100 Subject: Add 3 pip runtime dependencies --- .github/test-requirements.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt index 18882792..4f9dcefb 100644 --- a/.github/test-requirements.txt +++ b/.github/test-requirements.txt @@ -6,4 +6,7 @@ matplotlib scipy scikit-learn POT -tensorflow \ No newline at end of file +tensorflow +torch +pykeops +hnswlib -- cgit v1.2.3 From 7ddad8220fdd34fd3ed91e16882feaa3961b2d67 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:59:20 +0100 Subject: license --- src/python/gudhi/point_cloud/dtm.py | 9 +++++++++ src/python/gudhi/point_cloud/knn.py | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 08f9ea60..839e7452 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -1,3 +1,12 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# Author(s): Marc Glisse +# +# Copyright (C) 2020 Inria +# +# Modification(s): +# - YYYY/MM Author: Description of the modification + from .knn import KNN diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 57078f1e..943d4e9f 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -1,3 +1,12 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# Author(s): Marc Glisse +# +# Copyright (C) 2020 Inria +# +# Modification(s): +# - YYYY/MM Author: Description of the modification + import numpy -- cgit v1.2.3 From 7120b186471828a9570fdeef37900bd8b98d0d31 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 23:06:06 +0100 Subject: license --- src/python/doc/point_cloud_sum.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc index 0a159680..ecc18951 100644 --- a/src/python/doc/point_cloud_sum.inc +++ b/src/python/doc/point_cloud_sum.inc @@ -6,7 +6,7 @@ | | :math:`(y_1, y_2, \ldots, y_d)` | | | | | | :Since: GUDHI 2.0.0 | | | | | - | | | :License: MIT (`GPL v3 `_) | + | | | :License: MIT (`GPL v3 `_, BSD-3-Clause, Apache-2.0) | | | Parts of this package require CGAL. 
| | | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | | | | | -- cgit v1.2.3 From af35ea5b4ce631ae826f1db1940798f254aba658 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 23:39:59 +0100 Subject: clean-up use of "implementation" --- src/python/gudhi/point_cloud/knn.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 943d4e9f..a4ea3acd 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -72,12 +72,12 @@ class KNN: X (numpy.array): coordinates for reference points """ self.ref_points = X - if self.params.get("implementation") == "ckdtree": + if self.params["implementation"] == "ckdtree": # sklearn could handle this, but it is much slower from scipy.spatial import cKDTree self.kdtree = cKDTree(X) - if self.params.get("implementation") == "sklearn" and self.metric != "precomputed": + if self.params["implementation"] == "sklearn" and self.metric != "precomputed": # FIXME: sklearn badly handles "precomputed" from sklearn.neighbors import NearestNeighbors @@ -85,7 +85,7 @@ class KNN: self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs) self.nn.fit(X) - if self.params.get("implementation") == "hnsw": + if self.params["implementation"] == "hnsw": import hnswlib self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances self.graph.init_index(len(X), **{k:v for k,v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}) @@ -125,7 +125,7 @@ class KNN: return distances return None - if self.params.get("implementation") == "hnsw": + if self.params["implementation"] == "hnsw": ef = self.params.get("ef") if ef is not None: self.graph.set_ef(ef) @@ -141,7 +141,7 @@ class KNN: return numpy.sqrt(distances) return None - if self.params.get("implementation") == "keops": + if self.params["implementation"] == "keops": import torch from pykeops.torch import LazyTensor @@ -178,7 +178,7 @@ class KNN: return None # FIXME: convert everything back to numpy arrays or not? - if hasattr(self, "kdtree"): + if self.params["implementation"] == "ckdtree": qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}} distances, neighbors = self.kdtree.query(X, k=self.k, **qargs) if self.return_index: @@ -190,6 +190,7 @@ class KNN: return distances return None + assert self.params["implementation"] == "sklearn" if self.return_distance: distances, neighbors = self.nn.kneighbors(X, return_distance=True) if self.return_index: -- cgit v1.2.3 From f74c71ca8e474ff927cae029ea63329d30293582 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 27 Mar 2020 13:43:58 +0100 Subject: Improve coverage --- src/python/gudhi/point_cloud/dtm.py | 2 ++ src/python/test/test_dtm.py | 48 +++++++++++++++++++++++++------------ 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 839e7452..541b74a6 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -30,6 +30,8 @@ class DTM: X (numpy.array): coordinates for mass points """ if self.params.setdefault("metric", "euclidean") != "neighbors": + # KNN gives sorted distances, which is unnecessary here. + # Maybe add a parameter to say we don't need sorting? 
self.knn = KNN(self.k, return_index=False, return_distance=True, **self.params) self.knn.fit(X) return self diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 57fdd131..841f8c3c 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -10,23 +10,41 @@ from gudhi.point_cloud.dtm import DTM import numpy +import pytest -def test_dtm_euclidean(): - pts = numpy.random.rand(1000,4) +def test_dtm_compare_euclidean(): + pts = numpy.random.rand(1000, 4) k = 3 - dtm = DTM(k,implementation="ckdtree") - print(dtm.fit_transform(pts)) - dtm = DTM(k,implementation="sklearn") - print(dtm.fit_transform(pts)) - dtm = DTM(k,implementation="sklearn",algorithm="brute") - print(dtm.fit_transform(pts)) - dtm = DTM(k,implementation="hnsw") - print(dtm.fit_transform(pts)) + dtm = DTM(k, implementation="ckdtree") + r0 = dtm.fit_transform(pts) + dtm = DTM(k, implementation="sklearn") + r1 = dtm.fit_transform(pts) + assert r1 == pytest.approx(r0) + dtm = DTM(k, implementation="sklearn", algorithm="brute") + r2 = dtm.fit_transform(pts) + assert r2 == pytest.approx(r0) + dtm = DTM(k, implementation="hnsw") + r3 = dtm.fit_transform(pts) + assert r3 == pytest.approx(r0) from scipy.spatial.distance import cdist - d = cdist(pts,pts) - dtm = DTM(k,metric="precomputed") - print(dtm.fit_transform(d)) - dtm = DTM(k,implementation="keops") - print(dtm.fit_transform(pts)) + d = cdist(pts, pts) + dtm = DTM(k, metric="precomputed") + r4 = dtm.fit_transform(d) + assert r4 == pytest.approx(r0) + dtm = DTM(k, implementation="keops") + r5 = dtm.fit_transform(pts) + assert r5 == pytest.approx(r0) + + +def test_dtm_precomputed(): + dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]]) + dtm = DTM(2, q=1, metric="neighbors") + r = dtm.fit_transform(dist) + assert r == pytest.approx([2.0, 3, 1]) + + dist = numpy.array([[2.0, 2], [0, 1], [3, 4]]) + dtm = DTM(2, q=2, metric="neighbors") + r = dtm.fit_transform(dist) + assert r == pytest.approx([2.0, .707, 3.5355], rel=.01) -- cgit v1.2.3 From 03376ffe0f6060864ee8908893297f8800b7b8d1 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 27 Mar 2020 20:27:10 +0100 Subject: doc --- src/python/doc/point_cloud.rst | 17 +++++++++++++++-- src/python/gudhi/point_cloud/dtm.py | 6 +++++- src/python/gudhi/point_cloud/knn.py | 31 ++++++++++++++++++------------- src/python/test/test_dtm.py | 2 +- 4 files changed, 39 insertions(+), 17 deletions(-) diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index c0d4b303..351b0786 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -21,10 +21,23 @@ Subsampling :special-members: :show-inheritance: -TimeDelayEmbedding ------------------- +Time Delay Embedding +-------------------- .. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding :members: :special-members: __call__ +Nearest neighbors +----------------- + +.. automodule:: gudhi.point_cloud.knn + :members: + :special-members: __init__ + +Distance to measure +------------------- + +.. automodule:: gudhi.point_cloud.dtm + :members: + :special-members: __init__ diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 541b74a6..e4096c5e 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -11,11 +11,15 @@ from .knn import KNN class DTM: + """ + Class to compute the distance to the empirical measure defined by a point set. 
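+
+    Sketched from the computation in :func:`transform`: writing d_1, ..., d_k for the
+    distances from a query point x to its k nearest mass points, the returned value is
+    DTM(x) = ((d_1**q + ... + d_k**q) / k) ** (1 / q).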
+ """ + def __init__(self, k, q=2, **kwargs): """ Args: q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if input_type is 'distance_matrix'. - kwargs: Same parameters as KNN, except that metric="neighbors" means that transform() expects an array with the distances to the k nearest neighbors. + kwargs: Same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. """ self.k = k self.q = q diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index a4ea3acd..02448530 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -11,6 +11,10 @@ import numpy class KNN: + """ + Class wrapping several implementations for computing the k nearest neighbors in a point set. + """ + def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs): """ Args: @@ -19,22 +23,17 @@ class KNN: return_distance (bool): if True, return the distance to each neighbor. implementation (str): Choice of the library that does the real work. - * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes - large (10+) but the number of points remains low (less than a million). - Only "minkowski" and its aliases are supported. + * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes large (10+) but the number of points remains low (less than a million). Only "minkowski" and its aliases are supported. * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported. - * 'sklearn' for scikit-learn's NearestNeighbors. - Note that this provides in particular an option algorithm="brute". - * 'hnsw' for hnswlib.Index. It is very fast but does not provide guarantees. - Only supports "euclidean" for now. + * 'sklearn' for scikit-learn's NearestNeighbors. Note that this provides in particular an option algorithm="brute". + * 'hnsw' for hnswlib.Index. It can be very fast but does not provide guarantees. Only supports "euclidean" for now. * None will try to select a sensible one (scipy if possible, scikit-learn otherwise). metric (str): see `sklearn.neighbors.NearestNeighbors`. eps (float): relative error when computing nearest neighbors with the cKDTree. p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2. n_jobs (int): Number of jobs to schedule for parallel processing of nearest neighbors on the CPU. If -1 is given all processors are used. Default: 1. - - Additional parameters are forwarded to the backends. + kwargs: additional parameters are forwarded to the backends. 
""" self.k = k self.return_index = return_index @@ -75,20 +74,26 @@ class KNN: if self.params["implementation"] == "ckdtree": # sklearn could handle this, but it is much slower from scipy.spatial import cKDTree + self.kdtree = cKDTree(X) if self.params["implementation"] == "sklearn" and self.metric != "precomputed": # FIXME: sklearn badly handles "precomputed" from sklearn.neighbors import NearestNeighbors - nargs = {k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}} + nargs = { + k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"} + } self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs) self.nn.fit(X) if self.params["implementation"] == "hnsw": import hnswlib - self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances - self.graph.init_index(len(X), **{k:v for k,v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}) + + self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances + self.graph.init_index( + len(X), **{k: v for k, v in self.params.items() if k in {"ef_construction", "M", "random_seed"}} + ) n = self.params.get("num_threads") if n is None: n = self.params.get("n_jobs", 1) @@ -154,7 +159,7 @@ class KNN: p = self.params["p"] if p == numpy.inf: - # Requires a version of pykeops strictly more recent than 1.3 + # Requires pykeops 1.4 or later mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1) elif p == 2: # Any even integer? mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 841f8c3c..93b13e1a 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -47,4 +47,4 @@ def test_dtm_precomputed(): dist = numpy.array([[2.0, 2], [0, 1], [3, 4]]) dtm = DTM(2, q=2, metric="neighbors") r = dtm.fit_transform(dist) - assert r == pytest.approx([2.0, .707, 3.5355], rel=.01) + assert r == pytest.approx([2.0, 0.707, 3.5355], rel=0.01) -- cgit v1.2.3 From c0d9cdcce4b45f941b096d6fcfdf6bf3b82115cf Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 08:51:26 +0100 Subject: pip --no-cache-dir See if that helps with memory issues... 
--- Dockerfile_for_circleci_image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index 1eededb5..20754e2a 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -51,7 +51,7 @@ ADD .github/build-requirements.txt / ADD .github/test-requirements.txt / RUN pip3 install -r build-requirements.txt -RUN pip3 install -r test-requirements.txt +RUN pip3 --no-cache-dir install -r test-requirements.txt # apt clean up RUN apt autoremove && rm -rf /var/lib/apt/lists/* -- cgit v1.2.3 From 68839b95e7751afd04155cd2565cc53362f01fa2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 10:41:50 +0100 Subject: Missing test --- src/python/CMakeLists.txt | 1 + src/python/test/test_knn.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100755 src/python/test/test_knn.py diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index ec0ab1ca..d7a6a4db 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -411,6 +411,7 @@ if(PYTHONINTERP_FOUND) # DTM if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + add_gudhi_py_test(test_knn) add_gudhi_py_test(test_dtm) endif() diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py new file mode 100755 index 00000000..e455fb48 --- /dev/null +++ b/src/python/test/test_knn.py @@ -0,0 +1,82 @@ +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + Author(s): Marc Glisse + + Copyright (C) 2020 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +from gudhi.point_cloud.knn import KNN +import numpy as np +import pytest + + +def test_knn_explicit(): + base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) + query = np.array([[1.0, 1], [2, 2], [4, 4]]) + knn = KNN(2, metric="manhattan", return_distance=True, return_index=True) + knn.fit(base) + r = knn.transform(query) + assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]])) + assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]])) + + knn = KNN(2, metric="chebyshev", return_distance=True, return_index=False) + knn.fit(base) + r = knn.transform(query) + assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) + r = ( + KNN(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops") + .fit(base) + .transform(query) + ) + assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) + + knn = KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True) + knn.fit(base) + r = knn.transform(query) + assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) + r = ( + KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops") + .fit(base) + .transform(query) + ) + assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) + + dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]]) + knn = KNN(2, metric="precomputed", return_index=True, return_distance=False) + r = knn.fit_transform(dist) + assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) + knn = KNN(2, metric="precomputed", return_index=True, return_distance=True) + r = knn.fit_transform(dist) + assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) + assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) + + +def test_knn_compare(): + base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) + 
query = np.array([[1.0, 1], [2, 2], [4, 4]]) + r0 = KNN(2, implementation="ckdtree", return_index=True, return_distance=False).fit(base).transform(query) + r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=False).fit(base).transform(query) + r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query) + r3 = KNN(2, implementation="keops", return_index=True, return_distance=False).fit(base).transform(query) + assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3) + + r0 = KNN(2, implementation="ckdtree", return_index=True, return_distance=True).fit(base).transform(query) + r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=True).fit(base).transform(query) + r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query) + r3 = KNN(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query) + assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0]) + d0 = pytest.approx(r0[1]) + assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0 + + +def test_knn_nop(): + # This doesn't look super useful... + p = np.array([[0.0]]) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="sklearn").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="ckdtree").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5).fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="keops").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, metric="precomputed").fit_transform(p) -- cgit v1.2.3 From 35a12b553c85af8ce31629b90a27a7071b0cc379 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 11:48:43 +0100 Subject: Doc tweaks, default DTM exponent --- src/python/doc/point_cloud.rst | 6 ++++-- src/python/doc/point_cloud_sum.inc | 4 ++-- src/python/gudhi/point_cloud/dtm.py | 17 ++++++++++++----- src/python/gudhi/point_cloud/knn.py | 6 +++--- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index 351b0786..192f70db 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -28,11 +28,12 @@ Time Delay Embedding :members: :special-members: __call__ -Nearest neighbors ------------------ +K nearest neighbors +------------------- .. automodule:: gudhi.point_cloud.knn :members: + :undoc-members: :special-members: __init__ Distance to measure @@ -40,4 +41,5 @@ Distance to measure .. automodule:: gudhi.point_cloud.dtm :members: + :undoc-members: :special-members: __init__ diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc index ecc18951..d4761aba 100644 --- a/src/python/doc/point_cloud_sum.inc +++ b/src/python/doc/point_cloud_sum.inc @@ -2,8 +2,8 @@ :widths: 30 40 30 +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, etc. 
| :Author: Vincent Rouvreau | - | | :math:`(y_1, y_2, \ldots, y_d)` | | | + | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, | :Authors: Vincent Rouvreau, Marc Glisse, Masatoshi Takenouchi | + | | :math:`(y_1, y_2, \ldots, y_d)` | find neighbors, embed time series in higher dimension, etc. | | | | | :Since: GUDHI 2.0.0 | | | | | | | | :License: MIT (`GPL v3 `_, BSD-3-Clause, Apache-2.0) | diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index e4096c5e..520cbea8 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -15,10 +15,11 @@ class DTM: Class to compute the distance to the empirical measure defined by a point set. """ - def __init__(self, k, q=2, **kwargs): + def __init__(self, k, q=None, **kwargs): """ Args: - q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if input_type is 'distance_matrix'. + k (int): number of neighbors (possibly including the point itself). + q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if metric is "neighbors" or "distance_matrix". kwargs: Same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. """ self.k = k @@ -31,7 +32,7 @@ class DTM: def fit(self, X, y=None): """ Args: - X (numpy.array): coordinates for mass points + X (numpy.array): coordinates for mass points. """ if self.params.setdefault("metric", "euclidean") != "neighbors": # KNN gives sorted distances, which is unnecessary here. @@ -45,11 +46,17 @@ class DTM: Args: X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", or distances to the k nearest neighbors if metric is "neighbors" (if the array has more than k columns, the remaining ones are ignored). """ + q = self.q + if q is None: + if self.params["metric"] in {"neighbors", "precomputed"}: + q = 2 + else: + q = len(X[0]) if self.params["metric"] == "neighbors": distances = X[:, : self.k] else: distances = self.knn.transform(X) - distances = distances ** self.q + distances = distances ** q dtm = distances.sum(-1) / self.k - dtm = dtm ** (1.0 / self.q) + dtm = dtm ** (1.0 / q) return dtm diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 02448530..31e4fc9f 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -18,7 +18,7 @@ class KNN: def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs): """ Args: - k (int): number of neighbors (including the point itself). + k (int): number of neighbors (possibly including the point itself). return_index (bool): if True, return the index of each neighbor. return_distance (bool): if True, return the distance to each neighbor. implementation (str): Choice of the library that does the real work. @@ -68,7 +68,7 @@ class KNN: def fit(self, X, y=None): """ Args: - X (numpy.array): coordinates for reference points + X (numpy.array): coordinates for reference points. """ self.ref_points = X if self.params["implementation"] == "ckdtree": @@ -105,7 +105,7 @@ class KNN: def transform(self, X): """ Args: - X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed" + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed". 
""" metric = self.metric k = self.k -- cgit v1.2.3 From a911f9707d44259a38ae3dbb6fbcec75779fc639 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 12:17:29 +0100 Subject: doc --- src/python/gudhi/point_cloud/dtm.py | 2 +- src/python/gudhi/point_cloud/knn.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 520cbea8..3ac69f31 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -20,7 +20,7 @@ class DTM: Args: k (int): number of neighbors (possibly including the point itself). q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if metric is "neighbors" or "distance_matrix". - kwargs: Same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. + kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. """ self.k = k self.q = q diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 31e4fc9f..bb7757f2 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -21,7 +21,7 @@ class KNN: k (int): number of neighbors (possibly including the point itself). return_index (bool): if True, return the index of each neighbor. return_distance (bool): if True, return the distance to each neighbor. - implementation (str): Choice of the library that does the real work. + implementation (str): choice of the library that does the real work. * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes large (10+) but the number of points remains low (less than a million). Only "minkowski" and its aliases are supported. * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported. @@ -31,7 +31,7 @@ class KNN: metric (str): see `sklearn.neighbors.NearestNeighbors`. eps (float): relative error when computing nearest neighbors with the cKDTree. p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2. - n_jobs (int): Number of jobs to schedule for parallel processing of nearest neighbors on the CPU. + n_jobs (int): number of jobs to schedule for parallel processing of nearest neighbors on the CPU. If -1 is given all processors are used. Default: 1. kwargs: additional parameters are forwarded to the backends. 
""" -- cgit v1.2.3 From 990d54f2f13e116f97c1d0f35cbb751015d863fe Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 12:20:57 +0100 Subject: Fix test --- src/python/test/test_dtm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 93b13e1a..1d080ab4 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -30,7 +30,7 @@ def test_dtm_compare_euclidean(): from scipy.spatial.distance import cdist d = cdist(pts, pts) - dtm = DTM(k, metric="precomputed") + dtm = DTM(k, q=2, metric="precomputed") r4 = dtm.fit_transform(d) assert r4 == pytest.approx(r0) dtm = DTM(k, implementation="keops") -- cgit v1.2.3 From 40f4b6fb1fe20c3843b1fd80f99996e6d25c9426 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 12:26:36 +0100 Subject: Comment --- src/python/gudhi/point_cloud/dtm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 3ac69f31..ba011eaf 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -59,4 +59,6 @@ class DTM: distances = distances ** q dtm = distances.sum(-1) / self.k dtm = dtm ** (1.0 / q) + # We compute too many powers, 1/p in knn then q in dtm, 1/q in dtm then q or some log in the caller. + # Add option to skip the final root? return dtm -- cgit v1.2.3 From 7f323484acdeafca93efdd9bdd20ed428f8fb95b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 12:45:00 +0100 Subject: Optional sort_results --- src/python/gudhi/point_cloud/dtm.py | 4 +--- src/python/gudhi/point_cloud/knn.py | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index ba011eaf..678524f2 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -35,9 +35,7 @@ class DTM: X (numpy.array): coordinates for mass points. """ if self.params.setdefault("metric", "euclidean") != "neighbors": - # KNN gives sorted distances, which is unnecessary here. - # Maybe add a parameter to say we don't need sorting? - self.knn = KNN(self.k, return_index=False, return_distance=True, **self.params) + self.knn = KNN(self.k, return_index=False, return_distance=True, sort_results=False, **self.params) self.knn.fit(X) return self diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index bb7757f2..8369f1f8 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -33,6 +33,9 @@ class KNN: p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2. n_jobs (int): number of jobs to schedule for parallel processing of nearest neighbors on the CPU. If -1 is given all processors are used. Default: 1. + sort_results (bool): if True, then distances and indices of each point are + sorted on return, so that the first column contains the closest points. + Otherwise, neighbors are returned in an arbitrary order. Defaults to True. kwargs: additional parameters are forwarded to the backends. 
""" self.k = k @@ -115,18 +118,22 @@ class KNN: X = numpy.array(X) if self.return_index: neighbors = numpy.argpartition(X, k - 1)[:, 0:k] - distances = numpy.take_along_axis(X, neighbors, axis=-1) - ngb_order = numpy.argsort(distances, axis=-1) - neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + if self.params.get("sort_results", True): + X = numpy.take_along_axis(X, neighbors, axis=-1) + ngb_order = numpy.argsort(X, axis=-1) + neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + else: + ngb_order = neighbors if self.return_distance: - distances = numpy.take_along_axis(distances, ngb_order, axis=-1) + distances = numpy.take_along_axis(X, ngb_order, axis=-1) return neighbors, distances else: return neighbors if self.return_distance: distances = numpy.partition(X, k - 1)[:, 0:k] - # partition is not guaranteed to sort the lower half, although it often does - distances.sort(axis=-1) + if self.params.get("sort_results"): + # partition is not guaranteed to sort the lower half, although it often does + distances.sort(axis=-1) return distances return None -- cgit v1.2.3 From 75286efcf311f0c7c46a7039970d663f60953e14 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 12:59:01 +0100 Subject: Fix test --- src/python/test/test_dtm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 1d080ab4..33b2f3a2 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -30,7 +30,7 @@ def test_dtm_compare_euclidean(): from scipy.spatial.distance import cdist d = cdist(pts, pts) - dtm = DTM(k, q=2, metric="precomputed") + dtm = DTM(k, q=4, metric="precomputed") r4 = dtm.fit_transform(d) assert r4 == pytest.approx(r0) dtm = DTM(k, implementation="keops") -- cgit v1.2.3 From dd9457649d0d197bbed6402200e0f2f55655680e Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 15:39:15 +0100 Subject: Default param of 2 for DTM --- src/python/gudhi/point_cloud/dtm.py | 14 ++++---------- src/python/test/test_dtm.py | 2 +- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 678524f2..c26ba844 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -15,11 +15,11 @@ class DTM: Class to compute the distance to the empirical measure defined by a point set. """ - def __init__(self, k, q=None, **kwargs): + def __init__(self, k, q=2, **kwargs): """ Args: k (int): number of neighbors (possibly including the point itself). - q (float): order used to compute the distance to measure. Defaults to the dimension, or 2 if metric is "neighbors" or "distance_matrix". + q (float): order used to compute the distance to measure. Defaults to 2. kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. """ self.k = k @@ -44,19 +44,13 @@ class DTM: Args: X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", or distances to the k nearest neighbors if metric is "neighbors" (if the array has more than k columns, the remaining ones are ignored). 
""" - q = self.q - if q is None: - if self.params["metric"] in {"neighbors", "precomputed"}: - q = 2 - else: - q = len(X[0]) if self.params["metric"] == "neighbors": distances = X[:, : self.k] else: distances = self.knn.transform(X) - distances = distances ** q + distances = distances ** self.q dtm = distances.sum(-1) / self.k - dtm = dtm ** (1.0 / q) + dtm = dtm ** (1.0 / self.q) # We compute too many powers, 1/p in knn then q in dtm, 1/q in dtm then q or some log in the caller. # Add option to skip the final root? return dtm diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 33b2f3a2..93b13e1a 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -30,7 +30,7 @@ def test_dtm_compare_euclidean(): from scipy.spatial.distance import cdist d = cdist(pts, pts) - dtm = DTM(k, q=4, metric="precomputed") + dtm = DTM(k, metric="precomputed") r4 = dtm.fit_transform(d) assert r4 == pytest.approx(r0) dtm = DTM(k, implementation="keops") -- cgit v1.2.3 From 8d06fbeae596a0372bf9a921de7d04cc734eaa3b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 30 Mar 2020 08:14:46 +0200 Subject: Biblio --- biblio/bibliography.bib | 15 +++++++++++++++ src/python/gudhi/point_cloud/dtm.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index 3bbe7960..f9d43638 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -1192,3 +1192,18 @@ numpages = {11}, location = {Montr\'{e}al, Canada}, series = {NIPS’18} } +@Article{dtm, +author={Chazal, Fr{\'e}d{\'e}ric +and Cohen-Steiner, David +and M{\'e}rigot, Quentin}, +title={Geometric Inference for Probability Measures}, +journal={Foundations of Computational Mathematics}, +year={2011}, +volume={11}, +number={6}, +pages={733-751}, +abstract={Data often comes in the form of a point cloud sampled from an unknown compact subset of Euclidean space. The general goal of geometric inference is then to recover geometric and topological features (e.g., Betti numbers, normals) of this subset from the approximating point cloud data. It appears that the study of distance functions allows one to address many of these questions successfully. However, one of the main limitations of this framework is that it does not cope well with outliers or with background noise. In this paper, we show how to extend the framework of distance functions to overcome this problem. Replacing compact subsets by measures, we introduce a notion of distance function to a probability distribution in Rd. These functions share many properties with classical distance functions, which make them suitable for inference purposes. In particular, by considering appropriate level sets of these distance functions, we show that it is possible to reconstruct offsets of sampled shapes with topological guarantees even in the presence of outliers. Moreover, in settings where empirical measures are considered, these functions can be easily evaluated, making them of particular practical interest.}, +issn={1615-3383}, +doi={10.1007/s10208-011-9098-0}, +url={https://doi.org/10.1007/s10208-011-9098-0} +} diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index c26ba844..23c36b88 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -12,7 +12,7 @@ from .knn import KNN class DTM: """ - Class to compute the distance to the empirical measure defined by a point set. 
+ Class to compute the distance to the empirical measure defined by a point set, as introduced in :cite:`dtm`. """ def __init__(self, k, q=2, **kwargs): -- cgit v1.2.3 From 0a404547afec2e43dd5edf9410ff079d156d691a Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 30 Mar 2020 08:18:38 +0200 Subject: One more ref, to be cited later --- biblio/bibliography.bib | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index f9d43638..056ccd72 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -1207,3 +1207,15 @@ issn={1615-3383}, doi={10.1007/s10208-011-9098-0}, url={https://doi.org/10.1007/s10208-011-9098-0} } +@article{dtmdensity, +author = "Biau, Gérard and Chazal, Frédéric and Cohen-Steiner, David and Devroye, Luc and Rodríguez, Carlos", +doi = "10.1214/11-EJS606", +fjournal = "Electronic Journal of Statistics", +journal = "Electron. J. Statist.", +pages = "204--237", +publisher = "The Institute of Mathematical Statistics and the Bernoulli Society", +title = "A weighted k-nearest neighbor density estimate for geometric inference", +url = "https://doi.org/10.1214/11-EJS606", +volume = "5", +year = "2011" +} -- cgit v1.2.3 From c5c565dfd92ce1ad5b318dca40edf9429d6334c2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 30 Mar 2020 20:46:56 +0200 Subject: Streamline initialize_filtration --- src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 3 -- .../utilities/alpha_complex_3d_persistence.cpp | 3 -- .../utilities/alpha_complex_persistence.cpp | 3 -- .../alpha_rips_persistence_bottleneck_distance.cpp | 6 --- .../example/custom_persistence_sort.cpp | 3 -- .../example/persistence_from_file.cpp | 3 -- .../example/plain_homology.cpp | 3 -- .../example/rips_multifield_persistence.cpp | 3 -- .../example/rips_persistence_step_by_step.cpp | 3 -- .../include/gudhi/Persistent_cohomology.h | 2 - .../rips_correlation_matrix_persistence.cpp | 3 -- .../utilities/rips_distance_matrix_persistence.cpp | 3 -- src/Rips_complex/utilities/rips_persistence.cpp | 3 -- .../utilities/sparse_rips_persistence.cpp | 3 -- src/Simplex_tree/include/gudhi/Simplex_tree.h | 56 ++++++++++++++-------- src/python/doc/simplex_tree_ref.rst | 1 - .../example/alpha_complex_from_points_example.py | 3 -- src/python/example/simplex_tree_example.py | 1 - src/python/gudhi/simplex_tree.pxd | 3 +- src/python/gudhi/simplex_tree.pyx | 50 ++----------------- src/python/include/Alpha_complex_interface.h | 1 - .../Euclidean_strong_witness_complex_interface.h | 2 - .../include/Euclidean_witness_complex_interface.h | 2 - src/python/include/Nerve_gic_interface.h | 1 - src/python/include/Rips_complex_interface.h | 1 - src/python/include/Simplex_tree_interface.h | 15 +++--- .../include/Strong_witness_complex_interface.h | 2 - src/python/include/Tangential_complex_interface.h | 1 - src/python/include/Witness_complex_interface.h | 2 - src/python/test/test_simplex_tree.py | 3 -- 30 files changed, 48 insertions(+), 140 deletions(-) diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp index da1d8004..4b37e4bd 100644 --- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp +++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp @@ -188,9 +188,6 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) { // Test after prune_above_filtration bool modified = simplex_tree.prune_above_filtration(0.6); - if (modified) { - simplex_tree.initialize_filtration(); - } BOOST_CHECK(modified); // Another way to check num_simplices 
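The change is mechanical across all the call sites below: mutating operations now invalidate the filtration cache themselves, so callers no longer sort by hand. A Python-side sketch of the resulting behaviour, mirroring the pruning unit test just above and assuming only ``SimplexTree`` methods this commit touches::

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1, 2], filtration=4.0)
    if st.prune_above_filtration(0.6):
        # no initialize_filtration() needed: the cache is cleared by the
        # pruning and rebuilt lazily on the next filtration traversal
        print(list(st.get_filtration()))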
diff --git a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp index e93c412e..91899040 100644 --- a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp +++ b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp @@ -222,9 +222,6 @@ int main(int argc, char **argv) { break; } - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - std::clog << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl; // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree, true); diff --git a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp index be60ff78..7c898dfd 100644 --- a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp +++ b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp @@ -75,9 +75,6 @@ int main(int argc, char **argv) { std::clog << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." << std::endl; - // Sort the simplices in the order of the filtration - simplex.initialize_filtration(); - std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl; // Compute the persistence diagram of the complex Gudhi::persistent_cohomology::Persistent_cohomology pcoh( diff --git a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp index 4769eca3..ceb9e226 100644 --- a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp +++ b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp @@ -71,9 +71,6 @@ int main(int argc, char * argv[]) { std::clog << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension " << rips_stree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - rips_stree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology rips_pcoh(rips_stree); // initializes the coefficient field for homology @@ -92,9 +89,6 @@ int main(int argc, char * argv[]) { std::clog << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension " << alpha_stree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - alpha_stree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology alpha_pcoh(alpha_stree); // initializes the coefficient field for homology diff --git a/src/Persistent_cohomology/example/custom_persistence_sort.cpp b/src/Persistent_cohomology/example/custom_persistence_sort.cpp index 87e9c207..410cd987 100644 --- a/src/Persistent_cohomology/example/custom_persistence_sort.cpp +++ b/src/Persistent_cohomology/example/custom_persistence_sort.cpp @@ -86,9 +86,6 @@ int main(int argc, char **argv) { " - " << simplex.num_simplices() << " simplices - " << simplex.num_vertices() << " vertices." 
<< std::endl; - // Sort the simplices in the order of the filtration - simplex.initialize_filtration(); - std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl; Persistent_cohomology pcoh(simplex); diff --git a/src/Persistent_cohomology/example/persistence_from_file.cpp b/src/Persistent_cohomology/example/persistence_from_file.cpp index 79108730..38c44514 100644 --- a/src/Persistent_cohomology/example/persistence_from_file.cpp +++ b/src/Persistent_cohomology/example/persistence_from_file.cpp @@ -59,9 +59,6 @@ int main(int argc, char * argv[]) { std::clog << std::endl; }*/ - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology< Simplex_tree<>, Field_Zp > pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Persistent_cohomology/example/plain_homology.cpp b/src/Persistent_cohomology/example/plain_homology.cpp index 4d329020..236b67de 100644 --- a/src/Persistent_cohomology/example/plain_homology.cpp +++ b/src/Persistent_cohomology/example/plain_homology.cpp @@ -59,9 +59,6 @@ int main() { st.insert_simplex_and_subfaces(edge35); st.insert_simplex(vertex4); - // Sort the simplices in the order of the filtration - st.initialize_filtration(); - // Class for homology computation // By default, since the complex has dimension 1, only 0-dimensional homology would be computed. // Here we also want persistent homology to be computed for the maximal dimension in the complex (persistence_dim_max = true) diff --git a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp index e2e2c0a5..2edf5bc4 100644 --- a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp +++ b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp @@ -59,9 +59,6 @@ int main(int argc, char * argv[]) { std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp index 7da9f15d..a503d983 100644 --- a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp +++ b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp @@ -76,9 +76,6 @@ int main(int argc, char * argv[]) { std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; std::clog << " and has dimension " << st.dimension() << " \n"; - // Sort the simplices in the order of the filtration - st.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology pcoh(st); // initializes the coefficient field for homology diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index ca4bc10d..bc111f94 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -561,7 +561,6 @@ class Persistent_cohomology { void output_diagram(std::ostream& ostream = std::cout) { cmp_intervals_by_length 
cmp(cpx_); std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); - bool has_infinity = std::numeric_limits::has_infinity; for (auto pair : persistent_pairs_) { ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " " << cpx_->filtration(get<0>(pair)) << " " @@ -573,7 +572,6 @@ class Persistent_cohomology { std::ofstream diagram_out(diagram_name.c_str()); cmp_intervals_by_length cmp(cpx_); std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); - bool has_infinity = std::numeric_limits::has_infinity; for (auto pair : persistent_pairs_) { diagram_out << cpx_->dimension(get<0>(pair)) << " " << cpx_->filtration(get<0>(pair)) << " " diff --git a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp index 67f921a6..b473738e 100644 --- a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp +++ b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp @@ -71,9 +71,6 @@ int main(int argc, char* argv[]) { std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp index 4ad19675..6306755d 100644 --- a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp +++ b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp @@ -50,9 +50,6 @@ int main(int argc, char* argv[]) { std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Rips_complex/utilities/rips_persistence.cpp b/src/Rips_complex/utilities/rips_persistence.cpp index 4cc63d3c..9d7490b3 100644 --- a/src/Rips_complex/utilities/rips_persistence.cpp +++ b/src/Rips_complex/utilities/rips_persistence.cpp @@ -52,9 +52,6 @@ int main(int argc, char* argv[]) { std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Rips_complex/utilities/sparse_rips_persistence.cpp b/src/Rips_complex/utilities/sparse_rips_persistence.cpp index 40606158..ac935b41 100644 --- a/src/Rips_complex/utilities/sparse_rips_persistence.cpp +++ b/src/Rips_complex/utilities/sparse_rips_persistence.cpp @@ -54,9 +54,6 @@ int main(int argc, char* argv[]) { std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n"; std::clog << " and has dimension " << simplex_tree.dimension() << " \n"; - // Sort the simplices in the order of the filtration - simplex_tree.initialize_filtration(); - // Compute the 
persistence diagram of the complex Persistent_cohomology pcoh(simplex_tree); // initializes the coefficient field for homology diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index b455ae31..43250795 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -142,7 +142,10 @@ class Simplex_tree { public: /** \brief Handle type to a simplex contained in the simplicial complex represented - * by the simplex tree. */ + * by the simplex tree. + * + * They are essentially pointers into internal vectors, and any insertion or removal + * of a simplex may invalidate any other Simplex_handle in the complex. */ typedef typename Dictionary::iterator Simplex_handle; private: @@ -255,11 +258,9 @@ class Simplex_tree { * * The filtration must be valid. If the filtration has not been initialized yet, the * method initializes it (i.e. order the simplices). If the complex has changed since the last time the filtration - * was initialized, please call `initialize_filtration()` to recompute it. */ + * was initialized, please call `clear_filtration()` or `initialize_filtration()` to recompute it. */ Filtration_simplex_range const& filtration_simplex_range(Indexing_tag = Indexing_tag()) { - if (filtration_vect_.empty()) { - initialize_filtration(); - } + maybe_initialize_filtration(); return filtration_vect_; } @@ -877,15 +878,13 @@ class Simplex_tree { } public: - /** \brief Initializes the filtrations, i.e. sort the - * simplices according to their order in the filtration and initializes all Simplex_keys. + /** \brief Initializes the filtration cache, i.e. sorts the + * simplices according to their order in the filtration. * - * After calling this method, filtration_simplex_range() becomes valid, and each simplex is - * assigned a Simplex_key corresponding to its order in the filtration (from 0 to m-1 for a - * simplicial complex with m simplices). + * It always recomputes the cache, even if one already exists. * - * Will be automatically called when calling filtration_simplex_range() - * if the filtration has never been initialized yet. */ + * Any insertion, deletion or change of filtration value invalidates this cache, + * which can be cleared with clear_filtration(). */ void initialize_filtration() { filtration_vect_.clear(); filtration_vect_.reserve(num_simplices()); @@ -907,6 +906,21 @@ class Simplex_tree { std::stable_sort(filtration_vect_.begin(), filtration_vect_.end(), is_before_in_filtration(this)); #endif } + /** \brief Initializes the filtration cache if it isn't initialized yet. + * + * Automatically called by filtration_simplex_range(). */ + void maybe_initialize_filtration() { + if (filtration_vect_.empty()) { + initialize_filtration(); + } + } + /** \brief Clears the filtration cache produced by initialize_filtration(). + * + * Useful when initialize_filtration() has already been called and we perform an operation + * (say an insertion) that invalidates the cache. */ + void clear_filtration() { + filtration_vect_.clear(); + } private: /** Recursive search of cofaces @@ -1128,6 +1142,7 @@ class Simplex_tree { * 1 when calling the method. */ void expansion(int max_dim) { if (max_dim <= 1) return; + clear_filtration(); // Drop the cache. 
dimension_ = max_dim; for (Dictionary_it root_it = root_.members_.begin(); root_it != root_.members_.end(); ++root_it) { @@ -1338,9 +1353,6 @@ class Simplex_tree { /** \brief This function ensures that each simplex has a higher filtration value than its faces by increasing the * filtration values. * @return True if any filtration value was modified, false if the filtration was already non-decreasing. - * \post Some simplex tree functions require the filtration to be valid. `make_filtration_non_decreasing()` - * function is not launching `initialize_filtration()` but returns the filtration modification information. If the - * complex has changed , please call `initialize_filtration()` to recompute it. * * If a simplex has a `NaN` filtration value, it is considered lower than any other defined filtration value. */ @@ -1352,6 +1364,8 @@ class Simplex_tree { modified |= rec_make_filtration_non_decreasing(simplex.second.children()); } } + if(modified) + clear_filtration(); // Drop the cache. return modified; } @@ -1391,16 +1405,16 @@ class Simplex_tree { public: /** \brief Prune above filtration value given as parameter. * @param[in] filtration Maximum threshold value. - * @return The filtration modification information. - * \post Some simplex tree functions require the filtration to be valid. `prune_above_filtration()` - * function is not launching `initialize_filtration()` but returns the filtration modification information. If the - * complex has changed , please call `initialize_filtration()` to recompute it. + * @return True if any simplex was removed, false if all simplices already had a value below the threshold. * \post Note that the dimension of the simplicial complex may be lower after calling `prune_above_filtration()` * than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper * bound. If you care, you can call `dimension()` to recompute the exact dimension. */ bool prune_above_filtration(Filtration_value filtration) { - return rec_prune_above_filtration(root(), filtration); + bool modified = rec_prune_above_filtration(root(), filtration); + if(modified) + clear_filtration(); // Drop the cache. + return modified; } private: @@ -1467,7 +1481,6 @@ class Simplex_tree { * @param[in] sh Simplex handle on the maximal simplex to remove. * \pre Please check the simplex has no coface before removing it. * \exception std::invalid_argument In debug mode, if sh has children. - * \post Be aware that removing is shifting data in a flat_map (initialize_filtration to be done). * \post Note that the dimension of the simplicial complex may be lower after calling `remove_maximal_simplex()` * than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper * bound. If you care, you can call `dimension()` to recompute the exact dimension. @@ -1539,6 +1552,7 @@ class Simplex_tree { * the original filtration values for each simplex. */ Extended_filtration_data extend_filtration() { + clear_filtration(); // Drop the cache. // Compute maximum and minimum of filtration values Vertex_handle maxvert = std::numeric_limits::min(); diff --git a/src/python/doc/simplex_tree_ref.rst b/src/python/doc/simplex_tree_ref.rst index 9eb8c199..46b2c1e5 100644 --- a/src/python/doc/simplex_tree_ref.rst +++ b/src/python/doc/simplex_tree_ref.rst @@ -8,7 +8,6 @@ Simplex tree reference manual .. autoclass:: gudhi.SimplexTree :members: - :undoc-members: :show-inheritance: .. 
automethod:: gudhi.SimplexTree.__init__ diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py index 73faf17c..465632eb 100755 --- a/src/python/example/alpha_complex_from_points_example.py +++ b/src/python/example/alpha_complex_from_points_example.py @@ -46,9 +46,6 @@ if simplex_tree.find([4]): else: print("[4] Not found...") -# Some insertions, simplex_tree needs to initialize filtrations -simplex_tree.initialize_filtration() - print("dimension=", simplex_tree.dimension()) print("filtrations=") for simplex_with_filtration in simplex_tree.get_filtration(): diff --git a/src/python/example/simplex_tree_example.py b/src/python/example/simplex_tree_example.py index 34833899..c4635dc5 100755 --- a/src/python/example/simplex_tree_example.py +++ b/src/python/example/simplex_tree_example.py @@ -42,7 +42,6 @@ print("simplices=") for simplex_with_filtration in st.get_simplices(): print("(%s, %.2f)" % tuple(simplex_with_filtration)) -st.initialize_filtration() print("filtration=") for simplex_with_filtration in st.get_filtration(): print("(%s, %.2f)" % tuple(simplex_with_filtration)) diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 595f22bb..7e3bba2b 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -48,8 +48,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": int dimension() int upper_bound_dimension() bool find_simplex(vector[int] simplex) - bool insert_simplex_and_subfaces(vector[int] simplex, - double filtration) + bool insert(vector[int] simplex, double filtration) vector[pair[vector[int], double]] get_star(vector[int] simplex) vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index cc3753e1..a709980f 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -90,7 +90,7 @@ cdef class SimplexTree: (with more :meth:`assign_filtration` or :meth:`make_filtration_non_decreasing` for instance) before calling any function that relies on the filtration property, like - :meth:`initialize_filtration`. + :meth:`persistence`. """ self.get_ptr().assign_simplex_filtration(simplex, filtration) @@ -98,16 +98,7 @@ cdef class SimplexTree: """This function initializes and sorts the simplicial complex filtration vector. - .. note:: - - This function must be launched before - :func:`persistence()`, - :func:`betti_numbers()`, - :func:`persistent_betti_numbers()`, - or :func:`get_filtration()` - after :func:`inserting` or - :func:`removing` - simplices. + .. deprecated:: 3.2.0 """ self.get_ptr().initialize_filtration() @@ -182,10 +173,7 @@ cdef class SimplexTree: :returns: true if the simplex was found, false otherwise. :rtype: bool """ - cdef vector[int] csimplex - for i in simplex: - csimplex.push_back(i) - return self.get_ptr().find_simplex(csimplex) + return self.get_ptr().find_simplex(simplex) def insert(self, simplex, filtration=0.0): """This function inserts the given N-simplex and its subfaces with the @@ -202,11 +190,7 @@ cdef class SimplexTree: otherwise (whatever its original filtration value). 
:rtype: bool """ - cdef vector[int] csimplex - for i in simplex: - csimplex.push_back(i) - return self.get_ptr().insert_simplex_and_subfaces(csimplex, - filtration) + return self.get_ptr().insert(simplex, filtration) def get_simplices(self): """This function returns a generator with simplices and their given @@ -306,11 +290,6 @@ cdef class SimplexTree: :param simplex: The N-simplex, represented by a list of vertex. :type simplex: list of int. - .. note:: - - Be aware that removing is shifting data in a flat_map - (:func:`initialize_filtration()` to be done). - .. note:: The dimension of the simplicial complex may be lower after calling @@ -332,16 +311,6 @@ cdef class SimplexTree: :rtype: bool - .. note:: - - Some simplex tree functions require the filtration to be valid. - prune_above_filtration function is not launching - :func:`initialize_filtration()` - but returns the filtration modification - information. If the complex has changed , please call - :func:`initialize_filtration()` - to recompute it. - .. note:: Note that the dimension of the simplicial complex may be lower @@ -382,17 +351,6 @@ cdef class SimplexTree: :returns: True if any filtration value was modified, False if the filtration was already non-decreasing. :rtype: bool - - - .. note:: - - Some simplex tree functions require the filtration to be valid. - make_filtration_non_decreasing function is not launching - :func:`initialize_filtration()` - but returns the filtration modification - information. If the complex has changed , please call - :func:`initialize_filtration()` - to recompute it. """ return self.get_ptr().make_filtration_non_decreasing() diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h index 8614eee3..40de88f3 100644 --- a/src/python/include/Alpha_complex_interface.h +++ b/src/python/include/Alpha_complex_interface.h @@ -58,7 +58,6 @@ class Alpha_complex_interface { void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) { alpha_complex_->create_complex(*simplex_tree, max_alpha_square); - simplex_tree->initialize_filtration(); } private: diff --git a/src/python/include/Euclidean_strong_witness_complex_interface.h b/src/python/include/Euclidean_strong_witness_complex_interface.h index c1c72737..f94c51ef 100644 --- a/src/python/include/Euclidean_strong_witness_complex_interface.h +++ b/src/python/include/Euclidean_strong_witness_complex_interface.h @@ -50,12 +50,10 @@ class Euclidean_strong_witness_complex_interface { void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square, std::size_t limit_dimension) { witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension); - simplex_tree->initialize_filtration(); } void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) { witness_complex_->create_complex(*simplex_tree, max_alpha_square); - simplex_tree->initialize_filtration(); } std::vector get_point(unsigned vh) { diff --git a/src/python/include/Euclidean_witness_complex_interface.h b/src/python/include/Euclidean_witness_complex_interface.h index 5d7dbdc2..4411ae79 100644 --- a/src/python/include/Euclidean_witness_complex_interface.h +++ b/src/python/include/Euclidean_witness_complex_interface.h @@ -49,12 +49,10 @@ class Euclidean_witness_complex_interface { void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square, std::size_t limit_dimension) { witness_complex_->create_complex(*simplex_tree, max_alpha_square, 
limit_dimension); - simplex_tree->initialize_filtration(); } void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) { witness_complex_->create_complex(*simplex_tree, max_alpha_square); - simplex_tree->initialize_filtration(); } std::vector get_point(unsigned vh) { diff --git a/src/python/include/Nerve_gic_interface.h b/src/python/include/Nerve_gic_interface.h index 5e7f8ae6..ab14c318 100644 --- a/src/python/include/Nerve_gic_interface.h +++ b/src/python/include/Nerve_gic_interface.h @@ -29,7 +29,6 @@ class Nerve_gic_interface : public Cover_complex> { public: void create_simplex_tree(Simplex_tree_interface<>* simplex_tree) { create_complex(*simplex_tree); - simplex_tree->initialize_filtration(); } void set_cover_from_Euclidean_Voronoi(int m) { set_cover_from_Voronoi(Gudhi::Euclidean_distance(), m); diff --git a/src/python/include/Rips_complex_interface.h b/src/python/include/Rips_complex_interface.h index a66b0e5b..d98b0226 100644 --- a/src/python/include/Rips_complex_interface.h +++ b/src/python/include/Rips_complex_interface.h @@ -53,7 +53,6 @@ class Rips_complex_interface { rips_complex_->create_complex(*simplex_tree, dim_max); else sparse_rips_complex_->create_complex(*simplex_tree, dim_max); - simplex_tree->initialize_filtration(); } private: diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 1a18aed6..5b456baa 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -43,16 +43,19 @@ class Simplex_tree_interface : public Simplex_tree { Extended_filtration_data efd; - bool find_simplex(const Simplex& vh) { - return (Base::find(vh) != Base::null_simplex()); + bool find_simplex(const Simplex& simplex) { + return (Base::find(simplex) != Base::null_simplex()); } - void assign_simplex_filtration(const Simplex& vh, Filtration_value filtration) { - Base::assign_filtration(Base::find(vh), filtration); + void assign_simplex_filtration(const Simplex& simplex, Filtration_value filtration) { + Base::assign_filtration(Base::find(simplex), filtration); + Base::clear_filtration(); } bool insert(const Simplex& simplex, Filtration_value filtration = 0) { Insertion_result result = Base::insert_simplex_and_subfaces(simplex, filtration); + if (result.first != Base::null_simplex()) + Base::clear_filtration(); return (result.second); } @@ -86,7 +89,7 @@ class Simplex_tree_interface : public Simplex_tree { void remove_maximal_simplex(const Simplex& simplex) { Base::remove_maximal_simplex(Base::find(simplex)); - Base::initialize_filtration(); + Base::clear_filtration(); } Simplex_and_filtration get_simplex_and_filtration(Simplex_handle f_simplex) { @@ -123,7 +126,6 @@ class Simplex_tree_interface : public Simplex_tree { void compute_extended_filtration() { this->efd = this->extend_filtration(); - this->initialize_filtration(); return; } @@ -158,7 +160,6 @@ class Simplex_tree_interface : public Simplex_tree { } void create_persistence(Gudhi::Persistent_cohomology_interface* pcoh) { - Base::initialize_filtration(); pcoh = new Gudhi::Persistent_cohomology_interface(*this); } diff --git a/src/python/include/Strong_witness_complex_interface.h b/src/python/include/Strong_witness_complex_interface.h index cda5b514..e9ab0c7b 100644 --- a/src/python/include/Strong_witness_complex_interface.h +++ b/src/python/include/Strong_witness_complex_interface.h @@ -41,13 +41,11 @@ class Strong_witness_complex_interface { void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, 
double max_alpha_square, std::size_t limit_dimension) { witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension); - simplex_tree->initialize_filtration(); } void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) { witness_complex_->create_complex(*simplex_tree, max_alpha_square); - simplex_tree->initialize_filtration(); } private: diff --git a/src/python/include/Tangential_complex_interface.h b/src/python/include/Tangential_complex_interface.h index 698226cc..b1afce94 100644 --- a/src/python/include/Tangential_complex_interface.h +++ b/src/python/include/Tangential_complex_interface.h @@ -90,7 +90,6 @@ class Tangential_complex_interface { void create_simplex_tree(Simplex_tree<>* simplex_tree) { tangential_complex_->create_complex>(*simplex_tree); - simplex_tree->initialize_filtration(); } void set_max_squared_edge_length(double max_squared_edge_length) { diff --git a/src/python/include/Witness_complex_interface.h b/src/python/include/Witness_complex_interface.h index 45e14253..76947e53 100644 --- a/src/python/include/Witness_complex_interface.h +++ b/src/python/include/Witness_complex_interface.h @@ -41,13 +41,11 @@ class Witness_complex_interface { void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square, std::size_t limit_dimension) { witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension); - simplex_tree->initialize_filtration(); } void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) { witness_complex_->create_complex(*simplex_tree, max_alpha_square); - simplex_tree->initialize_filtration(); } private: diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py index 70b26e97..2137d822 100755 --- a/src/python/test/test_simplex_tree.py +++ b/src/python/test/test_simplex_tree.py @@ -46,7 +46,6 @@ def test_insertion(): assert st.find([2, 3]) == False # filtration test - st.initialize_filtration() assert st.filtration([0, 1, 2]) == 4.0 assert st.filtration([0, 2]) == 4.0 assert st.filtration([1, 2]) == 4.0 @@ -93,7 +92,6 @@ def test_insertion(): assert st.find([1]) == True assert st.find([2]) == True - st.initialize_filtration() assert st.persistence(persistence_dim_max=True) == [ (1, (4.0, float("inf"))), (0, (0.0, float("inf"))), @@ -151,7 +149,6 @@ def test_expansion(): st.expansion(3) assert st.num_vertices() == 7 assert st.num_simplices() == 22 - st.initialize_filtration() assert list(st.get_filtration()) == [ ([2], 0.1), -- cgit v1.2.3 From 889d7c92e9cdbab28eba53a9de38a7a0fb27688d Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:20:49 +0200 Subject: added Turner 2014 Frechet in the bibliography --- biblio/bibliography.bib | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index 3bbe7960..ca958e80 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -1192,3 +1192,14 @@ numpages = {11}, location = {Montr\'{e}al, Canada}, series = {NIPS’18} } + +@article{turner2014frechet, + title={Fr{\'e}chet means for distributions of persistence diagrams}, + author={Turner, Katharine and Mileyko, Yuriy and Mukherjee, Sayan and Harer, John}, + journal={Discrete \& Computational Geometry}, + volume={52}, + number={1}, + pages={44--70}, + year={2014}, + publisher={Springer} +} -- cgit v1.2.3 From 4cdc7f03fb5917134ba8886b026c8990f56bcfeb Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:21:27 +0200 Subject: 
merged doc from barycenters to wasserstein distance --- src/python/doc/wasserstein_distance_sum.inc | 10 +-- src/python/doc/wasserstein_distance_user.rst | 91 ++++++++++++++++++++++++++-- 2 files changed, 92 insertions(+), 9 deletions(-) diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc index a97f428d..09424de2 100644 --- a/src/python/doc/wasserstein_distance_sum.inc +++ b/src/python/doc/wasserstein_distance_sum.inc @@ -3,11 +3,11 @@ +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe | - | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams. It's the minimum value c that can be achieved | | - | :figclass: align-center | by a perfect matching between the points of the two diagrams (+ all | :Introduced in: GUDHI 3.1.0 | - | | diagonal points), where the value of a matching is defined as the | | - | Wasserstein distance is the q-th root of the sum of the | q-th root of the sum of all edge lengths to the power q. Edge lengths| :Copyright: MIT | - | edge lengths to the power q. | are measured in norm p, for :math:`1 \leq p \leq \infty`. | | + | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | | + | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Introduced in: GUDHI 3.1.0 | + | | barycenters of a family of persistence diagrams. | | + | Wasserstein distance is the q-th root of the sum of the | | :Copyright: MIT | + | edge lengths to the power q. | | | | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | * :doc:`wasserstein_distance_user` | | diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index a9b21fa5..6de05afc 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -9,10 +9,16 @@ Definition .. include:: wasserstein_distance_sum.inc -Functions ---------- -This implementation uses the Python Optimal Transport library and is based on -ideas from "Large Scale Computation of Means and Cluster for Persistence +The q-Wasserstein distance is defined as the minimal value +by a perfect matching between the points of the two diagrams (+ all +diagonal points), where the value of a matching is defined as the +q-th root of the sum of all edge lengths to the power q. Edge lengths +are measured in norm p, for :math:`1 \leq p \leq \infty`. + +Distance Functions +------------------ +This first implementation uses the Python Optimal Transport library and is based +on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport" :cite:`10.5555/3327546.3327645`. .. 
autofunction:: gudhi.wasserstein.wasserstein_distance @@ -84,3 +90,80 @@ The output is: point 1 in dgm1 is matched to point 2 in dgm2 point 2 in dgm1 is matched to the diagonal point 1 in dgm2 is matched to the diagonal + + +Barycenters +----------- + +A Frechet mean (or barycenter) is a generalization of the arithmetic +mean in a non-linear space such as the one of persistence diagrams. +Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is +defined as a minimizer of the variance functional, that is of +:math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`, +where :math:`d_2` denotes the Wasserstein-2 distance between +persistence diagrams. +It is known to exist and is generically unique. However, an exact +computation is in general intractable. The implementation +currently available is based on Turner et al. +:cite:`turner2014frechet` +and uses an EM-scheme to +provide a local minimum of the variance functional (somewhat similar +to the Lloyd algorithm to estimate a solution to the k-means +problem). The local minimum returned depends on the initialization of +the barycenter. +The combinatorial structure of the algorithm limits its +scaling on large scale problems (thousands of diagrams and of points +per diagram). + +.. figure:: + ./img/barycenter.png + :figclass: align-center + + Illustration of Frechet mean between persistence + diagrams. + + +.. autofunction:: gudhi.barycenter.lagrangian_barycenter + +Basic example +------------- + +This example computes the Frechet mean (aka Wasserstein barycenter) between +four persistence diagrams. +It is initialized on the 4th diagram. +As the algorithm is not convex, its output depends on the initialization and +is only a local minimum of the objective function. +Initialization can be either given as an integer (in which case the i-th +diagram of the list is used as initial estimate) or as a diagram. +If None, it will randomly select one of the diagrams of the list +as initial estimate. +Note that persistence diagrams must be submitted as +(n x 2) numpy arrays and must not contain inf values. + + +.. testcode:: + + import gudhi.barycenter + import numpy as np + + dg1 = np.array([[0.2, 0.5]]) + dg2 = np.array([[0.2, 0.7]]) + dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]]) + dg4 = np.array([]) + pdiagset = [dg1, dg2, dg3, dg4] + bary = gudhi.barycenter.lagrangian_barycenter(pdiagset=pdiagset, init=3) + + message = "Wasserstein barycenter estimated:" + print(message) + print(bary) + +The output is: + +.. testoutput:: + + Wasserstein barycenter estimated: + [[0.27916667 0.55416667] + [0.7375 0.7625 ] + [0.2375 0.2625 ]] + + -- cgit v1.2.3 From 4adbdcf16f311b0b5151311f77cfead5bf065bf4 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:22:50 +0200 Subject: removed barycenters specific doc files as those are included in wasserstein distance now --- src/python/doc/barycenter_sum.inc | 24 --------------- src/python/doc/barycenter_user.rst | 60 -------------------------------------- 2 files changed, 84 deletions(-) delete mode 100644 src/python/doc/barycenter_sum.inc delete mode 100644 src/python/doc/barycenter_user.rst diff --git a/src/python/doc/barycenter_sum.inc b/src/python/doc/barycenter_sum.inc deleted file mode 100644 index da2bdd84..00000000 --- a/src/python/doc/barycenter_sum.inc +++ /dev/null @@ -1,24 +0,0 @@ -..
table:: - :widths: 30 50 20 - - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | .. figure:: | A Frechet mean (or barycenter) is a generalization of the arithmetic | :Author: Theo Lacombe | - | ./img/barycenter.png | mean in a non linear space such as the one of persistence diagrams. | | - | :figclass: align-center | Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is | :Introduced in: GUDHI 3.1.0 | - | | defined as a minimizer of the variance functional, that is of | | - | Illustration of Frechet mean between persistence | :math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`. | :Copyright: MIT | - | diagrams. | where :math:`d_2` denotes the Wasserstein-2 distance between | | - | | persistence diagrams. | | - | | It is known to exist and is generically unique. However, an exact | | - | | computation is in general untractable. Current implementation | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | - | | available is based on [Turner et al, 2014], and uses an EM-scheme to | | - | | provide a local minimum of the variance functional (somewhat similar | | - | | to the Lloyd algorithm to estimate a solution to the k-means | | - | | problem). The local minimum returned depends on the initialization of| | - | | the barycenter. | | - | | The combinatorial structure of the algorithm limits its | | - | | scaling on large scale problems (thousands of diagrams and of points | | - | | per diagram). | | - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | * :doc:`barycenter_user` | | - +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/barycenter_user.rst b/src/python/doc/barycenter_user.rst deleted file mode 100644 index 83e9bebb..00000000 --- a/src/python/doc/barycenter_user.rst +++ /dev/null @@ -1,60 +0,0 @@ -:orphan: - -.. To get rid of WARNING: document isn't included in any toctree - -Barycenter user manual -================================ -Definition ----------- - -.. include:: barycenter_sum.inc - -This implementation is based on ideas from "Frechet means for distribution of -persistence diagrams", Turner et al. 2014. - -Function --------- -.. autofunction:: gudhi.barycenter.lagrangian_barycenter - - -Basic example -------------- - -This example computes the Frechet mean (aka Wasserstein barycenter) between -four persistence diagrams. -It is initialized on the 4th diagram. -As the algorithm is not convex, its output depends on the initialization and -is only a local minimum of the objective function. -Initialization can be either given as an integer (in which case the i-th -diagram of the list is used as initial estimate) or as a diagram. -If None, it will randomly select one of the diagram of the list -as initial estimate. -Note that persistence diagrams must be submitted as -(n x 2) numpy arrays and must not contain inf values. - -.. 
testcode:: - - import gudhi.barycenter - import numpy as np - - dg1 = np.array([[0.2, 0.5]]) - dg2 = np.array([[0.2, 0.7]]) - dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]]) - dg4 = np.array([]) - pdiagset = [dg1, dg2, dg3, dg4] - bary = gudhi.barycenter.lagrangian_barycenter(pdiagset=pdiagset,init=3) - - message = "Wasserstein barycenter estimated:" - print(message) - print(bary) - -The output is: - -.. testoutput:: - - Wasserstein barycenter estimated: - [[0.27916667 0.55416667] - [0.7375 0.7625 ] - [0.2375 0.2625 ]] - - -- cgit v1.2.3 From 9f55afbb17494c67709d9be58bf8bb876f704524 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:24:21 +0200 Subject: added import barycenter on top of the file so that we can call for gudhi.wasserstein.barycenter --- src/python/gudhi/wasserstein.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 3dd993f9..8f03039b 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -9,6 +9,7 @@ import numpy as np import scipy.spatial.distance as sc +import barycenter try: import ot except ImportError: -- cgit v1.2.3 From 7721ac6181fc394ae0136ee176d63210e727f06f Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:40:46 +0200 Subject: modified import in test to get consistent with gudhi.wasserstein.barycenter --- src/python/test/test_wasserstein_barycenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py index 4d18616b..f686aef5 100755 --- a/src/python/test/test_wasserstein_barycenter.py +++ b/src/python/test/test_wasserstein_barycenter.py @@ -1,4 +1,4 @@ -from gudhi.barycenter import lagrangian_barycenter +from gudhi.wasserstein.barycenter import lagrangian_barycenter import numpy as np """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -- cgit v1.2.3 From eeeac06a05ee99ae5780b3f37f107680a680985a Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 11:54:06 +0200 Subject: removed unused import --- src/python/gudhi/barycenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py index 0490fdd1..079bcc57 100644 --- a/src/python/gudhi/barycenter.py +++ b/src/python/gudhi/barycenter.py @@ -12,7 +12,7 @@ import ot import numpy as np import scipy.spatial.distance as sc -from gudhi.wasserstein import wasserstein_distance, _perstot +from gudhi.wasserstein import wasserstein_distance def _mean(x, m): -- cgit v1.2.3 From dae83f0907a5bd94cb483ad0f54755da2d49fb75 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 12:49:22 +0200 Subject: changed into import .barycenter for local import in wasserstein, and modified index to remove barycenter doc --- src/python/doc/index.rst | 4 ---- src/python/gudhi/wasserstein.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index 96cd3513..0e484483 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -71,10 +71,6 @@ Wasserstein distance .. include:: wasserstein_distance_sum.inc -Barycenter -============ - -.. 
include:: barycenter_sum.inc Persistence representations =========================== diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 8f03039b..760eea8c 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -9,7 +9,7 @@ import numpy as np import scipy.spatial.distance as sc -import barycenter +import .barycenter try: import ot except ImportError: -- cgit v1.2.3 From a924e71d2f1a649ca389cfeceb678cc45aaf9fa7 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 12:55:51 +0200 Subject: micro modif changed a word to avoid repetition --- src/python/doc/wasserstein_distance_user.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 6de05afc..a077f9a4 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -112,7 +112,7 @@ to the Lloyd algorithm to estimate a solution to the k-means problem). The local minimum returned depends on the initialization of the barycenter. The combinatorial structure of the algorithm limits its -scaling on large scale problems (thousands of diagrams and of points +performances on large scale problems (thousands of diagrams and of points per diagram). .. figure:: -- cgit v1.2.3 From 1aaffd2e1fab45988d92f5e51a9d294696ff5492 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 13:18:42 +0200 Subject: changed import to import gudhi.barycenter as barycenter --- src/python/gudhi/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py index 760eea8c..51d1d83c 100644 --- a/src/python/gudhi/wasserstein.py +++ b/src/python/gudhi/wasserstein.py @@ -9,7 +9,7 @@ import numpy as np import scipy.spatial.distance as sc -import .barycenter +import gudhi.barycenter as barycenter try: import ot except ImportError: -- cgit v1.2.3 From 842475615841f864b4ce41a2a4b69f1e189a2946 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 15:02:32 +0200 Subject: created wasserstein repo --- src/python/gudhi/barycenter.py | 158 ---------------------------- src/python/gudhi/wasserstein.py | 125 ---------------------- src/python/gudhi/wasserstein/__init__.py | 1 + src/python/gudhi/wasserstein/barycenter.py | 158 ++++++++++++++++++++++++++++ src/python/gudhi/wasserstein/wasserstein.py | 125 ++++++++++++++++++++++ 5 files changed, 284 insertions(+), 283 deletions(-) delete mode 100644 src/python/gudhi/barycenter.py delete mode 100644 src/python/gudhi/wasserstein.py create mode 100644 src/python/gudhi/wasserstein/__init__.py create mode 100644 src/python/gudhi/wasserstein/barycenter.py create mode 100644 src/python/gudhi/wasserstein/wasserstein.py diff --git a/src/python/gudhi/barycenter.py b/src/python/gudhi/barycenter.py deleted file mode 100644 index 079bcc57..00000000 --- a/src/python/gudhi/barycenter.py +++ /dev/null @@ -1,158 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
-# Author(s): Theo Lacombe -# -# Copyright (C) 2019 Inria -# -# Modification(s): -# - YYYY/MM Author: Description of the modification - - -import ot -import numpy as np -import scipy.spatial.distance as sc - -from gudhi.wasserstein import wasserstein_distance - - -def _mean(x, m): - ''' - :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} - :param m: total amount of points taken into account, - that is we have (m-k) copies of diagonal - :returns: the weighted mean of x with (m-k) copies of the diagonal - ''' - k = len(x) - if k > 0: - w = np.mean(x, axis=0) - w_delta = (w[0] + w[1]) / 2 * np.ones(2) - return (k * w + (m-k) * w_delta) / m - else: - return np.array([0, 0]) - - -def lagrangian_barycenter(pdiagset, init=None, verbose=False): - ''' - :param pdiagset: a list of size m containing numpy.array of shape (n x 2) - (n can variate), encoding a set of - persistence diagrams with only finite coordinates. - :param init: The initial value for barycenter estimate. - If None, init is made on a random diagram from the dataset. - Otherwise, it must be an int - (then we init with diagset[init]) - or a (n x 2) numpy.array enconding - a persistence diagram with n points. - :param verbose: if True, returns additional information about the - barycenter. - :returns: If not verbose (default), a numpy.array encoding - the barycenter estimate of pdiagset - (local minima of the energy function). - If pdiagset is empty, returns None. - If verbose, returns a couple (Y, log) - where Y is the barycenter estimate, - and log is a dict that contains additional informations: - - groupings, a list of list of pairs (i,j), - That is, G[k] = [(i, j) ...], where (i,j) indicates - that X[i] is matched to Y[j] - if i = -1 or j = -1, it means they - represent the diagonal. - - energy, a float representing the Frechet - energy value obtained, - that is the mean of squared distances - of observations to the output. - - nb_iter, integer representing the number of iterations - performed before convergence of the algorithm. - ''' - X = pdiagset # to shorten notations, not a copy - m = len(X) # number of diagrams we are averaging - if m == 0: - print("Warning: computing barycenter of empty diag set. Returns None") - return None - - # store the number of off-diagonal point for each of the X_i - nb_off_diag = np.array([len(X_i) for X_i in X]) - # Initialisation of barycenter - if init is None: - i0 = np.random.randint(m) # Index of first state for the barycenter - Y = X[i0].copy() - else: - if type(init)==int: - Y = X[init].copy() - else: - Y = init.copy() - - nb_iter = 0 - - converged = False # stoping criterion - while not converged: - nb_iter += 1 - K = len(Y) # current nb of points in Y (some might be on diagonal) - G = np.full((K, m), -1, dtype=int) # will store for each j, the (index) - # point matched in each other diagram - #(might be the diagonal). - # that is G[j, i] = k <=> y_j is matched to - # x_k in the diagram i-th diagram X[i] - updated_points = np.zeros((K, 2)) # will store the new positions of - # the points of Y. - # If points disappear, there thrown - # on [0,0] by default. - new_created_points = [] # will store potential new points. - - # Step 1 : compute optimal matching (Y, X_i) for each X_i - # and create new points in Y if needed - for i in range(m): - _, indices = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.) - for y_j, x_i_j in indices: - if y_j >= 0: # we matched an off diagonal point to x_i_j... - if x_i_j >= 0: # ...which is also an off-diagonal point. 
- G[y_j, i] = x_i_j - else: # ...which is a diagonal point - G[y_j, i] = -1 # -1 stands for the diagonal (mask) - else: # We matched a diagonal point to x_i_j... - if x_i_j >= 0: # which is a off-diag point ! - # need to create new point in Y - new_y = _mean(np.array([X[i][x_i_j]]), m) - # Average this point with (m-1) copies of Delta - new_created_points.append(new_y) - - # Step 2 : Update current point position thanks to groupings computed - to_delete = [] - for j in range(K): - matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1] - new_y_j = _mean(matched_points, m) - if not np.array_equal(new_y_j, np.array([0,0])): - updated_points[j] = new_y_j - else: # this points is no longer of any use. - to_delete.append(j) - # we remove the point to be deleted now. - updated_points = np.delete(updated_points, to_delete, axis=0) - - # we cannot converge if there have been new created points. - if new_created_points: - Y = np.concatenate((updated_points, new_created_points)) - else: - # Step 3 : we check convergence - if np.array_equal(updated_points, Y): - converged = True - Y = updated_points - - - if verbose: - groupings = [] - energy = 0 - log = {} - n_y = len(Y) - for i in range(m): - cost, edges = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.) - groupings.append(edges) - energy += cost - log["groupings"] = groupings - energy = energy/m - print(energy) - log["energy"] = energy - log["nb_iter"] = nb_iter - - return Y, log - else: - return Y - diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py deleted file mode 100644 index 51d1d83c..00000000 --- a/src/python/gudhi/wasserstein.py +++ /dev/null @@ -1,125 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. -# Author(s): Theo Lacombe -# -# Copyright (C) 2019 Inria -# -# Modification(s): -# - YYYY/MM Author: Description of the modification - -import numpy as np -import scipy.spatial.distance as sc -import gudhi.barycenter as barycenter -try: - import ot -except ImportError: - print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT") - -def _proj_on_diag(X): - ''' - :param X: (n x 2) array encoding the points of a persistent diagram. - :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal - ''' - Z = (X[:,0] + X[:,1]) / 2. - return np.array([Z , Z]).T - - -def _build_dist_matrix(X, Y, order=2., internal_p=2.): - ''' - :param X: (n x 2) numpy.array encoding the (points of the) first diagram. - :param Y: (m x 2) numpy.array encoding the second diagram. - :param order: exponent for the Wasserstein metric. - :param internal_p: Ground metric (i.e. norm L^p). - :returns: (n+1) x (m+1) np.array encoding the cost matrix C. - For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j], - while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j]) - and its orthogonal projection onto the diagonal. - note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal). 
- ''' - Xdiag = _proj_on_diag(X) - Ydiag = _proj_on_diag(Y) - if np.isinf(internal_p): - C = sc.cdist(X,Y, metric='chebyshev')**order - Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order - Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order - else: - C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order - Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order - Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order - Cf = np.hstack((C, Cxd[:,None])) - Cdy = np.append(Cdy, 0) - - Cf = np.vstack((Cf, Cdy[None,:])) - - return Cf - - -def _perstot(X, order, internal_p): - ''' - :param X: (n x 2) numpy.array (points of a given diagram). - :param order: exponent for Wasserstein. Default value is 2. - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). - :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). - ''' - Xdiag = _proj_on_diag(X) - return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order) - - -def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): - ''' - :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points - (i.e. with infinite coordinate). - :param Y: (m x 2) numpy.array encoding the second diagram. - :param matching: if True, computes and returns the optimal matching between X and Y, encoded as - a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to - the j-th point in Y, with the convention (-1) represents the diagonal. - :param order: exponent for Wasserstein; Default value is 2. - :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); - Default value is 2 (Euclidean norm). - :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with - respect to the internal_p-norm as ground metric. - If matching is set to True, also returns the optimal matching between X and Y. - ''' - n = len(X) - m = len(Y) - - # handle empty diagrams - if X.size == 0: - if Y.size == 0: - if not matching: - return 0. - else: - return 0., np.array([]) - else: - if not matching: - return _perstot(Y, order, internal_p) - else: - return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)]) - elif Y.size == 0: - if not matching: - return _perstot(X, order, internal_p) - else: - return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)]) - - M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p) - a = np.ones(n+1) # weight vector of the input diagram. Uniform here. - a[-1] = m - b = np.ones(m+1) # weight vector of the input diagram. Uniform here. - b[-1] = n - - if matching: - P = ot.emd(a=a,b=b,M=M, numItermax=2000000) - ot_cost = np.sum(np.multiply(P,M)) - P[-1, -1] = 0 # Remove matching corresponding to the diagonal - match = np.argwhere(P) - # Now we turn to -1 points encoding the diagonal - match[:,0][match[:,0] >= n] = -1 - match[:,1][match[:,1] >= m] = -1 - return ot_cost ** (1./order) , match - - # Comptuation of the otcost using the ot.emd2 library. - # Note: it is the Wasserstein distance to the power q. - # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value? 
-    ot_cost = ot.emd2(a, b, M, numItermax=2000000)
-
-    return ot_cost ** (1./order)
diff --git a/src/python/gudhi/wasserstein/__init__.py b/src/python/gudhi/wasserstein/__init__.py
new file mode 100644
index 00000000..ed225ba4
--- /dev/null
+++ b/src/python/gudhi/wasserstein/__init__.py
@@ -0,0 +1 @@
+from .wasserstein import wasserstein_distance
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
new file mode 100644
index 00000000..079bcc57
--- /dev/null
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -0,0 +1,158 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+#   - YYYY/MM Author: Description of the modification
+
+
+import ot
+import numpy as np
+import scipy.spatial.distance as sc
+
+from gudhi.wasserstein import wasserstein_distance
+
+
+def _mean(x, m):
+    '''
+    :param x: a list of 2D-points, off diagonal, x_0... x_{k-1}
+    :param m: total amount of points taken into account,
+              that is we have (m-k) copies of the diagonal
+    :returns: the weighted mean of x with (m-k) copies of the diagonal
+    '''
+    k = len(x)
+    if k > 0:
+        w = np.mean(x, axis=0)
+        w_delta = (w[0] + w[1]) / 2 * np.ones(2)
+        return (k * w + (m-k) * w_delta) / m
+    else:
+        return np.array([0, 0])
+
+
+def lagrangian_barycenter(pdiagset, init=None, verbose=False):
+    '''
+    :param pdiagset: a list of size m containing numpy.array of shape (n x 2)
+                        (n can variate), encoding a set of
+                        persistence diagrams with only finite coordinates.
+    :param init: The initial value for barycenter estimate.
+                 If None, init is made on a random diagram from the dataset.
+                 Otherwise, it must be an int
+                 (then we init with diagset[init])
+                 or a (n x 2) numpy.array enconding
+                 a persistence diagram with n points.
+    :param verbose: if True, returns additional information about the
+                    barycenter.
+    :returns: If not verbose (default), a numpy.array encoding
+              the barycenter estimate of pdiagset
+              (local minima of the energy function).
+              If pdiagset is empty, returns None.
+              If verbose, returns a couple (Y, log)
+              where Y is the barycenter estimate,
+              and log is a dict that contains additional informations:
+              - groupings, a list of list of pairs (i,j),
+                That is, G[k] = [(i, j) ...], where (i,j) indicates
+                that X[i] is matched to Y[j]
+                if i = -1 or j = -1, it means they
+                represent the diagonal.
+              - energy, a float representing the Frechet
+                energy value obtained,
+                that is the mean of squared distances
+                of observations to the output.
+              - nb_iter, integer representing the number of iterations
+                performed before convergence of the algorithm.
+    '''
+    X = pdiagset  # to shorten notations, not a copy
+    m = len(X)  # number of diagrams we are averaging
+    if m == 0:
+        print("Warning: computing barycenter of empty diag set. Returns None")
+        return None
+
+    # store the number of off-diagonal points for each of the X_i
+    nb_off_diag = np.array([len(X_i) for X_i in X])
+    # Initialisation of barycenter
+    if init is None:
+        i0 = np.random.randint(m)  # Index of first state for the barycenter
+        Y = X[i0].copy()
+    else:
+        if type(init)==int:
+            Y = X[init].copy()
+        else:
+            Y = init.copy()
+
+    nb_iter = 0
+
+    converged = False  # stopping criterion
+    while not converged:
+        nb_iter += 1
+        K = len(Y)  # current nb of points in Y (some might be on the diagonal)
+        G = np.full((K, m), -1, dtype=int)  # will store for each j, the (index)
+                                            # point matched in each other diagram
+                                            # (might be the diagonal).
+                                            # that is G[j, i] = k <=> y_j is matched to
+                                            # x_k in the i-th diagram X[i]
+        updated_points = np.zeros((K, 2))  # will store the new positions of
+                                           # the points of Y.
+                                           # If points disappear, they are thrown
+                                           # on [0,0] by default.
+        new_created_points = []  # will store potential new points.
+
+        # Step 1 : compute optimal matching (Y, X_i) for each X_i
+        #          and create new points in Y if needed
+        for i in range(m):
+            _, indices = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+            for y_j, x_i_j in indices:
+                if y_j >= 0:  # we matched an off-diagonal point to x_i_j...
+                    if x_i_j >= 0:  # ...which is also an off-diagonal point.
+                        G[y_j, i] = x_i_j
+                    else:  # ...which is a diagonal point
+                        G[y_j, i] = -1  # -1 stands for the diagonal (mask)
+                else:  # We matched a diagonal point to x_i_j...
+                    if x_i_j >= 0:  # which is an off-diag point!
+                                    # need to create a new point in Y
+                        new_y = _mean(np.array([X[i][x_i_j]]), m)
+                        # Average this point with (m-1) copies of Delta
+                        new_created_points.append(new_y)
+
+        # Step 2 : Update current point positions thanks to the groupings computed
+        to_delete = []
+        for j in range(K):
+            matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1]
+            new_y_j = _mean(matched_points, m)
+            if not np.array_equal(new_y_j, np.array([0,0])):
+                updated_points[j] = new_y_j
+            else:  # this point is no longer of any use.
+                to_delete.append(j)
+        # we remove the points to be deleted now.
+        updated_points = np.delete(updated_points, to_delete, axis=0)
+
+        # we cannot converge if there have been newly created points.
+        if new_created_points:
+            Y = np.concatenate((updated_points, new_created_points))
+        else:
+            # Step 3 : we check convergence
+            if np.array_equal(updated_points, Y):
+                converged = True
+            Y = updated_points
+
+
+    if verbose:
+        groupings = []
+        energy = 0
+        log = {}
+        n_y = len(Y)
+        for i in range(m):
+            cost, edges = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+            groupings.append(edges)
+            energy += cost
+        log["groupings"] = groupings
+        energy = energy/m
+        print(energy)
+        log["energy"] = energy
+        log["nb_iter"] = nb_iter
+
+        return Y, log
+    else:
+        return Y
+
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
new file mode 100644
index 00000000..e1233eec
--- /dev/null
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -0,0 +1,125 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+#   - YYYY/MM Author: Description of the modification
+
+import numpy as np
+import scipy.spatial.distance as sc
+
+try:
+    import ot
+except ImportError:
+    print("POT (Python Optimal Transport) package is not installed.
Try to run $ conda install -c conda-forge pot ; or $ pip install POT") + +def _proj_on_diag(X): + ''' + :param X: (n x 2) array encoding the points of a persistent diagram. + :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal + ''' + Z = (X[:,0] + X[:,1]) / 2. + return np.array([Z , Z]).T + + +def _build_dist_matrix(X, Y, order=2., internal_p=2.): + ''' + :param X: (n x 2) numpy.array encoding the (points of the) first diagram. + :param Y: (m x 2) numpy.array encoding the second diagram. + :param order: exponent for the Wasserstein metric. + :param internal_p: Ground metric (i.e. norm L^p). + :returns: (n+1) x (m+1) np.array encoding the cost matrix C. + For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j], + while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j]) + and its orthogonal projection onto the diagonal. + note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal). + ''' + Xdiag = _proj_on_diag(X) + Ydiag = _proj_on_diag(Y) + if np.isinf(internal_p): + C = sc.cdist(X,Y, metric='chebyshev')**order + Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order + Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order + else: + C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order + Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order + Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order + Cf = np.hstack((C, Cxd[:,None])) + Cdy = np.append(Cdy, 0) + + Cf = np.vstack((Cf, Cdy[None,:])) + + return Cf + + +def _perstot(X, order, internal_p): + ''' + :param X: (n x 2) numpy.array (points of a given diagram). + :param order: exponent for Wasserstein. Default value is 2. + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). + :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). + ''' + Xdiag = _proj_on_diag(X) + return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order) + + +def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): + ''' + :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points + (i.e. with infinite coordinate). + :param Y: (m x 2) numpy.array encoding the second diagram. + :param matching: if True, computes and returns the optimal matching between X and Y, encoded as + a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to + the j-th point in Y, with the convention (-1) represents the diagonal. + :param order: exponent for Wasserstein; Default value is 2. + :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); + Default value is 2 (Euclidean norm). + :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with + respect to the internal_p-norm as ground metric. + If matching is set to True, also returns the optimal matching between X and Y. + ''' + n = len(X) + m = len(Y) + + # handle empty diagrams + if X.size == 0: + if Y.size == 0: + if not matching: + return 0. 
+            else:
+                return 0., np.array([])
+        else:
+            if not matching:
+                return _perstot(Y, order, internal_p)
+            else:
+                return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)])
+    elif Y.size == 0:
+        if not matching:
+            return _perstot(X, order, internal_p)
+        else:
+            return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)])
+
+    M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
+    a = np.ones(n+1)  # weight vector of the input diagram. Uniform here.
+    a[-1] = m
+    b = np.ones(m+1)  # weight vector of the input diagram. Uniform here.
+    b[-1] = n
+
+    if matching:
+        P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
+        ot_cost = np.sum(np.multiply(P, M))
+        P[-1, -1] = 0  # Remove matching corresponding to the diagonal
+        match = np.argwhere(P)
+        # Now we set to -1 the points encoding the diagonal
+        match[:,0][match[:,0] >= n] = -1
+        match[:,1][match[:,1] >= m] = -1
+        return ot_cost ** (1./order), match
+
+    # Computation of the OT cost using the ot.emd2 library.
+    # Note: it is the Wasserstein distance to the power q.
+    # The default numItermax=100000 is not sufficient for some examples with 5000 points; what is a good value?
+    ot_cost = ot.emd2(a, b, M, numItermax=2000000)
+
+    return ot_cost ** (1./order)
-- cgit v1.2.3


From 266f1eb706ecf31733acbcdded3b04d8d270fb60 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Tue, 31 Mar 2020 17:43:53 +0200
Subject: update CMakeLists to make things compatible with wasserstein/ repo

---
 src/python/CMakeLists.txt | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index b7d43bea..a91ca30a 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -56,7 +56,6 @@ if(PYTHONINTERP_FOUND)
   # Modules that should not be auto-imported in __init__.py
   set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
   set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
-  set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'barycenter', ")
   set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")

   add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
@@ -217,8 +216,7 @@ if(PYTHONINTERP_FOUND)
     # Other .py files
     file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
     file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
-    file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
-    file(COPY "gudhi/barycenter.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+    file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
     file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")

     add_custom_command(
-- cgit v1.2.3


From af76331b5b4c709f46a3d705320bfedcf3a60924 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Tue, 31 Mar 2020 18:08:05 +0200
Subject: correction typo user.rst

---
 src/python/doc/wasserstein_distance_user.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index a077f9a4..c6d49db1 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -128,7 +128,7 @@ per diagram).
 Basic example
 -------------

-This example computes the Frechet mean (aka Wasserstein barycenter) between
+This example estimates the Frechet mean (aka Wasserstein barycenter) between
 four persistence diagrams.
It is initialized on the 4th diagram. As the algorithm is not convex, its output depends on the initialization and @@ -143,7 +143,7 @@ Note that persistence diagrams must be submitted as .. testcode:: - import gudhi.barycenter + from gudhi.wasserstein.barycenter import lagrangian_barycenter import numpy as np dg1 = np.array([[0.2, 0.5]]) @@ -151,7 +151,7 @@ Note that persistence diagrams must be submitted as dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]]) dg4 = np.array([]) pdiagset = [dg1, dg2, dg3, dg4] - bary = gudhi.wasserstein.barycenter.lagrangian_barycenter(pdiagset=pdiagset,init=3) + bary = lagrangian_barycenter(pdiagset=pdiagset,init=3) message = "Wasserstein barycenter estimated:" print(message) -- cgit v1.2.3 From 9d89f57376e619515d99ad88c2cdeef35daaedd5 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 1 Apr 2020 09:04:18 +0200 Subject: code review: use operator[] instead of at() --- src/Alpha_complex/include/gudhi/Alpha_complex.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h index eb4ef427..4369071c 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h @@ -248,6 +248,16 @@ class Alpha_complex { } } + /** \brief get_point_ returns the point corresponding to the vertex given as parameter. + * Only for internal use for faster access. + * + * @param[in] vertex Vertex handle of the point to retrieve. + * @return The point found. + */ + const Point_d& get_point_(std::size_t vertex) const { + return vertex_handle_to_iterator_[vertex]->point(); + } + template auto& get_cache(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) { auto k = cplx.key(s); @@ -258,7 +268,7 @@ class Alpha_complex { thread_local std::vector v; v.clear(); for (auto vertex : cplx.simplex_vertex_range(s)) - v.push_back(get_point(vertex)); + v.push_back(get_point_(vertex)); Point_d c = kernel_.construct_circumcenter_d_object()(v.cbegin(), v.cend()); typename Kernel::FT r = kernel_.squared_distance_d_object()(c, v[0]); cache_.emplace_back(std::move(c), std::move(r)); @@ -423,7 +433,7 @@ class Alpha_complex { while(shortiter != enditer && *longiter == *shortiter) { ++longiter; ++shortiter; } Vertex_handle extra = *longiter; auto const& cache=get_cache(complex, f_boundary); - bool is_gab = kernel_.squared_distance_d_object()(cache.first, get_point(extra)) >= cache.second; + bool is_gab = kernel_.squared_distance_d_object()(cache.first, get_point_(extra)) >= cache.second; #ifdef DEBUG_TRACES std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << extra << std::endl; #endif // DEBUG_TRACES -- cgit v1.2.3 From cfcbe923f132a770363e6a240df8f6911cdd39e9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 1 Apr 2020 10:34:48 +0200 Subject: improved doc, turns Basic examples as subsections using * --- src/python/doc/wasserstein_distance_sum.inc | 6 +++--- src/python/doc/wasserstein_distance_user.rst | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc index f10472bc..f9308e5e 100644 --- a/src/python/doc/wasserstein_distance_sum.inc +++ b/src/python/doc/wasserstein_distance_sum.inc @@ -4,10 +4,10 @@ 
+-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe | | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | | - | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Introduced in: GUDHI 3.1.0 | + | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Since: GUDHI 3.1.0 | | | barycenters of a family of persistence diagrams. | | - | Wasserstein distance is the q-th root of the sum of the | | :Copyright: MIT | - | edge lengths to the power q. | | | + | | | :License: MIT | + | | | | | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ | * :doc:`wasserstein_distance_user` | | diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index c6d49db1..c5c250b5 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -9,7 +9,7 @@ Definition .. include:: wasserstein_distance_sum.inc -The q-Wasserstein distance is defined as the minimal value +The q-Wasserstein distance is defined as the minimal value achieved by a perfect matching between the points of the two diagrams (+ all diagonal points), where the value of a matching is defined as the q-th root of the sum of all edge lengths to the power q. Edge lengths @@ -32,7 +32,7 @@ Morozov, and Arnur Nigmetov. .. autofunction:: gudhi.hera.wasserstein_distance Basic example -------------- +************* This example computes the 1-Wasserstein distance from 2 persistence diagrams with Euclidean ground metric. Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values. @@ -123,10 +123,10 @@ per diagram). diagrams. -.. autofunction:: gudhi.barycenter.lagrangian_barycenter +.. autofunction:: gudhi.wasserstein.barycenter.lagrangian_barycenter Basic example -------------- +************* This example estimates the Frechet mean (aka Wasserstein barycenter) between four persistence diagrams. @@ -135,7 +135,7 @@ As the algorithm is not convex, its output depends on the initialization and is only a local minimum of the objective function. Initialization can be either given as an integer (in which case the i-th diagram of the list is used as initial estimate) or as a diagram. -If None, it will randomly select one of the diagram of the list +If None, it will randomly select one of the diagrams of the list as initial estimate. Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values. -- cgit v1.2.3 From 0b19e1f991fdebbdb622d3101135eaee65c4ed5d Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 1 Apr 2020 14:45:37 +0200 Subject: Split the cache per dimension Try to reduce slightly the memory use. 
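[Editor's note - a hedged sketch, not part of the original commit message.
As I read the diff below, `cache_` holds the circumcenters and squared radii
computed for the dimension currently being processed, `old_cache_` keeps those
of the previous dimension for read-only lookups in radius(), and the two are
swapped once a dimension is done, so at most two dimensions' worth of entries
stay alive. A rough Python analogue of that memory trade-off (all names here
are hypothetical, not GUDHI API):

    class TwoGenerationCache:
        """Keep at most two generations of cached values alive."""
        def __init__(self):
            self.cache = []      # filled while processing the current dimension
            self.old_cache = []  # read-only entries from the previous dimension

        def end_of_dimension(self):
            # mirrors `old_cache_ = std::move(cache_); cache_.clear();`
            self.old_cache = self.cache
            self.cache = []
]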
--- src/Alpha_complex/include/gudhi/Alpha_complex.h | 33 +++++++++++++++++++------ 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h index 4369071c..ba91998d 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h @@ -112,9 +112,6 @@ class Alpha_complex { typedef typename Kernel::Side_of_bounded_sphere_d Is_Gabriel; typedef typename Kernel::Point_dimension_d Point_Dimension; - // Type required to compute squared radius, or side of bounded sphere on a vector of points. - typedef typename std::vector Vector_of_CGAL_points; - // Vertex_iterator type from CGAL. typedef typename Delaunay_triangulation::Vertex_iterator CGAL_vertex_iterator; @@ -124,6 +121,9 @@ class Alpha_complex { // Structure to switch from simplex tree vertex handle to CGAL vertex iterator. typedef typename std::vector< CGAL_vertex_iterator > Vector_vertex_iterator; + // Numeric type of coordinates in the kernel + typedef typename Kernel::FT FT; + private: /** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator. * Vertex handles are inserted sequentially, starting at 0.*/ @@ -133,7 +133,7 @@ class Alpha_complex { /** \brief Kernel for triangulation_ functions access.*/ Kernel kernel_; /** \brief Cache for geometric constructions: circumcenter and squared radius of a simplex.*/ - std::vector> cache_; + std::vector> cache_, old_cache_; public: /** \brief Alpha_complex constructor from an OFF file name. @@ -258,24 +258,39 @@ class Alpha_complex { return vertex_handle_to_iterator_[vertex]->point(); } + /// Return a reference to the circumcenter and circumradius, writing them in the cache if necessary. template auto& get_cache(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) { auto k = cplx.key(s); if(k==cplx.null_key()){ k = cache_.size(); cplx.assign_key(s, k); - // Use a transform_range? Check the impact on perf. + // Using a transform_range is slower, currently. thread_local std::vector v; v.clear(); for (auto vertex : cplx.simplex_vertex_range(s)) v.push_back(get_point_(vertex)); Point_d c = kernel_.construct_circumcenter_d_object()(v.cbegin(), v.cend()); - typename Kernel::FT r = kernel_.squared_distance_d_object()(c, v[0]); + FT r = kernel_.squared_distance_d_object()(c, v[0]); cache_.emplace_back(std::move(c), std::move(r)); } return cache_[k]; } + /// Return the circumradius, either from the old cache or computed, without writing to the cache. + template + auto radius(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) { + auto k = cplx.key(s); + if(k!=cplx.null_key()) + return old_cache_[k].second; + // Using a transform_range is slower, currently. + thread_local std::vector v; + v.clear(); + for (auto vertex : cplx.simplex_vertex_range(s)) + v.push_back(get_point_(vertex)); + return kernel_.compute_squared_radius_d_object()(v.cbegin(), v.cend()); + } + public: /** \brief Inserts all Delaunay triangulation into the simplicial complex. 
* It also computes the filtration values accordingly to the \ref createcomplexalgorithm if default_filtration_value @@ -365,11 +380,11 @@ class Alpha_complex { Filtration_value alpha_complex_filtration = 0.0; // No need to compute squared_radius on a single point - alpha is 0.0 if (f_simplex_dim > 0) { - auto const& sqrad = get_cache(complex, f_simplex).second; + auto const& sqrad = radius(complex, f_simplex); #if CGAL_VERSION_NR >= 1050000000 if(exact) CGAL::exact(sqrad); #endif - CGAL::NT_converter cv; + CGAL::NT_converter cv; alpha_complex_filtration = cv(sqrad); } complex.assign_filtration(f_simplex, alpha_complex_filtration); @@ -382,6 +397,8 @@ class Alpha_complex { propagate_alpha_filtration(complex, f_simplex); } } + old_cache_ = std::move(cache_); + cache_.clear(); } // -------------------------------------------------------------------------------------------- -- cgit v1.2.3 From c36080ab9e478cd0d44bfd8d5bb8f4726a8aa937 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 1 Apr 2020 20:24:01 +0200 Subject: improved doc readability --- src/python/gudhi/wasserstein/barycenter.py | 54 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py index 079bcc57..fae6b68f 100644 --- a/src/python/gudhi/wasserstein/barycenter.py +++ b/src/python/gudhi/wasserstein/barycenter.py @@ -33,35 +33,37 @@ def _mean(x, m): def lagrangian_barycenter(pdiagset, init=None, verbose=False): ''' - :param pdiagset: a list of size m containing numpy.array of shape (n x 2) - (n can variate), encoding a set of + :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` + (`n` can variate), encoding a set of persistence diagrams with only finite coordinates. :param init: The initial value for barycenter estimate. - If None, init is made on a random diagram from the dataset. - Otherwise, it must be an int - (then we init with diagset[init]) - or a (n x 2) numpy.array enconding - a persistence diagram with n points. - :param verbose: if True, returns additional information about the + If ``None``, init is made on a random diagram from the dataset. + Otherwise, it can be an ``int`` + (then initialization is made on ``pdiagset[init]``) + or a `(n x 2)` ``numpy.array`` enconding + a persistence diagram with `n` points. + :type init: int, (n x 2) np.array + :param verbose: if ``True``, returns additional information about the barycenter. - :returns: If not verbose (default), a numpy.array encoding - the barycenter estimate of pdiagset - (local minima of the energy function). - If pdiagset is empty, returns None. - If verbose, returns a couple (Y, log) - where Y is the barycenter estimate, - and log is a dict that contains additional informations: - - groupings, a list of list of pairs (i,j), - That is, G[k] = [(i, j) ...], where (i,j) indicates - that X[i] is matched to Y[j] - if i = -1 or j = -1, it means they - represent the diagonal. - - energy, a float representing the Frechet - energy value obtained, - that is the mean of squared distances - of observations to the output. - - nb_iter, integer representing the number of iterations - performed before convergence of the algorithm. + :type verbose: boolean + :returns: If not verbose (default), a ``numpy.array`` encoding + the barycenter estimate of pdiagset + (local minimum of the energy function). + If ``pdiagset`` is empty, returns ``None``. 
+              If verbose, returns a couple ``(Y, log)``
+              where ``Y`` is the barycenter estimate,
+              and ``log`` is a ``dict`` that contains additional information:
+
+              - `"groupings"`, a list of list of pairs ``(i,j)``.
+                Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates
+                that ``pdiagset[k][i]`` is matched to ``Y[j]``;
+                if ``i = -1`` or ``j = -1``, it means they
+                represent the diagonal.
+
+              - `"energy"`, ``float`` representing the Frechet energy value obtained.
+                It is the mean of squared distances of observations to the output.
+
+              - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm.
     '''
     X = pdiagset  # to shorten notations, not a copy
     m = len(X)  # number of diagrams we are averaging
-- cgit v1.2.3


From 731358cbfe3880b02a58c70923b5a990ddff7644 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Wed, 1 Apr 2020 20:27:27 +0200
Subject: improved doc, adding double quotes for init

---
 src/python/gudhi/wasserstein/barycenter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
index fae6b68f..e879b7dd 100644
--- a/src/python/gudhi/wasserstein/barycenter.py
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -42,7 +42,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
                  (then initialization is made on ``pdiagset[init]``)
                  or a `(n x 2)` ``numpy.array`` enconding
                  a persistence diagram with `n` points.
-    :type init: int, (n x 2) np.array
+    :type init: ``int``, or (n x 2) ``np.array``
     :param verbose: if ``True``, returns additional information about the
                     barycenter.
-- cgit v1.2.3


From 115dacbd482a75b304c0f44ef84d59f8d141c1df Mon Sep 17 00:00:00 2001
From: Théo Lacombe
Date: Thu, 2 Apr 2020 10:39:26 +0200
Subject: Update next_release.md

---
 .github/next_release.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/next_release.md b/.github/next_release.md
index 3166b0a8..83b98a1c 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -9,6 +9,7 @@ Below is a list of changes made since GUDHI 3.1.1:
   - [Wassertein distance](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html)
     - An another implementation comes from Hera (BSD-3-Clause) which is based on [Geometry Helps to Compare Persistence Diagrams](http://doi.acm.org/10.1145/3064175) by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov.
+    - `gudhi.wasserstein.wasserstein_distance` now has an option to return the optimal matching that achieves the distance between the two diagrams.
 - [Module](link)
   - ...
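[Editor's note - a hedged sketch of the `matching=True` option mentioned in
the release note above. The two diagrams are illustrative, and only the
wasserstein_distance signature shown in the patches above is assumed:

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    dgm1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
    dgm2 = np.array([[2.8, 4.45], [9.5, 14.1]])
    cost, matching = wasserstein_distance(dgm1, dgm2, matching=True,
                                          order=1., internal_p=2.)
    # matching is an array of index pairs [i, j]; j == -1 (resp. i == -1)
    # means dgm1[i] (resp. dgm2[j]) is matched to the diagonal.
]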
-- cgit v1.2.3


From 6f3b4ae5be6ce404bb35c8dfec34b5239c0856fe Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Fri, 3 Apr 2020 09:42:33 +0200
Subject: openssl issue with pip

---
 azure-pipelines.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 95b15db2..e4d7c814 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -10,7 +10,7 @@ jobs:
     macOSrelease:
       imageName: 'macos-10.14'
       CMakeBuildType: Release
-      customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
+      customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal && brew upgrade openssl'

 pool:
   vmImage: $(imageName)
-- cgit v1.2.3


From 6bae999949d3305884ca2cc1ecb4bf093e1710e6 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Fri, 3 Apr 2020 09:47:15 +0200
Subject: rollback

---
 azure-pipelines.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e4d7c814..95b15db2 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -10,7 +10,7 @@ jobs:
     macOSrelease:
       imageName: 'macos-10.14'
      CMakeBuildType: Release
-      customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal && brew upgrade openssl'
+      customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'

 pool:
   vmImage: $(imageName)
-- cgit v1.2.3


From 4cfe8411f808f52bee0ba37e28fa9f6cc3519abb Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Fri, 3 Apr 2020 17:27:47 +0200
Subject: removed the print of energy in verbose mode, which had been added by
 mistake

---
 src/python/gudhi/wasserstein/barycenter.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
index e879b7dd..99f29a1e 100644
--- a/src/python/gudhi/wasserstein/barycenter.py
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -150,7 +150,6 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
             energy += cost
         log["groupings"] = groupings
         energy = energy/m
-        print(energy)
         log["energy"] = energy
         log["nb_iter"] = nb_iter
-- cgit v1.2.3


From 6acbc89d185d1c537778fb2d4a8503bab61fca31 Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Fri, 3 Apr 2020 21:04:52 +0200
Subject: Split compute_persistence from get_persistence.
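[Editor's note - a hedged sketch, not part of the original commit message.
Separating the computation from the extraction lets a single
compute_persistence call feed several accessors. At the Python level the
visible behaviour is unchanged; a minimal illustration using only the public
API, assuming a GUDHI build that includes this change:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1, 2], filtration=1.0)
    # persistence() performs the single computation step...
    diag = st.persistence(homology_coeff_field=11, min_persistence=0.0)
    # ...and these accessors then reuse the stored pairs instead of
    # recomputing them.
    print(st.betti_numbers())
    print(st.persistence_intervals_in_dimension(0))
]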
--- src/python/gudhi/cubical_complex.pyx | 6 +++-- src/python/gudhi/periodic_cubical_complex.pyx | 6 +++-- src/python/gudhi/simplex_tree.pxd | 3 ++- src/python/gudhi/simplex_tree.pyx | 6 +++-- .../include/Persistent_cohomology_interface.h | 29 ++++++++++++---------- 5 files changed, 30 insertions(+), 20 deletions(-) diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index d5ad1266..ce844558 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -35,7 +35,8 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi": cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface>": Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) - vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence) + void compute_persistence(int homology_coeff_field, double min_persistence) + vector[pair[int, pair[double, double]]] get_persistence() vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) @@ -149,7 +150,8 @@ cdef class CubicalComplex: self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True) cdef vector[pair[int, pair[double, double]]] persistence_result if self.pcohptr != NULL: - persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence) + self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + persistence_result = self.pcohptr.get_persistence() return persistence_result def betti_numbers(self): diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index fd08b976..ff5ef3bd 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -32,7 +32,8 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi": cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface>>": Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) - vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence) + void compute_persistence(int homology_coeff_field, double min_persistence) + vector[pair[int, pair[double, double]]] get_persistence() vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) @@ -154,7 +155,8 @@ cdef class PeriodicCubicalComplex: self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True) cdef vector[pair[int, pair[double, double]]] persistence_result if self.pcohptr != NULL: - persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence) + self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + persistence_result = self.pcohptr.get_persistence() return persistence_result def betti_numbers(self): diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 595f22bb..44040bcb 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -71,7 +71,8 @@ cdef extern from 
"Simplex_tree_interface.h" namespace "Gudhi": cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface>": Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) - vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence) + void compute_persistence(int homology_coeff_field, double min_persistence) + vector[pair[int, pair[double, double]]] get_persistence() vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index cc3753e1..69e645b4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -443,7 +443,8 @@ cdef class SimplexTree: if self.pcohptr != NULL: del self.pcohptr self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False) - persistence_result = self.pcohptr.get_persistence(homology_coeff_field, -1.) + self.pcohptr.compute_persistence(homology_coeff_field, -1.) + persistence_result = self.pcohptr.get_persistence() return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence) @@ -470,7 +471,8 @@ cdef class SimplexTree: self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max) cdef vector[pair[int, pair[double, double]]] persistence_result if self.pcohptr != NULL: - persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence) + self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + persistence_result = self.pcohptr.get_persistence() return persistence_result def betti_numbers(self): diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 8c79e6f3..a29ebbee 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -23,6 +23,7 @@ template class Persistent_cohomology_interface : public persistent_cohomology::Persistent_cohomology { private: + typedef persistent_cohomology::Persistent_cohomology Base; /* * Compare two intervals by dimension, then by length. 
*/ @@ -43,25 +44,28 @@ persistent_cohomology::Persistent_cohomology(*stptr), + : Base(*stptr), stptr_(stptr) { } Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max) - : persistent_cohomology::Persistent_cohomology(*stptr, persistence_dim_max), + : Base(*stptr, persistence_dim_max), stptr_(stptr) { } - std::vector>> get_persistence(int homology_coeff_field, - double min_persistence) { - persistent_cohomology::Persistent_cohomology::init_coefficients(homology_coeff_field); - persistent_cohomology::Persistent_cohomology::compute_persistent_cohomology(min_persistence); + void compute_persistence(int homology_coeff_field, double min_persistence) { + Base::init_coefficients(homology_coeff_field); + Base::compute_persistent_cohomology(min_persistence); + } + + void maybe_compute_persistence(int homology_coeff_field, double min_persistence) { + // Currently get_persistent_pairs safely returns an empty vector before compute_persistent_cohomology + if(Base::get_persistent_pairs().empty()) + compute_persistence(homology_coeff_field, min_persistence); + } + std::vector>> get_persistence() { // Custom sort and output persistence cmp_intervals_by_dim_then_length cmp(stptr_); - auto persistent_pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); + auto persistent_pairs = Base::get_persistent_pairs(); std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp); std::vector>> persistence; @@ -74,8 +78,7 @@ persistent_cohomology::Persistent_cohomology, std::vector>> persistence_pairs() { - auto pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); + auto pairs = Base::get_persistent_pairs(); std::vector, std::vector>> persistence_pairs; persistence_pairs.reserve(pairs.size()); -- cgit v1.2.3 From 7830d93607257fd75f09b371e88741a517347579 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 3 Apr 2020 21:11:57 +0200 Subject: Dead code --- src/python/include/Simplex_tree_interface.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h index 1a18aed6..27b123f8 100644 --- a/src/python/include/Simplex_tree_interface.h +++ b/src/python/include/Simplex_tree_interface.h @@ -16,8 +16,6 @@ #include #include -#include "Persistent_cohomology_interface.h" - #include #include #include // std::pair @@ -157,11 +155,6 @@ class Simplex_tree_interface : public Simplex_tree { return new_dgm; } - void create_persistence(Gudhi::Persistent_cohomology_interface* pcoh) { - Base::initialize_filtration(); - pcoh = new Gudhi::Persistent_cohomology_interface(*this); - } - // Iterator over the simplex tree Complex_simplex_iterator get_simplices_iterator_begin() { // this specific case works because the range is just a pair of iterators - won't work if range was a vector -- cgit v1.2.3 From b2cfc0691147ca122861bc423d41495c4b444dde Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 3 Apr 2020 21:27:01 +0200 Subject: Simplify some code --- src/python/gudhi/simplex_tree.pyx | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 69e645b4..d8bd0b79 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -413,7 +413,7 @@ cdef class SimplexTree: Note that this code creates an extra vertex internally, so you should make sure that the Simplex_tree does not contain a vertex with the largest possible value (i.e., 4294967295). 
""" - return self.get_ptr().compute_extended_filtration() + self.get_ptr().compute_extended_filtration() def extended_persistence(self, homology_coeff_field=11, min_persistence=0): """This function retrieves good values for extended persistence, and separate the diagrams @@ -469,11 +469,8 @@ cdef class SimplexTree: if self.pcohptr != NULL: del self.pcohptr self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max) - cdef vector[pair[int, pair[double, double]]] persistence_result - if self.pcohptr != NULL: - self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) - persistence_result = self.pcohptr.get_persistence() - return persistence_result + self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + return self.pcohptr.get_persistence() def betti_numbers(self): """This function returns the Betti numbers of the simplicial complex. -- cgit v1.2.3 From f0224ea1c97c7dcb32debeda176139ba10bd21e7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 4 Apr 2020 05:39:19 +0200 Subject: Local bibliographies in sphinx --- src/python/doc/alpha_complex_user.rst | 2 +- src/python/doc/bottleneck_distance_user.rst | 7 +++++++ src/python/doc/cubical_complex_user.rst | 2 +- src/python/doc/index.rst | 2 +- src/python/doc/nerve_gic_complex_user.rst | 7 +++++++ src/python/doc/persistent_cohomology_user.rst | 2 +- src/python/doc/rips_complex_user.rst | 7 +++++++ src/python/doc/simplex_tree_user.rst | 7 +++++++ src/python/doc/tangential_complex_user.rst | 2 +- src/python/doc/wasserstein_distance_user.rst | 7 +++++++ src/python/doc/witness_complex_user.rst | 2 +- 11 files changed, 41 insertions(+), 6 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 60319e84..6e926fc8 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -207,5 +207,5 @@ CGAL citations ============== .. bibliography:: ../../biblio/how_to_cite_cgal.bib - :filter: docnames + :filter: docname in docnames :style: unsrt diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst index 9435c7f1..95c4e575 100644 --- a/src/python/doc/bottleneck_distance_user.rst +++ b/src/python/doc/bottleneck_distance_user.rst @@ -65,3 +65,10 @@ The output is: Bottleneck distance approximation = 0.81 Bottleneck distance value = 0.75 + +Bibliography +============ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index 93ca6b24..94f59954 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -163,5 +163,5 @@ Bibliography ============ .. bibliography:: ../../biblio/bibliography.bib - :filter: docnames + :filter: docname in docnames :style: unsrt diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index 3387a64f..df1dff68 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -90,5 +90,5 @@ Bibliography ************ .. 
bibliography:: ../../biblio/bibliography.bib - :filter: docnames + :filter: docname in docnames :style: unsrt diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index 9101f45d..208031fb 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -313,3 +313,10 @@ the program outputs again SC.dot which gives the following visualization after u :alt: Visualization with neato Visualization with neato + +Bibliography +============ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index 5f931b3a..0a5be3a9 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -116,5 +116,5 @@ Bibliography ============ .. bibliography:: ../../biblio/bibliography.bib - :filter: docnames + :filter: docname in docnames :style: unsrt diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index 8efb12e6..325added 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -347,3 +347,10 @@ until dimension 1 - one skeleton graph in other words), the output is: points in the persistence diagram will be under the diagonal, and bottleneck distance and persistence graphical tool will not work properly, this is a known issue. + +Bibliography +============ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst index 3df7617f..b0b7153e 100644 --- a/src/python/doc/simplex_tree_user.rst +++ b/src/python/doc/simplex_tree_user.rst @@ -66,3 +66,10 @@ The output is: ([1, 2], 4.0) ([1], 0.0) ([2], 4.0) + +Bibliography +============ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst index 852cf5b6..0bcbc848 100644 --- a/src/python/doc/tangential_complex_user.rst +++ b/src/python/doc/tangential_complex_user.rst @@ -200,5 +200,5 @@ Bibliography ============ .. bibliography:: ../../biblio/bibliography.bib - :filter: docnames + :filter: docname in docnames :style: unsrt diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index a9b21fa5..9b94573e 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -84,3 +84,10 @@ The output is: point 1 in dgm1 is matched to point 2 in dgm2 point 2 in dgm1 is matched to the diagonal point 1 in dgm2 is matched to the diagonal + +Bibliography +============ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst index 7087fa98..b932ed0d 100644 --- a/src/python/doc/witness_complex_user.rst +++ b/src/python/doc/witness_complex_user.rst @@ -131,5 +131,5 @@ Bibliography ============ .. 
bibliography:: ../../biblio/bibliography.bib - :filter: docnames + :filter: docname in docnames :style: unsrt -- cgit v1.2.3 From d9e6b4f51bc8517453653be2904ab6db9aaab85e Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 4 Apr 2020 06:01:59 +0200 Subject: sphinx label warnings --- src/python/doc/alpha_complex_user.rst | 1 + src/python/doc/bottleneck_distance_user.rst | 1 + src/python/doc/cubical_complex_user.rst | 1 + src/python/doc/index.rst | 1 + src/python/doc/nerve_gic_complex_user.rst | 1 + src/python/doc/persistent_cohomology_user.rst | 1 + src/python/doc/rips_complex_user.rst | 1 + src/python/doc/simplex_tree_user.rst | 1 + src/python/doc/tangential_complex_user.rst | 1 + src/python/doc/wasserstein_distance_user.rst | 1 + src/python/doc/witness_complex_user.rst | 1 + 11 files changed, 11 insertions(+) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 6e926fc8..e1903688 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -209,3 +209,4 @@ CGAL citations .. bibliography:: ../../biblio/how_to_cite_cgal.bib :filter: docname in docnames :style: unsrt + :labelprefix: A diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst index 95c4e575..23a87c19 100644 --- a/src/python/doc/bottleneck_distance_user.rst +++ b/src/python/doc/bottleneck_distance_user.rst @@ -72,3 +72,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: B diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index 94f59954..cdc5b5dc 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -165,3 +165,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: CC diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index df1dff68..089efe23 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -92,3 +92,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: I diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index 208031fb..b022dca7 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -320,3 +320,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: N diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index 0a5be3a9..f97fc759 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -118,3 +118,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: PC diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index 325added..fb6e4b1b 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -354,3 +354,4 @@ Bibliography .. 
bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: R diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst index b0b7153e..5a97b3d7 100644 --- a/src/python/doc/simplex_tree_user.rst +++ b/src/python/doc/simplex_tree_user.rst @@ -73,3 +73,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: ST diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst index 0bcbc848..6cdd6125 100644 --- a/src/python/doc/tangential_complex_user.rst +++ b/src/python/doc/tangential_complex_user.rst @@ -202,3 +202,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: TA diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 9b94573e..817e6981 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -91,3 +91,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: WA diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst index b932ed0d..c258ad38 100644 --- a/src/python/doc/witness_complex_user.rst +++ b/src/python/doc/witness_complex_user.rst @@ -133,3 +133,4 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt + :labelprefix: WI -- cgit v1.2.3 From dc80ab48359521dac415292f4d2b1f496f326263 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 4 Apr 2020 06:05:57 +0200 Subject: Revert "sphinx label warnings" This reverts commit d9e6b4f51bc8517453653be2904ab6db9aaab85e. It was able to remove the warnings about duplicate labels, but then it shows [WA1] instead of [1] in the generated doc. And for things cited on multiple pages, it uses the same everywhere, so on a single page, you can have a mix of [I1], [WI2], etc. Not very pretty. --- src/python/doc/alpha_complex_user.rst | 1 - src/python/doc/bottleneck_distance_user.rst | 1 - src/python/doc/cubical_complex_user.rst | 1 - src/python/doc/index.rst | 1 - src/python/doc/nerve_gic_complex_user.rst | 1 - src/python/doc/persistent_cohomology_user.rst | 1 - src/python/doc/rips_complex_user.rst | 1 - src/python/doc/simplex_tree_user.rst | 1 - src/python/doc/tangential_complex_user.rst | 1 - src/python/doc/wasserstein_distance_user.rst | 1 - src/python/doc/witness_complex_user.rst | 1 - 11 files changed, 11 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index e1903688..6e926fc8 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -209,4 +209,3 @@ CGAL citations .. bibliography:: ../../biblio/how_to_cite_cgal.bib :filter: docname in docnames :style: unsrt - :labelprefix: A diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst index 23a87c19..95c4e575 100644 --- a/src/python/doc/bottleneck_distance_user.rst +++ b/src/python/doc/bottleneck_distance_user.rst @@ -72,4 +72,3 @@ Bibliography .. 
bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: B diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index cdc5b5dc..94f59954 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -165,4 +165,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: CC diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index 089efe23..df1dff68 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -92,4 +92,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: I diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index b022dca7..208031fb 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -320,4 +320,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: N diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index f97fc759..0a5be3a9 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -118,4 +118,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: PC diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index fb6e4b1b..325added 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -354,4 +354,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: R diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst index 5a97b3d7..b0b7153e 100644 --- a/src/python/doc/simplex_tree_user.rst +++ b/src/python/doc/simplex_tree_user.rst @@ -73,4 +73,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: ST diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst index 6cdd6125..0bcbc848 100644 --- a/src/python/doc/tangential_complex_user.rst +++ b/src/python/doc/tangential_complex_user.rst @@ -202,4 +202,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: TA diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 817e6981..9b94573e 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -91,4 +91,3 @@ Bibliography .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: WA diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst index c258ad38..b932ed0d 100644 --- a/src/python/doc/witness_complex_user.rst +++ b/src/python/doc/witness_complex_user.rst @@ -133,4 +133,3 @@ Bibliography .. 
bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames :style: unsrt - :labelprefix: WI -- cgit v1.2.3 From da3b4a79ca40d08ae5597341f4db2418f20fe3d2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 4 Apr 2020 12:52:52 +0200 Subject: Missing biblio in one file, change title level --- src/python/doc/alpha_complex_user.rst | 2 +- src/python/doc/bottleneck_distance_user.rst | 2 +- src/python/doc/cubical_complex_user.rst | 2 +- src/python/doc/nerve_gic_complex_ref.rst | 7 +++++++ src/python/doc/nerve_gic_complex_user.rst | 2 +- src/python/doc/persistent_cohomology_user.rst | 2 +- src/python/doc/rips_complex_user.rst | 2 +- src/python/doc/simplex_tree_user.rst | 2 +- src/python/doc/tangential_complex_user.rst | 2 +- src/python/doc/wasserstein_distance_user.rst | 2 +- src/python/doc/witness_complex_user.rst | 2 +- 11 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 6e926fc8..265a82d2 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -204,7 +204,7 @@ the program output is: [3, 6] -> 30.25 CGAL citations -============== +-------------- .. bibliography:: ../../biblio/how_to_cite_cgal.bib :filter: docname in docnames diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst index 95c4e575..206fcb63 100644 --- a/src/python/doc/bottleneck_distance_user.rst +++ b/src/python/doc/bottleneck_distance_user.rst @@ -67,7 +67,7 @@ The output is: Bottleneck distance value = 0.75 Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index 94f59954..e8c94bf6 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -160,7 +160,7 @@ Examples. End user programs are available in python/example/ folder. Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/nerve_gic_complex_ref.rst b/src/python/doc/nerve_gic_complex_ref.rst index abde2e8c..6a81b7af 100644 --- a/src/python/doc/nerve_gic_complex_ref.rst +++ b/src/python/doc/nerve_gic_complex_ref.rst @@ -12,3 +12,10 @@ Cover complexes reference manual :show-inheritance: .. automethod:: gudhi.CoverComplex.__init__ + +Bibliography +------------ + +.. bibliography:: ../../biblio/bibliography.bib + :filter: docname in docnames + :style: unsrt diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index 208031fb..f709ce91 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -315,7 +315,7 @@ the program outputs again SC.dot which gives the following visualization after u Visualization with neato Bibliography -============ +------------ .. 
bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index 0a5be3a9..506fa3a7 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -113,7 +113,7 @@ We provide several example files: run these examples with -h for details on thei * :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>` Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index 325added..c4bbcfb6 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -349,7 +349,7 @@ until dimension 1 - one skeleton graph in other words), the output is: this is a known issue. Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst index b0b7153e..1b272c35 100644 --- a/src/python/doc/simplex_tree_user.rst +++ b/src/python/doc/simplex_tree_user.rst @@ -68,7 +68,7 @@ The output is: ([2], 4.0) Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst index 0bcbc848..cf8199cc 100644 --- a/src/python/doc/tangential_complex_user.rst +++ b/src/python/doc/tangential_complex_user.rst @@ -197,7 +197,7 @@ The output is: Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 9b94573e..2ae72351 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -86,7 +86,7 @@ The output is: point 1 in dgm2 is matched to the diagonal Bibliography -============ +------------ .. bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst index b932ed0d..799f5444 100644 --- a/src/python/doc/witness_complex_user.rst +++ b/src/python/doc/witness_complex_user.rst @@ -128,7 +128,7 @@ Here is an example of constructing a strong witness complex filtration and compu * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` Bibliography -============ +------------ .. 
bibliography:: ../../biblio/bibliography.bib :filter: docname in docnames -- cgit v1.2.3 From 3ca13b31e5f48fbaef2ba7db980643716c18725c Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 5 Apr 2020 00:35:23 +0200 Subject: compute_persistence in python Also simplify references, and replace print with assert for errors --- src/python/gudhi/simplex_tree.pyx | 105 ++++++++++----------- .../include/Persistent_cohomology_interface.h | 13 +-- 2 files changed, 52 insertions(+), 66 deletions(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index d8bd0b79..c34a64e6 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -139,9 +139,9 @@ cdef class SimplexTree: This function is not constant time because it can recompute dimension if required (can be triggered by - :func:`remove_maximal_simplex()` + :func:`remove_maximal_simplex` or - :func:`prune_above_filtration()` + :func:`prune_above_filtration` methods). """ return self.get_ptr().dimension() @@ -166,9 +166,9 @@ cdef class SimplexTree: This function must be used with caution because it disables dimension recomputation when required (this recomputation can be triggered by - :func:`remove_maximal_simplex()` + :func:`remove_maximal_simplex` or - :func:`prune_above_filtration()` + :func:`prune_above_filtration` ). """ self.get_ptr().set_dimension(dimension) @@ -315,10 +315,10 @@ cdef class SimplexTree: The dimension of the simplicial complex may be lower after calling remove_maximal_simplex than it was before. However, - :func:`upper_bound_dimension()` + :func:`upper_bound_dimension` method will return the old value, which remains a valid upper bound. If you care, you can call - :func:`dimension()` + :func:`dimension` to recompute the exact dimension. """ self.get_ptr().remove_maximal_simplex(simplex) @@ -346,12 +346,12 @@ cdef class SimplexTree: Note that the dimension of the simplicial complex may be lower after calling - :func:`prune_above_filtration()` + :func:`prune_above_filtration` than it was before. However, - :func:`upper_bound_dimension()` + :func:`upper_bound_dimension` will return the old value, which remains a valid upper bound. If you care, you can call - :func:`dimension()` + :func:`dimension` method to recompute the exact dimension. """ return self.get_ptr().prune_above_filtration(filtration) @@ -405,7 +405,7 @@ cdef class SimplexTree: Note that after calling this function, the filtration values are actually modified within the Simplex_tree. - The function :func:`extended_persistence()` + The function :func:`extended_persistence` retrieves the original values. .. note:: @@ -427,11 +427,11 @@ cdef class SimplexTree: 0.0. Sets min_persistence to -1.0 to see all values. :type min_persistence: float. - :returns: A list of four persistence diagrams in the format described in :func:`persistence()`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. + :returns: A list of four persistence diagrams in the format described in :func:`persistence`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. 
See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes. .. note:: - This function should be called only if :func:`extend_filtration()` has been called first! + This function should be called only if :func:`extend_filtration` has been called first! .. note:: @@ -466,11 +466,32 @@ cdef class SimplexTree: :returns: The persistence of the simplicial complex. :rtype: list of pairs(dimension, pair(birth, death)) """ + self.compute_persistence(homology_coeff_field, min_persistence, persistence_dim_max) + return self.pcohptr.get_persistence() + + def compute_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): + """This function computes the persistence of the simplicial complex, so it can be accessed through + :func:`persistent_betti_numbers`, :func:`persistence_pairs`, etc. This function is equivalent to :func:`persistence` + when you do not want the list :func:`persistence` returns. + + :param homology_coeff_field: The homology coefficient field. Must be a + prime number. Default value is 11. + :type homology_coeff_field: int. + :param min_persistence: The minimum persistence value to take into + account (strictly greater than min_persistence). Default value is + 0.0. + Sets min_persistence to -1.0 to see all values. + :type min_persistence: float. + :param persistence_dim_max: If true, the persistent homology for the + maximal dimension in the complex is computed. If false, it is + ignored. Default is false. + :type persistence_dim_max: bool + :returns: Nothing. + """ if self.pcohptr != NULL: del self.pcohptr self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max) self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) - return self.pcohptr.get_persistence() def betti_numbers(self): """This function returns the Betti numbers of the simplicial complex. @@ -479,16 +500,11 @@ cdef class SimplexTree: :rtype: list of int :note: betti_numbers function requires - :func:`persistence()` + :func:`compute_persistence` function to be launched first. """ - cdef vector[int] bn_result - if self.pcohptr != NULL: - bn_result = self.pcohptr.betti_numbers() - else: - print("betti_numbers function requires persistence function" - " to be launched first.") - return bn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()" + return self.pcohptr.betti_numbers() def persistent_betti_numbers(self, from_value, to_value): """This function returns the persistent Betti numbers of the @@ -505,16 +521,11 @@ cdef class SimplexTree: :rtype: list of int :note: persistent_betti_numbers function requires - :func:`persistence()` + :func:`compute_persistence` function to be launched first. 
""" - cdef vector[int] pbn_result - if self.pcohptr != NULL: - pbn_result = self.pcohptr.persistent_betti_numbers(from_value, to_value) - else: - print("persistent_betti_numbers function requires persistence function" - " to be launched first.") - return pbn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()" + return self.pcohptr.persistent_betti_numbers(from_value, to_value) def persistence_intervals_in_dimension(self, dimension): """This function returns the persistence intervals of the simplicial @@ -526,16 +537,11 @@ cdef class SimplexTree: :rtype: numpy array of dimension 2 :note: intervals_in_dim function requires - :func:`persistence()` + :func:`compute_persistence` function to be launched first. """ - cdef vector[pair[double,double]] intervals_result - if self.pcohptr != NULL: - intervals_result = self.pcohptr.intervals_in_dimension(dimension) - else: - print("intervals_in_dim function requires persistence function" - " to be launched first.") - return np_array(intervals_result) + assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()" + return np_array(self.pcohptr.intervals_in_dimension(dimension)) def persistence_pairs(self): """This function returns a list of persistence birth and death simplices pairs. @@ -544,18 +550,13 @@ cdef class SimplexTree: :rtype: list of pair of list of int :note: persistence_pairs function requires - :func:`persistence()` + :func:`compute_persistence` function to be launched first. """ - cdef vector[pair[vector[int],vector[int]]] persistence_pairs_result - if self.pcohptr != NULL: - persistence_pairs_result = self.pcohptr.persistence_pairs() - else: - print("persistence_pairs function requires persistence function" - " to be launched first.") - return persistence_pairs_result + assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_pairs()" + return self.pcohptr.persistence_pairs() - def write_persistence_diagram(self, persistence_file=''): + def write_persistence_diagram(self, persistence_file): """This function writes the persistence intervals of the simplicial complex in a user given file name. @@ -563,14 +564,8 @@ cdef class SimplexTree: :type persistence_file: string. :note: intervals_in_dim function requires - :func:`persistence()` + :func:`compute_persistence` function to be launched first. 
""" - if self.pcohptr != NULL: - if persistence_file != '': - self.pcohptr.write_output_diagram(persistence_file.encode('utf-8')) - else: - print("persistence_file must be specified") - else: - print("intervals_in_dim function requires persistence function" - " to be launched first.") + assert self.pcohptr != NULL, "compute_persistence() must be called before write_persistence_diagram()" + self.pcohptr.write_output_diagram(persistence_file.encode('utf-8')) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index a29ebbee..e2b69a52 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -43,25 +43,16 @@ persistent_cohomology::Persistent_cohomology>> get_persistence() { // Custom sort and output persistence cmp_intervals_by_dim_then_length cmp(stptr_); -- cgit v1.2.3 From 73a40006dad55b0a9ce6ca270e566ce91efe6af4 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 5 Apr 2020 12:27:15 +0200 Subject: Proper exception in write_output_diagram --- src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 1 + src/python/gudhi/simplex_tree.pxd | 2 +- src/python/gudhi/simplex_tree.pyx | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index ca4bc10d..5e41edb4 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -571,6 +571,7 @@ class Persistent_cohomology { void write_output_diagram(std::string diagram_name) { std::ofstream diagram_out(diagram_name.c_str()); + diagram_out.exceptions(diagram_out.failbit); cmp_intervals_by_length cmp(cpx_); std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp); bool has_infinity = std::numeric_limits::has_infinity; diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 44040bcb..c46b36ba 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -76,5 +76,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": vector[int] betti_numbers() vector[int] persistent_betti_numbers(double from_value, double to_value) vector[pair[double,double]] intervals_in_dimension(int dimension) - void write_output_diagram(string diagram_file_name) + void write_output_diagram(string diagram_file_name) except + vector[pair[vector[int], vector[int]]] persistence_pairs() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index c34a64e6..7728ebfc 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -449,7 +449,7 @@ cdef class SimplexTree: def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False): - """This function returns the persistence of the simplicial complex. + """This function computes and returns the persistence of the simplicial complex. :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11. 
-- cgit v1.2.3


From 5ce1ee8976ced78de839ef629522c95324b2fabd Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Mon, 6 Apr 2020 16:25:27 +0900
Subject: weighted rips added

---
 src/python/CMakeLists.txt                 |  3 +++
 src/python/gudhi/weighted_rips_complex.py | 41 +++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 src/python/gudhi/weighted_rips_complex.py

diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index d7a6a4db..cac4553a 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -415,6 +415,9 @@ if(PYTHONINTERP_FOUND)
       add_gudhi_py_test(test_dtm)
     endif()
 
+    # Weighted Rips
+    add_gudhi_py_test(test_weighted_rips)
+
     # Documentation generation is available through sphinx - requires all modules
     if(SPHINX_PATH)
       if(MATPLOTLIB_FOUND)
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
new file mode 100644
index 00000000..34a627cb
--- /dev/null
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -0,0 +1,41 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Raphaël Tinarrage and Yuichi Ike
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+#
+# Modification(s):
+#   - YYYY/MM Author: Description of the modification
+
+from gudhi import SimplexTree
+
+class WeightedRipsComplex:
+    """
+    class to generate a weighted Rips complex
+    from a distance matrix and filtration value
+    """
+    def __init__(self,
+                 distance_matrix=None,
+                 filtration_values=None,
+                 max_filtration=float('inf'), sparse=None):
+        self.distance_matrix = distance_matrix
+        self.filtration_values = filtration_values
+        self.max_filtration = max_filtration
+
+    def create_simplex_tree(self, max_dimension):
+        dist = self.distance_matrix
+        F = self.filtration_values
+        num_pts = len(dist)
+
+        st = SimplexTree()
+
+        for i in range(num_pts):
+            if F[i] < self.max_filtration:
+                st.insert([i], F[i])
+        for i in range(num_pts):
+            for j in range(num_pts):
+                value = (dist[i][j] + F[i] + F[j]) / 2
+                if value < self.max_filtration:
+                    st.insert([i,j], filtration=value)
+        return st
+
-- cgit v1.2.3


From fadeb80b46001779e2a998941a02195921b03124 Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Mon, 6 Apr 2020 16:31:59 +0900
Subject: test_weighted_rips added

---
 src/python/test/test_weighted_rips.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 src/python/test/test_weighted_rips.py

diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py
new file mode 100644
index 00000000..f0db6798
--- /dev/null
+++ b/src/python/test/test_weighted_rips.py
@@ -0,0 +1,27 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+    See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+    Author(s): Yuichi Ike
+
+    Copyright (C) 2020 Inria
+
+    Modification(s):
+      - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.weighted_rips_complex import WeightedRipsComplex
+from gudhi.point_cloud.dtm import DTM
+import numpy
+from scipy.spatial.distance import cdist
+import pytest
+
+def test_dtm_rips_complex():
+    pts = numpy.array([[2.0, 2], [0, 1], [3, 4]])
+    dist = cdist(pts,pts)
+    dtm = DTM(2, q=2, metric="precomputed")
+    r = dtm.fit_transform(dist)
+    w_rips = WeightedRipsComplex(distance_matrix=dist, filtration_values=r)
+    st = w_rips.create_simplex_tree(max_dimension=2)
+    diag = st.persistence()
+    assert diag == [(0, (1.5811388300841898, float("inf"))), (0, (1.5811388300841898, 2.699172818834085)), (0, (1.5811388300841898, 2.699172818834085))]
+
+
\ No newline at end of file
-- cgit v1.2.3


From 5737c5e1e89cc4c939a784742f25b26ca163332d Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Mon, 6 Apr 2020 16:43:55 +0900
Subject: comments added

---
 src/python/gudhi/weighted_rips_complex.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index 34a627cb..84e8e38e 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -17,12 +17,26 @@ class WeightedRipsComplex:
     def __init__(self,
                  distance_matrix=None,
                  filtration_values=None,
-                 max_filtration=float('inf'), sparse=None):
+                 max_filtration=float('inf')):
+        """
+        Parameters:
+            distance_matrix: list of list of float,
+                distance matrix (full square or lower triangular)
+            filtration_values: list of float,
+                filtration value for each index
+            max_filtration: float,
+                specifies the maximal filtration value to be considered
+        """
         self.distance_matrix = distance_matrix
         self.filtration_values = filtration_values
         self.max_filtration = max_filtration
 
     def create_simplex_tree(self, max_dimension):
+        """
+        Parameter:
+            max_dimension: int
+                graph expansion until this given dimension
+        """
         dist = self.distance_matrix
         F = self.filtration_values
         num_pts = len(dist)
-- cgit v1.2.3


From 15586d479be885319dde6f703c3126176b796732 Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Mon, 6 Apr 2020 16:48:21 +0900
Subject: bug fixed

---
 src/python/gudhi/weighted_rips_complex.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index 84e8e38e..7d14ac65 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -51,5 +51,7 @@ class WeightedRipsComplex:
                 value = (dist[i][j] + F[i] + F[j]) / 2
                 if value < self.max_filtration:
                     st.insert([i,j], filtration=value)
+
+        st.expansion(max_dimension)
         return st
-- cgit v1.2.3


From a4fa5f673784a842e9fac13003c843d454c888a4 Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Mon, 6 Apr 2020 21:19:55 +0900
Subject: bug fixed, parameter name changed

---
 src/python/CMakeLists.txt                 |  2 ++
 src/python/gudhi/weighted_rips_complex.py | 19 +++++++++++--------
 src/python/test/test_weighted_rips.py     | 13 ++++++-------
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index cac4553a..4b87ed9b 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -57,6 +57,7 @@ if(PYTHONINTERP_FOUND)
     set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
     set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
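A minimal usage sketch of the WeightedRipsComplex class added above; it uses the weights= keyword that the parameter rename just below introduces (the three points and the constant weight 0.5 are made up for illustration):

import numpy as np
from scipy.spatial.distance import cdist
from gudhi.weighted_rips_complex import WeightedRipsComplex

pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
dist = cdist(pts, pts)
# vertices enter the filtration at w, and each edge at (d(x,y) + 2w) / 2 = d(x,y)/2 + w
w_rips = WeightedRipsComplex(distance_matrix=dist, weights=[0.5] * 3)
st = w_rips.create_simplex_tree(max_dimension=2)  # expands the graph up to triangles
print(st.persistence())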
    set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
+    set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ")
 
     add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
     add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -228,6 +229,7 @@ if(PYTHONINTERP_FOUND)
     file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
     file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
     file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+    file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
 
     add_custom_command(
         OUTPUT gudhi.so
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index 7d14ac65..9df2ddf9 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -1,6 +1,6 @@
 # This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
 # See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Raphaël Tinarrage and Yuichi Ike
+# Author(s): Raphaël Tinarrage, Yuichi Ike, Masatoshi Takenouchi
 #
 # Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
 #
@@ -12,23 +12,26 @@ from gudhi import SimplexTree
 class WeightedRipsComplex:
     """
     class to generate a weighted Rips complex
-    from a distance matrix and filtration value
+    from a distance matrix and weights on vertices
     """
     def __init__(self,
-                 distance_matrix=None,
-                 filtration_values=None,
+                 distance_matrix,
+                 weights=None,
                  max_filtration=float('inf')):
         """
         Parameters:
             distance_matrix: list of list of float,
                 distance matrix (full square or lower triangular)
             weights: list of float,
                 weight for each vertex
             max_filtration: float,
                 specifies the maximal filtration value to be considered
         """
         self.distance_matrix = distance_matrix
-        self.filtration_values = filtration_values
+        if weights is not None:
+            self.weights = weights
+        else:
+            self.weights = [0] * len(distance_matrix)
         self.max_filtration = max_filtration
 
     def create_simplex_tree(self, max_dimension):
@@ -38,7 +41,7 @@ class WeightedRipsComplex:
                 graph expansion until this given dimension
         """
         dist = self.distance_matrix
-        F = self.filtration_values
+        F = self.weights
         num_pts = len(dist)
 
         st = SimplexTree()
@@ -47,7 +50,7 @@ class WeightedRipsComplex:
             if F[i] < self.max_filtration:
                 st.insert([i], F[i])
         for i in range(num_pts):
-            for j in range(num_pts):
+            for j in range(i):
                 value = (dist[i][j] + F[i] + F[j]) / 2
                 if value < self.max_filtration:
                     st.insert([i,j], filtration=value)
diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py
index f0db6798..7896fb78 100644
--- a/src/python/test/test_weighted_rips.py
+++ b/src/python/test/test_weighted_rips.py
@@ -1,6 +1,6 @@
 """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
     See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-    Author(s): Yuichi Ike
+    Author(s): Yuichi Ike and Masatoshi Takenouchi
 
     Copyright (C) 2020 Inria
 
     Modification(s):
       - YYYY/MM Author: Description of the modification
"""
 
 from gudhi.weighted_rips_complex import WeightedRipsComplex
 from gudhi.point_cloud.dtm import DTM
-import numpy
+import numpy as np
 from scipy.spatial.distance import cdist
 import pytest
 
 def test_dtm_rips_complex():
-    pts = numpy.array([[2.0, 2], [0, 1], [3, 4]])
+    pts = np.array([[2.0, 2], [0, 1], [3, 4]])
     dist = cdist(pts,pts)
     dtm = DTM(2, q=2, metric="precomputed")
     r = dtm.fit_transform(dist)
-    w_rips = WeightedRipsComplex(distance_matrix=dist, filtration_values=r)
+    w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
     st = w_rips.create_simplex_tree(max_dimension=2)
-    diag = st.persistence()
-    assert diag == [(0, (1.5811388300841898, float("inf"))), (0, (1.5811388300841898, 2.699172818834085)), (0, (1.5811388300841898, 2.699172818834085))]
+    persistence_intervals0 = st.persistence_intervals_in_dimension(0)
+    assert persistence_intervals0 == pytest.approx(np.array([[1.58113883, 2.69917282],[1.58113883, 2.69917282], [1.58113883, float("inf")]]))
-
\ No newline at end of file
-- cgit v1.2.3


From 5eaca3ed69c564a6f44e6ff21ac33e2cc576bafa Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Mon, 6 Apr 2020 15:58:10 +0200
Subject: compute_persistence for cubical

---
 src/python/gudhi/cubical_complex.pyx          | 63 ++++++++++++++------------
 src/python/gudhi/periodic_cubical_complex.pyx | 65 +++++++++++++++------------
 2 files changed, 71 insertions(+), 57 deletions(-)

diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index ce844558..007abcb6 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -130,8 +130,31 @@ cdef class CubicalComplex:
         """
         return self.thisptr.dimension()
 
+    def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+        """This function computes the persistence of the complex, so it can be
+        accessed through :func:`persistent_betti_numbers`,
+        :func:`persistence_intervals_in_dimension`, etc. This function is
+        equivalent to :func:`persistence` when you do not want the list
+        :func:`persistence` returns.
+
+        :param homology_coeff_field: The homology coefficient field. Must be a
+            prime number
+        :type homology_coeff_field: int.
+        :param min_persistence: The minimum persistence value to take into
+            account (strictly greater than min_persistence). Default value is
+            0.0.
+            Sets min_persistence to -1.0 to see all values.
+        :type min_persistence: float.
+        :returns: Nothing.
+        """
+        if self.pcohptr != NULL:
+            del self.pcohptr
+        assert self.__is_defined()
+        self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
+        self.pcohptr.compute_persistence(homology_coeff_field, min_persistence)
+
     def persistence(self, homology_coeff_field=11, min_persistence=0):
-        """This function returns the persistence of the complex.
+        """This function computes and returns the persistence of the complex.
 
         :param homology_coeff_field: The homology coefficient field. Must be a
             prime number
         :type homology_coeff_field: int.
         :param min_persistence: The minimum persistence value to take into
             account (strictly greater than min_persistence). Default value is
             0.0.
             Sets min_persistence to -1.0 to see all values.
         :type min_persistence: float.
         :returns: list of pairs(dimension, pair(birth, death)) -- the
             persistence of the complex.
""" - if self.pcohptr != NULL: - del self.pcohptr - if self.thisptr != NULL: - self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True) - cdef vector[pair[int, pair[double, double]]] persistence_result - if self.pcohptr != NULL: - self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) - persistence_result = self.pcohptr.get_persistence() - return persistence_result + self.compute_persistence(homology_coeff_field, min_persistence) + return self.pcohptr.get_persistence() def betti_numbers(self): """This function returns the Betti numbers of the complex. :returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]). - :note: betti_numbers function requires persistence function to be + :note: betti_numbers function requires :func:`compute_persistence` function to be launched first. :note: betti_numbers function always returns [1, 0, 0, ...] as infinity filtration cubes are not removed from the complex. """ - cdef vector[int] bn_result - if self.pcohptr != NULL: - bn_result = self.pcohptr.betti_numbers() - return bn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()" + return self.pcohptr.betti_numbers() def persistent_betti_numbers(self, from_value, to_value): """This function returns the persistent Betti numbers of the complex. @@ -183,13 +197,11 @@ cdef class CubicalComplex: :returns: list of int -- The persistent Betti numbers ([B0, B1, ..., Bn]). - :note: persistent_betti_numbers function requires persistence + :note: persistent_betti_numbers function requires :func:`compute_persistence` function to be launched first. """ - cdef vector[int] pbn_result - if self.pcohptr != NULL: - pbn_result = self.pcohptr.persistent_betti_numbers(from_value, to_value) - return pbn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()" + return self.pcohptr.persistent_betti_numbers(from_value, to_value) def persistence_intervals_in_dimension(self, dimension): """This function returns the persistence intervals of the complex in a @@ -200,13 +212,8 @@ cdef class CubicalComplex: :returns: The persistence intervals. :rtype: numpy array of dimension 2 - :note: intervals_in_dim function requires persistence function to be + :note: intervals_in_dim function requires :func:`compute_persistence` function to be launched first. """ - cdef vector[pair[double,double]] intervals_result - if self.pcohptr != NULL: - intervals_result = self.pcohptr.intervals_in_dimension(dimension) - else: - print("intervals_in_dim function requires persistence function" - " to be launched first.", file=sys.stderr) - return np.array(intervals_result) + assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()" + return np.array(self.pcohptr.intervals_in_dimension(dimension)) diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index ff5ef3bd..246a3a02 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -135,8 +135,31 @@ cdef class PeriodicCubicalComplex: """ return self.thisptr.dimension() + def compute_persistence(self, homology_coeff_field=11, min_persistence=0): + """This function computes the persistence of the complex, so it can be + accessed through :func:`persistent_betti_numbers`, + :func:`persistence_intervals_in_dimension`, etc. 
This function is + equivalent to :func:`persistence` when you do not want the list + :func:`persistence` returns. + + :param homology_coeff_field: The homology coefficient field. Must be a + prime number + :type homology_coeff_field: int. + :param min_persistence: The minimum persistence value to take into + account (strictly greater than min_persistence). Default value is + 0.0. + Sets min_persistence to -1.0 to see all values. + :type min_persistence: float. + :returns: Nothing. + """ + if self.pcohptr != NULL: + del self.pcohptr + assert self.__is_defined() + self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True) + self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + def persistence(self, homology_coeff_field=11, min_persistence=0): - """This function returns the persistence of the complex. + """This function computes and returns the persistence of the complex. :param homology_coeff_field: The homology coefficient field. Must be a prime number @@ -149,31 +172,22 @@ cdef class PeriodicCubicalComplex: :returns: list of pairs(dimension, pair(birth, death)) -- the persistence of the complex. """ - if self.pcohptr != NULL: - del self.pcohptr - if self.thisptr != NULL: - self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True) - cdef vector[pair[int, pair[double, double]]] persistence_result - if self.pcohptr != NULL: - self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) - persistence_result = self.pcohptr.get_persistence() - return persistence_result + self.compute_persistence(homology_coeff_field, min_persistence) + return self.pcohptr.get_persistence() def betti_numbers(self): """This function returns the Betti numbers of the complex. :returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]). - :note: betti_numbers function requires persistence function to be + :note: betti_numbers function requires :func:`compute_persistence` function to be launched first. - :note: betti_numbers function always returns [1, 0, 0, ...] as infinity + :note: This function always returns the Betti numbers of a torus as infinity filtration cubes are not removed from the complex. """ - cdef vector[int] bn_result - if self.pcohptr != NULL: - bn_result = self.pcohptr.betti_numbers() - return bn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()" + return self.pcohptr.betti_numbers() def persistent_betti_numbers(self, from_value, to_value): """This function returns the persistent Betti numbers of the complex. @@ -188,13 +202,11 @@ cdef class PeriodicCubicalComplex: :returns: list of int -- The persistent Betti numbers ([B0, B1, ..., Bn]). - :note: persistent_betti_numbers function requires persistence + :note: persistent_betti_numbers function requires :func:`compute_persistence` function to be launched first. """ - cdef vector[int] pbn_result - if self.pcohptr != NULL: - pbn_result = self.pcohptr.persistent_betti_numbers(from_value, to_value) - return pbn_result + assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()" + return self.pcohptr.persistent_betti_numbers(from_value, to_value) def persistence_intervals_in_dimension(self, dimension): """This function returns the persistence intervals of the complex in a @@ -205,13 +217,8 @@ cdef class PeriodicCubicalComplex: :returns: The persistence intervals. 
:rtype: numpy array of dimension 2 - :note: intervals_in_dim function requires persistence function to be + :note: intervals_in_dim function requires :func:`compute_persistence` function to be launched first. """ - cdef vector[pair[double,double]] intervals_result - if self.pcohptr != NULL: - intervals_result = self.pcohptr.intervals_in_dimension(dimension) - else: - print("intervals_in_dim function requires persistence function" - " to be launched first.", file=sys.stderr) - return np.array(intervals_result) + assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()" + return np.array(self.pcohptr.intervals_in_dimension(dimension)) -- cgit v1.2.3 From 173506323471cf5175ea2b340abec63968c5cd5f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 6 Apr 2020 16:51:32 +0200 Subject: Use compute_persistence in an example --- .../example/alpha_rips_persistence_bottleneck_distance.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py index f156826d..3e12b0d5 100755 --- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py +++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py @@ -5,6 +5,7 @@ import argparse import math import errno import os +import numpy as np """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. @@ -56,7 +57,7 @@ with open(args.file, "r") as f: message = "Number of simplices=" + repr(rips_stree.num_simplices()) print(message) - rips_diag = rips_stree.persistence() + rips_stree.compute_persistence() print("##############################################################") print("AlphaComplex creation from points read in a OFF file") @@ -72,18 +73,13 @@ with open(args.file, "r") as f: message = "Number of simplices=" + repr(alpha_stree.num_simplices()) print(message) - alpha_diag = alpha_stree.persistence() + alpha_stree.compute_persistence() max_b_distance = 0.0 for dim in range(args.max_dimension): # Alpha persistence values needs to be transform because filtration # values are alpha square values - funcs = [math.sqrt, math.sqrt] - alpha_intervals = [] - for interval in alpha_stree.persistence_intervals_in_dimension(dim): - alpha_intervals.append( - map(lambda func, value: func(value), funcs, interval) - ) + alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim)) rips_intervals = rips_stree.persistence_intervals_in_dimension(dim) bottleneck_distance = gudhi.bottleneck_distance( -- cgit v1.2.3 From dd96965e521313b6210391f511c82cced9b2a950 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 6 Apr 2020 19:37:58 +0200 Subject: Remove trailing whitespace --- src/python/doc/wasserstein_distance_user.rst | 72 +++++++++++++------------- src/python/gudhi/wasserstein/barycenter.py | 42 +++++++-------- src/python/gudhi/wasserstein/wasserstein.py | 14 ++--- src/python/test/test_wasserstein_barycenter.py | 6 +-- src/python/test/test_wasserstein_distance.py | 2 +- 5 files changed, 68 insertions(+), 68 deletions(-) diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index b821b6fa..c24da74d 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -10,10 +10,10 @@ Definition .. 
include:: wasserstein_distance_sum.inc
 
 The q-Wasserstein distance is defined as the minimal value achieved
-by a perfect matching between the points of the two diagrams (+ all
-diagonal points), where the value of a matching is defined as the
+by a perfect matching between the points of the two diagrams (+ all
+diagonal points), where the value of a matching is defined as the
 q-th root of the sum of all edge lengths to the power q. Edge lengths
-are measured in norm p, for :math:`1 \leq p \leq \infty`.
+are measured in norm p, for :math:`1 \leq p \leq \infty`.
 
 Distance Functions
 ------------------
@@ -54,9 +54,9 @@ The output is:
 
     Wasserstein distance value = 1.45
 
-We can also have access to the optimal matching by letting `matching=True`.
+We can also have access to the optimal matching by letting `matching=True`.
 It is encoded as a list of indices (i,j), meaning that the i-th point in X
-is mapped to the j-th point in Y.
+is mapped to the j-th point in Y.
 An index of -1 represents the diagonal.
 
 .. testcode::
@@ -84,7 +84,7 @@ An index of -1 represents the diagonal.
 The output is:
 
 .. testoutput::
-    
+
     Wasserstein distance value = 2.15
     point 0 in dgm1 is matched to point 0 in dgm2
     point 1 in dgm1 is matched to point 2 in dgm2
@@ -94,32 +94,32 @@ The output is:
 Barycenters
 -----------
 
-A Frechet mean (or barycenter) is a generalization of the arithmetic
-mean in a non linear space such as the one of persistence diagrams.
-Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is
-defined as a minimizer of the variance functional, that is of
-:math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`,
-where :math:`d_2` denotes the Wasserstein-2 distance between
-persistence diagrams.
-It is known to exist and is generically unique. However, an exact
-computation is in general intractable. The implementation currently
-available is based on (Turner et al., 2014),
+A Frechet mean (or barycenter) is a generalization of the arithmetic
+mean in a non linear space such as the one of persistence diagrams.
+Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is
+defined as a minimizer of the variance functional, that is of
+:math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`,
+where :math:`d_2` denotes the Wasserstein-2 distance between
+persistence diagrams.
+It is known to exist and is generically unique. However, an exact
+computation is in general intractable. The implementation currently
+available is based on (Turner et al., 2014),
 :cite:`turner2014frechet`
-and uses an EM-scheme to
-provide a local minimum of the variance functional (somewhat similar
-to the Lloyd algorithm to estimate a solution to the k-means
+and uses an EM-scheme to
+provide a local minimum of the variance functional (somewhat similar
+to the Lloyd algorithm to estimate a solution to the k-means
 problem). The local minimum returned depends on the initialization of
-the barycenter.
-The combinatorial structure of the algorithm limits its
-performance on large scale problems (thousands of diagrams and of points
-per diagram).
+the barycenter.
+The combinatorial structure of the algorithm limits its
+performance on large scale problems (thousands of diagrams and of points
+per diagram).
+
+.. figure::
+     ./img/barycenter.png
+     :figclass: align-center
-.. figure::
-     ./img/barycenter.png
-     :figclass: align-center
-
-     Illustration of Frechet mean between persistence
-     diagrams.
+     Illustration of Frechet mean between persistence
+     diagrams.
 
..
autofunction:: gudhi.wasserstein.barycenter.lagrangian_barycenter
@@ -127,16 +127,16 @@ per diagram).
 Basic example
 *************
 
-This example estimates the Frechet mean (aka Wasserstein barycenter) between
+This example estimates the Frechet mean (aka Wasserstein barycenter) between
 four persistence diagrams.
 It is initialized on the 4th diagram.
-As the algorithm is not convex, its output depends on the initialization and
+As the algorithm is not convex, its output depends on the initialization and
 is only a local minimum of the objective function.
-Initialization can be either given as an integer (in which case the i-th
-diagram of the list is used as initial estimate) or as a diagram.
-If None, it will randomly select one of the diagrams of the list
+Initialization can be either given as an integer (in which case the i-th
+diagram of the list is used as initial estimate) or as a diagram.
+If None, it will randomly select one of the diagrams of the list
 as initial estimate.
-Note that persistence diagrams must be submitted as
+Note that persistence diagrams must be submitted as
 (n x 2) numpy arrays and must not contain inf values.
 
 
@@ -152,7 +152,7 @@ Note that persistence diagrams must be submitted as
     pdiagset = [dg1, dg2, dg3, dg4]
     bary = lagrangian_barycenter(pdiagset=pdiagset,init=3)
 
-    message = "Wasserstein barycenter estimated:"
+    message = "Wasserstein barycenter estimated:"
     print(message)
     print(bary)
 
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
index 99f29a1e..de7aea81 100644
--- a/src/python/gudhi/wasserstein/barycenter.py
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -18,7 +18,7 @@ from gudhi.wasserstein import wasserstein_distance
 def _mean(x, m):
     '''
     :param x: a list of 2D-points, off diagonal, x_0... x_{k-1}
-    :param m: total amount of points taken into account,
+    :param m: total amount of points taken into account,
     that is we have (m-k) copies of diagonal
     :returns: the weighted mean of x with (m-k) copies of the diagonal
     '''
@@ -33,14 +33,14 @@ def _mean(x, m):
 
 def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     '''
-    :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)`
-        (`n` can vary), encoding a set of
-        persistence diagrams with only finite coordinates.
-    :param init: The initial value for barycenter estimate.
-        If ``None``, init is made on a random diagram from the dataset.
-        Otherwise, it can be an ``int``
+    :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)`
+        (`n` can vary), encoding a set of
+        persistence diagrams with only finite coordinates.
+    :param init: The initial value for barycenter estimate.
+        If ``None``, init is made on a random diagram from the dataset.
+        Otherwise, it can be an ``int``
         (then initialization is made on ``pdiagset[init]``)
-        or a `(n x 2)` ``numpy.array`` encoding
+        or a `(n x 2)` ``numpy.array`` encoding
        a persistence diagram with `n` points.
    :type init: ``int``, or (n x 2) ``np.array``
    :param verbose: if ``True``, returns additional information about the
        barycenter.
    :type verbose: boolean
    :returns: If not verbose (default), a ``numpy.array`` encoding
        the barycenter estimate of pdiagset
        (local minimum of the energy function).
        If ``pdiagset`` is empty, returns ``None``.
       If verbose, returns a couple ``(Y, log)``
       where ``Y`` is the barycenter estimate,
       and ``log`` is a ``dict`` that contains additional information:

       - `"groupings"`, a list of list of pairs ``(i,j)``.
-         Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates
+         Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates
          that ``pdiagset[k][i]`` is matched to ``Y[j]``
-         if ``i = -1`` or ``j = -1``, it means they
+         if ``i = -1`` or ``j = -1``, it means they
          represent the diagonal.

       - `"energy"`, ``float`` representing the Frechet
         energy value obtained.
@@ -70,13 +70,13 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
     if m == 0:
         print("Warning: computing barycenter of empty diag set. Returns None")
         return None
-
+
     # store the number of off-diagonal points for each of the X_i
-    nb_off_diag = np.array([len(X_i) for X_i in X])
+    nb_off_diag = np.array([len(X_i) for X_i in X])
     # Initialisation of barycenter
     if init is None:
         i0 = np.random.randint(m)  # Index of first state for the barycenter
-        Y = X[i0].copy()
+        Y = X[i0].copy()
     else:
         if type(init)==int:
             Y = X[init].copy()
@@ -90,8 +90,8 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
         nb_iter += 1
         K = len(Y)  # current nb of points in Y (some might be on diagonal)
         G = np.full((K, m), -1, dtype=int)  # will store for each j, the (index)
-                                            # point matched in each other diagram
-                                            #(might be the diagonal).
+                                            # point matched in each other diagram
+                                            # (might be the diagonal).
                                             # that is G[j, i] = k <=> y_j is matched to
                                             # x_k in the i-th diagram X[i]
         updated_points = np.zeros((K, 2))  # will store the new positions of
@@ -111,7 +111,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
                 else:  # ...which is a diagonal point
                     G[y_j, i] = -1  # -1 stands for the diagonal (mask)
             else:  # We matched a diagonal point to x_i_j...
-                if x_i_j >= 0:  # which is a off-diag point !
+                if x_i_j >= 0:  # which is an off-diagonal point!
                     # need to create a new point in Y
                     new_y = _mean(np.array([X[i][x_i_j]]), m)
                     # Average this point with (m-1) copies of Delta
@@ -123,19 +123,19 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
             matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1]
             new_y_j = _mean(matched_points, m)
             if not np.array_equal(new_y_j, np.array([0,0])):
-                updated_points[j] = new_y_j
+                updated_points[j] = new_y_j
             else: # this point is no longer of any use.
                 to_delete.append(j)
         # we remove the points to be deleted now.
-        updated_points = np.delete(updated_points, to_delete, axis=0)
+        updated_points = np.delete(updated_points, to_delete, axis=0)

         # we cannot converge if there have been newly created points.
-        if new_created_points:
+        if new_created_points:
             Y = np.concatenate((updated_points, new_created_points))
         else:
             # Step 3 : we check convergence
             if np.array_equal(updated_points, Y):
-                converged = True
+                converged = True
             Y = updated_points

diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index e1233eec..35315939 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -30,9 +30,9 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.):
     :param Y: (m x 2) numpy.array encoding the second diagram.
     :param order: exponent for the Wasserstein metric.
     :param internal_p: Ground metric (i.e. norm L^p).
-    :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
-              For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j],
-              while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j])
+    :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
+              For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j],
+              while C[i, m] (resp. C[n, j]) encodes the distance (raised to the power `order`) between X[i] (resp. Y[j])
               and its orthogonal projection onto the diagonal.
               Note also that C[n, m] = 0  (it costs nothing to move from the diagonal to the diagonal).
     '''
@@ -59,7 +59,7 @@ def _perstot(X, order, internal_p):
     :param X: (n x 2) numpy.array (points of a given diagram).
     :param order: exponent for Wasserstein. Default value is 2.
     :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm).
-    :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+    :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
     '''
     Xdiag = _proj_on_diag(X)
     return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
@@ -67,16 +67,16 @@ def _perstot(X, order, internal_p):

 def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
     '''
-    :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
+    :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
               (i.e. with infinite coordinate).
     :param Y: (m x 2) numpy.array encoding the second diagram.
     :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
                      a (n x 2) np.array  [...[i,j]...], meaning the i-th point in X is matched to
                      the j-th point in Y, with the convention that (-1) represents the diagonal.
     :param order: exponent for Wasserstein; Default value is 2.
-    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
+    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
                        Default value is 2 (Euclidean norm).
-    :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+    :returns: the Wasserstein distance of order `order` (1 <= `order` < infinity) between persistence diagrams with
               respect to the internal_p-norm as ground metric.
              If matching is set to True, also returns the optimal matching between X and Y.
     '''
diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py
index f686aef5..f68c748e 100755
--- a/src/python/test/test_wasserstein_barycenter.py
+++ b/src/python/test/test_wasserstein_barycenter.py
@@ -17,7 +17,7 @@ __license__ = "MIT"


 def test_lagrangian_barycenter():
-
+
     dg1 = np.array([[0.2, 0.5]])
     dg2 = np.array([[0.2, 0.7]])
     dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
@@ -28,12 +28,12 @@ def test_lagrangian_barycenter():
     dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
     dg8 = np.array([[0., 4.], [4, 8]])
-
+
     # error crit.
eps = 1e-7 - assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps + assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2))) assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 0d70e11a..7e0d0f5f 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -70,7 +70,7 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat assert np.array_equal(match , [[0, -1], [1, -1]]) match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]]) - + def hera_wrap(delta): -- cgit v1.2.3 From 4294e5fc6e1bff246a7d22f1bd98f91b62f14163 Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Tue, 7 Apr 2020 09:36:03 +0900 Subject: filtration value fixed --- src/python/gudhi/weighted_rips_complex.py | 2 +- src/python/test/test_weighted_rips.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py index 9df2ddf9..7e504b2c 100644 --- a/src/python/gudhi/weighted_rips_complex.py +++ b/src/python/gudhi/weighted_rips_complex.py @@ -51,7 +51,7 @@ class WeightedRipsComplex: st.insert([i], F[i]) for i in range(num_pts): for j in range(i): - value = (dist[i][j] + F[i] + F[j]) / 2 + value = max(F[i], F[j], (dist[i][j] + F[i] + F[j]) / 2) if value < self.max_filtration: st.insert([i,j], filtration=value) diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py index 7896fb78..a3235276 100644 --- a/src/python/test/test_weighted_rips.py +++ b/src/python/test/test_weighted_rips.py @@ -14,13 +14,23 @@ import numpy as np from scipy.spatial.distance import cdist import pytest +def test_non_dtm_rips_complex(): + dist = [[], [1]] + weights = [1, 100] + w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights) + st = w_rips.create_simplex_tree(max_dimension=2) + assert st.filtration([0,1]) == pytest.approx(100.0) + + def test_dtm_rips_complex(): pts = np.array([[2.0, 2], [0, 1], [3, 4]]) dist = cdist(pts,pts) dtm = DTM(2, q=2, metric="precomputed") r = dtm.fit_transform(dist) - w_rips = WeightedRipsComplex(distance_mattix=dist, weights=r) + w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r) st = w_rips.create_simplex_tree(max_dimension=2) + st.persistence() persistence_intervals0 = st.persistence_intervals_in_dimension(0) assert persistence_intervals0 == pytest.approx(np.array([[1.58113883, 2.69917282],[1.58113883, 2.69917282], [1.58113883, float("inf")]])) + -- cgit v1.2.3 From 82dd4481fa0ecb8c1f696ee33e26d9be1e371e88 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 6 Apr 2020 22:46:32 +0200 Subject: Document dependencies for building the doc --- src/python/doc/installation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index d459145b..48425d5e 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -175,8 +175,8 @@ Documentation To build the documentation, 
`sphinx-doc `_ and `sphinxcontrib-bibtex `_ are required. As the documentation is auto-tested, `CGAL`_, `Eigen`_, -`Matplotlib`_, `NumPy`_ and `SciPy`_ are also mandatory to build the -documentation. +`Matplotlib`_, `NumPy`_, `POT`_, `Scikit-learn`_ and `SciPy`_ are +also mandatory to build the documentation. Run the following commands in a terminal: @@ -192,8 +192,8 @@ CGAL ==== Some GUDHI modules (cf. :doc:`modules list `), and few examples -require CGAL, a C++ library that provides easy access to efficient and -reliable geometric algorithms. +require `CGAL `_, a C++ library that provides easy +access to efficient and reliable geometric algorithms. The procedure to install this library -- cgit v1.2.3 From f9a933862050ca95b3a96d7a8572d62f7f2205a9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 11 Apr 2020 18:18:14 +0200 Subject: Use longer names --- src/python/gudhi/point_cloud/dtm.py | 10 +++-- src/python/gudhi/point_cloud/knn.py | 2 +- src/python/test/test_dtm.py | 18 ++++----- src/python/test/test_knn.py | 76 +++++++++++++++++++++++++++---------- 4 files changed, 71 insertions(+), 35 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 23c36b88..38368f29 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -7,10 +7,10 @@ # Modification(s): # - YYYY/MM Author: Description of the modification -from .knn import KNN +from .knn import KNearestNeighbors -class DTM: +class DistanceToMeasure: """ Class to compute the distance to the empirical measure defined by a point set, as introduced in :cite:`dtm`. """ @@ -20,7 +20,7 @@ class DTM: Args: k (int): number of neighbors (possibly including the point itself). q (float): order used to compute the distance to measure. Defaults to 2. - kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNN`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. + kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. """ self.k = k self.q = q @@ -35,7 +35,9 @@ class DTM: X (numpy.array): coordinates for mass points. """ if self.params.setdefault("metric", "euclidean") != "neighbors": - self.knn = KNN(self.k, return_index=False, return_distance=True, sort_results=False, **self.params) + self.knn = KNearestNeighbors( + self.k, return_index=False, return_distance=True, sort_results=False, **self.params + ) self.knn.fit(X) return self diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 8369f1f8..6642a3c2 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -10,7 +10,7 @@ import numpy -class KNN: +class KNearestNeighbors: """ Class wrapping several implementations for computing the k nearest neighbors in a point set. 
""" diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 93b13e1a..37934fdb 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -8,7 +8,7 @@ - YYYY/MM Author: Description of the modification """ -from gudhi.point_cloud.dtm import DTM +from gudhi.point_cloud.dtm import DistanceToMeasure import numpy import pytest @@ -16,35 +16,35 @@ import pytest def test_dtm_compare_euclidean(): pts = numpy.random.rand(1000, 4) k = 3 - dtm = DTM(k, implementation="ckdtree") + dtm = DistanceToMeasure(k, implementation="ckdtree") r0 = dtm.fit_transform(pts) - dtm = DTM(k, implementation="sklearn") + dtm = DistanceToMeasure(k, implementation="sklearn") r1 = dtm.fit_transform(pts) assert r1 == pytest.approx(r0) - dtm = DTM(k, implementation="sklearn", algorithm="brute") + dtm = DistanceToMeasure(k, implementation="sklearn", algorithm="brute") r2 = dtm.fit_transform(pts) assert r2 == pytest.approx(r0) - dtm = DTM(k, implementation="hnsw") + dtm = DistanceToMeasure(k, implementation="hnsw") r3 = dtm.fit_transform(pts) assert r3 == pytest.approx(r0) from scipy.spatial.distance import cdist d = cdist(pts, pts) - dtm = DTM(k, metric="precomputed") + dtm = DistanceToMeasure(k, metric="precomputed") r4 = dtm.fit_transform(d) assert r4 == pytest.approx(r0) - dtm = DTM(k, implementation="keops") + dtm = DistanceToMeasure(k, implementation="keops") r5 = dtm.fit_transform(pts) assert r5 == pytest.approx(r0) def test_dtm_precomputed(): dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]]) - dtm = DTM(2, q=1, metric="neighbors") + dtm = DistanceToMeasure(2, q=1, metric="neighbors") r = dtm.fit_transform(dist) assert r == pytest.approx([2.0, 3, 1]) dist = numpy.array([[2.0, 2], [0, 1], [3, 4]]) - dtm = DTM(2, q=2, metric="neighbors") + dtm = DistanceToMeasure(2, q=2, metric="neighbors") r = dtm.fit_transform(dist) assert r == pytest.approx([2.0, 0.707, 3.5355], rel=0.01) diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py index e455fb48..6aac2006 100755 --- a/src/python/test/test_knn.py +++ b/src/python/test/test_knn.py @@ -8,7 +8,7 @@ - YYYY/MM Author: Description of the modification """ -from gudhi.point_cloud.knn import KNN +from gudhi.point_cloud.knn import KNearestNeighbors import numpy as np import pytest @@ -16,39 +16,39 @@ import pytest def test_knn_explicit(): base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) query = np.array([[1.0, 1], [2, 2], [4, 4]]) - knn = KNN(2, metric="manhattan", return_distance=True, return_index=True) + knn = KNearestNeighbors(2, metric="manhattan", return_distance=True, return_index=True) knn.fit(base) r = knn.transform(query) assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]])) assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]])) - knn = KNN(2, metric="chebyshev", return_distance=True, return_index=False) + knn = KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False) knn.fit(base) r = knn.transform(query) assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) r = ( - KNN(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops") + KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops") .fit(base) .transform(query) ) assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) - knn = KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True) + knn = KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True) knn.fit(base) r = 
knn.transform(query) assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) r = ( - KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops") + KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops") .fit(base) .transform(query) ) assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]]) - knn = KNN(2, metric="precomputed", return_index=True, return_distance=False) + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False) r = knn.fit_transform(dist) assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) - knn = KNN(2, metric="precomputed", return_index=True, return_distance=True) + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True) r = knn.fit_transform(dist) assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) @@ -57,16 +57,40 @@ def test_knn_explicit(): def test_knn_compare(): base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) query = np.array([[1.0, 1], [2, 2], [4, 4]]) - r0 = KNN(2, implementation="ckdtree", return_index=True, return_distance=False).fit(base).transform(query) - r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=False).fit(base).transform(query) - r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query) - r3 = KNN(2, implementation="keops", return_index=True, return_distance=False).fit(base).transform(query) + r0 = ( + KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=False) + .fit(base) + .transform(query) + ) + r1 = ( + KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=False) + .fit(base) + .transform(query) + ) + r2 = ( + KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query) + ) + r3 = ( + KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=False) + .fit(base) + .transform(query) + ) assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3) - r0 = KNN(2, implementation="ckdtree", return_index=True, return_distance=True).fit(base).transform(query) - r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=True).fit(base).transform(query) - r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query) - r3 = KNN(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query) + r0 = ( + KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=True) + .fit(base) + .transform(query) + ) + r1 = ( + KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=True) + .fit(base) + .transform(query) + ) + r2 = KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query) + r3 = ( + KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query) + ) assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0]) d0 = pytest.approx(r0[1]) assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0 @@ -75,8 +99,18 @@ def test_knn_compare(): def test_knn_nop(): # This doesn't look super useful... 
p = np.array([[0.0]]) - assert None is KNN(k=1, return_index=False, return_distance=False, implementation="sklearn").fit_transform(p) - assert None is KNN(k=1, return_index=False, return_distance=False, implementation="ckdtree").fit_transform(p) - assert None is KNN(k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5).fit_transform(p) - assert None is KNN(k=1, return_index=False, return_distance=False, implementation="keops").fit_transform(p) - assert None is KNN(k=1, return_index=False, return_distance=False, metric="precomputed").fit_transform(p) + assert None is KNearestNeighbors( + k=1, return_index=False, return_distance=False, implementation="sklearn" + ).fit_transform(p) + assert None is KNearestNeighbors( + k=1, return_index=False, return_distance=False, implementation="ckdtree" + ).fit_transform(p) + assert None is KNearestNeighbors( + k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5 + ).fit_transform(p) + assert None is KNearestNeighbors( + k=1, return_index=False, return_distance=False, implementation="keops" + ).fit_transform(p) + assert None is KNearestNeighbors( + k=1, return_index=False, return_distance=False, metric="precomputed" + ).fit_transform(p) -- cgit v1.2.3 From 83a1bc1fb6124a35d515f4836d2e830f3dbdf0e7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 12 Apr 2020 21:57:51 +0200 Subject: Parallelize the "precomputed" case of knn It is supposed to be possible to compile numpy with openmp, but it looks like it isn't done in any of the usual packages. It may be possible to refactor that code so there is less redundancy. --- src/python/gudhi/point_cloud/knn.py | 78 +++++++++++++++++++++++++++++-------- src/python/test/test_dtm.py | 3 ++ src/python/test/test_knn.py | 8 ++++ 3 files changed, 73 insertions(+), 16 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 6642a3c2..f6870517 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -115,25 +115,71 @@ class KNearestNeighbors: if metric == "precomputed": # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural. - X = numpy.array(X) if self.return_index: - neighbors = numpy.argpartition(X, k - 1)[:, 0:k] - if self.params.get("sort_results", True): - X = numpy.take_along_axis(X, neighbors, axis=-1) - ngb_order = numpy.argsort(X, axis=-1) - neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + n_jobs = self.params.get("n_jobs", 1) + # Supposedly numpy can be compiled with OpenMP and handle this, but nobody does that?! 
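For context, a minimal sketch of what this parallel path enables, mirroring the test added later in this commit (``n_jobs`` is forwarded through the backend parameters):

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]])
    # precomputed distance matrix, neighbors computed on 2 threads
    knn = KNearestNeighbors(2, metric="precomputed", return_index=True,
                            return_distance=False, n_jobs=2)
    print(knn.fit_transform(dist))  # [[0, 1], [1, 0], [2, 0]]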
+ if n_jobs == 1: + neighbors = numpy.argpartition(X, k - 1)[:, 0:k] + if self.params.get("sort_results", True): + X = numpy.take_along_axis(X, neighbors, axis=-1) + ngb_order = numpy.argsort(X, axis=-1) + neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + else: + ngb_order = neighbors + if self.return_distance: + distances = numpy.take_along_axis(X, ngb_order, axis=-1) + return neighbors, distances + else: + return neighbors else: - ngb_order = neighbors - if self.return_distance: - distances = numpy.take_along_axis(X, ngb_order, axis=-1) - return neighbors, distances - else: - return neighbors + from joblib import Parallel, delayed, effective_n_jobs + from sklearn.utils import gen_even_slices + + slices = gen_even_slices(len(X), effective_n_jobs(-1)) + parallel = Parallel(backend="threading", n_jobs=-1) + if self.params.get("sort_results", True): + + def func(M): + neighbors = numpy.argpartition(M, k - 1)[:, 0:k] + Y = numpy.take_along_axis(M, neighbors, axis=-1) + ngb_order = numpy.argsort(Y, axis=-1) + return numpy.take_along_axis(neighbors, ngb_order, axis=-1) + + else: + + def func(M): + return numpy.argpartition(M, k - 1)[:, 0:k] + + neighbors = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices)) + if self.return_distance: + distances = numpy.take_along_axis(X, neighbors, axis=-1) + return neighbors, distances + else: + return neighbors if self.return_distance: - distances = numpy.partition(X, k - 1)[:, 0:k] - if self.params.get("sort_results"): - # partition is not guaranteed to sort the lower half, although it often does - distances.sort(axis=-1) + n_jobs = self.params.get("n_jobs", 1) + if n_jobs == 1: + distances = numpy.partition(X, k - 1)[:, 0:k] + if self.params.get("sort_results"): + # partition is not guaranteed to sort the lower half, although it often does + distances.sort(axis=-1) + else: + from joblib import Parallel, delayed, effective_n_jobs + from sklearn.utils import gen_even_slices + + if self.params.get("sort_results"): + + def func(M): + # Not partitioning in place, because we should not modify the user's array? 
+ r = numpy.partition(M, k - 1)[:, 0:k] + r.sort(axis=-1) + return r + + else: + func = lambda M: numpy.partition(M, k - 1)[:, 0:k] + slices = gen_even_slices(len(X), effective_n_jobs(-1)) + parallel = Parallel(backend="threading", n_jobs=-1) + distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices)) return distances return None diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 37934fdb..bc0d3698 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -33,6 +33,9 @@ def test_dtm_compare_euclidean(): dtm = DistanceToMeasure(k, metric="precomputed") r4 = dtm.fit_transform(d) assert r4 == pytest.approx(r0) + dtm = DistanceToMeasure(k, metric="precomputed", n_jobs=2) + r4b = dtm.fit_transform(d) + assert r4b == pytest.approx(r0) dtm = DistanceToMeasure(k, implementation="keops") r5 = dtm.fit_transform(pts) assert r5 == pytest.approx(r0) diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py index 6aac2006..6269df54 100755 --- a/src/python/test/test_knn.py +++ b/src/python/test/test_knn.py @@ -52,6 +52,14 @@ def test_knn_explicit(): r = knn.fit_transform(dist) assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) + # Second time in parallel + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2) + r = knn.fit_transform(dist) + assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, n_jobs=2) + r = knn.fit_transform(dist) + assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) + assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) def test_knn_compare(): -- cgit v1.2.3 From 280eb9d2323837619db1ae013b929adb9b45013b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 01:09:45 +0200 Subject: enable_autodiff with keops This doesn't seem like the best way to handle it, we may want to handle it like a wrapper that gets the indices from knn (whatever backend) and then computes the distances. --- src/python/gudhi/point_cloud/knn.py | 33 +++++++++++++++++++++++++++++---- src/python/test/test_dtm.py | 8 ++++++++ src/python/test/test_knn.py | 6 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index f6870517..79362c09 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -36,6 +36,9 @@ class KNearestNeighbors: sort_results (bool): if True, then distances and indices of each point are sorted on return, so that the first column contains the closest points. Otherwise, neighbors are returned in an arbitrary order. Defaults to True. + enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.array or similar, this instructs + the function to compute distances in a way that works with automatic differentiation. + This is experimental and not supported for all implementations. kwargs: additional parameters are forwarded to the backends. """ self.k = k @@ -202,13 +205,18 @@ class KNearestNeighbors: if self.params["implementation"] == "keops": import torch from pykeops.torch import LazyTensor + import eagerpy as ep # 'float64' is slow except on super expensive GPUs. Allow it with some param? 
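A sketch of the intended use of ``enable_autodiff``, mirroring the test added below in this commit: with a torch input, gradients flow through the DTM values.

    import numpy, torch
    from gudhi.point_cloud.dtm import DistanceToMeasure

    pts = torch.tensor(numpy.random.rand(100, 2), requires_grad=True)
    dtm = DistanceToMeasure(3, implementation="keops", enable_autodiff=True)
    r = dtm.fit_transform(pts)
    r.sum().backward()  # populates pts.grad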
- XX = torch.tensor(X, dtype=torch.float32) - if X is self.ref_points: + queries = X + X = ep.astensor(X) + XX = torch.as_tensor(X.numpy(), dtype=torch.float32) + if queries is self.ref_points: + Y = X YY = XX else: - YY = torch.tensor(self.ref_points, dtype=torch.float32) + Y = ep.astensor(self.ref_points) + YY = torch.as_tensor(Y.numpy(), dtype=torch.float32) p = self.params["p"] if p == numpy.inf: @@ -219,6 +227,24 @@ class KNearestNeighbors: else: mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1) + # pykeops does not support autodiff for kmin yet :-( + if self.params.get("enable_autodiff", False) and self.return_distance: + # Compute the indices of the neighbors, and recompute the relevant distances autodiff-friendly. + # Another strategy would be to compute the whole distance matrix with torch.cdist + # and use neighbors as indices into it. + neighbors = ep.astensor(mat.argKmin(k, dim=1)).numpy() + neighbor_pts = Y[neighbors] + diff = neighbor_pts - X[:, None, :] + if p == numpy.inf: + distances = diff.abs().max(-1) + elif p == 2: + distances = (diff ** 2).sum(-1) ** 0.5 + else: + distances = (diff.abs() ** p).sum(-1) ** (1.0 / p) + if self.return_index: + return neighbors.raw, distances.raw + else: + return distances.raw if self.return_index: if self.return_distance: distances, neighbors = mat.Kmin_argKmin(k, dim=1) @@ -234,7 +260,6 @@ class KNearestNeighbors: distances = distances ** (1.0 / p) return distances return None - # FIXME: convert everything back to numpy arrays or not? if self.params["implementation"] == "ckdtree": qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}} diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index bc0d3698..8709dd07 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -11,6 +11,7 @@ from gudhi.point_cloud.dtm import DistanceToMeasure import numpy import pytest +import torch def test_dtm_compare_euclidean(): @@ -39,6 +40,13 @@ def test_dtm_compare_euclidean(): dtm = DistanceToMeasure(k, implementation="keops") r5 = dtm.fit_transform(pts) assert r5 == pytest.approx(r0) + pts2 = torch.tensor(pts, requires_grad=True) + assert pts2.grad is None + dtm = DistanceToMeasure(k, implementation="keops", enable_autodiff=True) + r6 = dtm.fit_transform(pts2) + assert r6.detach().numpy() == pytest.approx(r0) + r6.sum().backward() + assert pts2.grad is not None def test_dtm_precomputed(): diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py index 6269df54..415c9d48 100755 --- a/src/python/test/test_knn.py +++ b/src/python/test/test_knn.py @@ -32,6 +32,12 @@ def test_knn_explicit(): .transform(query) ) assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) + r = ( + KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops", enable_autodiff=True) + .fit(base) + .transform(query) + ) + assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) knn = KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True) knn.fit(base) -- cgit v1.2.3 From 2f1576a23cf4ac055565875d384ca604c0ff6844 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 15:01:51 +0200 Subject: Small autodiff tweaks --- src/python/gudhi/point_cloud/knn.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 79362c09..ab3447d4 100644 --- a/src/python/gudhi/point_cloud/knn.py 
+++ b/src/python/gudhi/point_cloud/knn.py @@ -233,16 +233,17 @@ class KNearestNeighbors: # Another strategy would be to compute the whole distance matrix with torch.cdist # and use neighbors as indices into it. neighbors = ep.astensor(mat.argKmin(k, dim=1)).numpy() - neighbor_pts = Y[neighbors] + # Work around https://github.com/pytorch/pytorch/issues/34452 + neighbor_pts = Y[neighbors,] diff = neighbor_pts - X[:, None, :] if p == numpy.inf: distances = diff.abs().max(-1) elif p == 2: - distances = (diff ** 2).sum(-1) ** 0.5 + distances = (diff ** 2).sum(-1).sqrt() else: distances = (diff.abs() ** p).sum(-1) ** (1.0 / p) if self.return_index: - return neighbors.raw, distances.raw + return neighbors, distances.raw else: return distances.raw if self.return_index: -- cgit v1.2.3 From 3a86402b733a48d9c25a4995325e72c7438c06c0 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 15:21:06 +0200 Subject: Fix NaN gradient with pytorch --- src/python/gudhi/point_cloud/knn.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index ab3447d4..185a7764 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -236,12 +236,11 @@ class KNearestNeighbors: # Work around https://github.com/pytorch/pytorch/issues/34452 neighbor_pts = Y[neighbors,] diff = neighbor_pts - X[:, None, :] - if p == numpy.inf: - distances = diff.abs().max(-1) - elif p == 2: - distances = (diff ** 2).sum(-1).sqrt() + if isinstance(diff, ep.PyTorchTensor): + # https://github.com/jonasrauber/eagerpy/issues/6 + distances = ep.astensor(diff.raw.norm(p, -1)) else: - distances = (diff.abs() ** p).sum(-1) ** (1.0 / p) + distances = diff.norms.lp(p, -1) if self.return_index: return neighbors, distances.raw else: -- cgit v1.2.3 From 3afce326428dddd638e22ab37ee4b2afe52eba75 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 20:32:39 +0200 Subject: Generalize enable_autodiff to more implementations Still limited to L^p --- src/python/gudhi/point_cloud/knn.py | 76 +++++++++++++++++++++++++++---------- 1 file changed, 55 insertions(+), 21 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 185a7764..87b2798e 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -9,6 +9,7 @@ import numpy +# TODO: https://github.com/facebookresearch/faiss class KNearestNeighbors: """ @@ -67,6 +68,8 @@ class KNearestNeighbors: self.params["implementation"] = "ckdtree" else: self.params["implementation"] = "sklearn" + if not return_distance: + self.params["enable_autodiff"] = False def fit_transform(self, X, y=None): return self.fit(X).transform(X) @@ -77,6 +80,10 @@ class KNearestNeighbors: X (numpy.array): coordinates for reference points. """ self.ref_points = X + if self.params.get("enable_autodiff", False): + import eagerpy as ep + if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor): + X = ep.astensor(X).numpy() if self.params["implementation"] == "ckdtree": # sklearn could handle this, but it is much slower from scipy.spatial import cKDTree @@ -113,6 +120,41 @@ class KNearestNeighbors: Args: X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed". """ + if self.params.get("enable_autodiff", False): + # pykeops does not support autodiff for kmin yet, but when it does in the future, + # we may want a special path. 
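After this generalization, autodiff is no longer tied to the keops backend; a minimal sketch with the ckdtree implementation, mirroring the extra test added later in this series:

    import numpy, torch
    from gudhi.point_cloud.dtm import DistanceToMeasure

    pts = torch.tensor(numpy.random.rand(100, 2), requires_grad=True)
    dtm = DistanceToMeasure(3, implementation="ckdtree", enable_autodiff=True)
    dtm.fit_transform(pts).sum().backward()
    assert pts.grad is not None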
+ import eagerpy as ep + save_return_index = self.return_index + self.return_index = True + self.return_distance = False + self.params["enable_autodiff"] = False + try: + # FIXME: how do we test "X is ref_points" then? + newX = ep.astensor(X) + if self.params["implementation"] != "keops" or not isinstance(newX, ep.PyTorchTensor): + newX = newX.numpy() + neighbors = self.transform(newX) + finally: + self.return_index = save_return_index + self.return_distance = True + self.params["enable_autodiff"] = True + # We can implement more later as needed + assert self.metric == "minkowski" + p = self.params["p"] + Y = ep.astensor(self.ref_points) + neighbor_pts = Y[neighbors,] + diff = neighbor_pts - X[:, None, :] + if isinstance(diff, ep.PyTorchTensor): + # https://github.com/jonasrauber/eagerpy/issues/6 + distances = ep.astensor(diff.raw.norm(p, -1)) + else: + distances = diff.norms.lp(p, -1) + if self.return_index: + return neighbors, distances.raw + else: + return distances.raw + + metric = self.metric k = self.k @@ -207,16 +249,26 @@ class KNearestNeighbors: from pykeops.torch import LazyTensor import eagerpy as ep - # 'float64' is slow except on super expensive GPUs. Allow it with some param? queries = X X = ep.astensor(X) - XX = torch.as_tensor(X.numpy(), dtype=torch.float32) + if isinstance(X, ep.PyTorchTensor): + XX = X.raw + else: + # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch + # without copying to/from the CPU. + XX = X.numpy() + # 'float64' is slow except on super expensive GPUs. Allow it with some param? + XX = torch.as_tensor(XX, dtype=torch.float32) if queries is self.ref_points: Y = X YY = XX else: Y = ep.astensor(self.ref_points) - YY = torch.as_tensor(Y.numpy(), dtype=torch.float32) + if isinstance(Y, ep.PyTorchTensor): + YY = Y.raw + else: + YY = Y.numpy() + YY = torch.as_tensor(YY, dtype=torch.float32) p = self.params["p"] if p == numpy.inf: @@ -227,24 +279,6 @@ class KNearestNeighbors: else: mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1) - # pykeops does not support autodiff for kmin yet :-( - if self.params.get("enable_autodiff", False) and self.return_distance: - # Compute the indices of the neighbors, and recompute the relevant distances autodiff-friendly. - # Another strategy would be to compute the whole distance matrix with torch.cdist - # and use neighbors as indices into it. 
- neighbors = ep.astensor(mat.argKmin(k, dim=1)).numpy() - # Work around https://github.com/pytorch/pytorch/issues/34452 - neighbor_pts = Y[neighbors,] - diff = neighbor_pts - X[:, None, :] - if isinstance(diff, ep.PyTorchTensor): - # https://github.com/jonasrauber/eagerpy/issues/6 - distances = ep.astensor(diff.raw.norm(p, -1)) - else: - distances = diff.norms.lp(p, -1) - if self.return_index: - return neighbors, distances.raw - else: - return distances.raw if self.return_index: if self.return_distance: distances, neighbors = mat.Kmin_argKmin(k, dim=1) -- cgit v1.2.3 From 521d8c17c2b7d71c46a51f0490ff2c13c809fc87 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 21:13:19 +0200 Subject: Remove left-over code eagerpy is only used with enable_autodiff --- src/python/gudhi/point_cloud/knn.py | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 87b2798e..f2cddb38 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -82,8 +82,11 @@ class KNearestNeighbors: self.ref_points = X if self.params.get("enable_autodiff", False): import eagerpy as ep + X = ep.astensor(X) if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor): - X = ep.astensor(X).numpy() + # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch + # without copying to/from the CPU. + X = X.numpy() if self.params["implementation"] == "ckdtree": # sklearn could handle this, but it is much slower from scipy.spatial import cKDTree @@ -133,6 +136,8 @@ class KNearestNeighbors: newX = ep.astensor(X) if self.params["implementation"] != "keops" or not isinstance(newX, ep.PyTorchTensor): newX = newX.numpy() + else: + newX = X neighbors = self.transform(newX) finally: self.return_index = save_return_index @@ -247,29 +252,13 @@ class KNearestNeighbors: if self.params["implementation"] == "keops": import torch from pykeops.torch import LazyTensor - import eagerpy as ep - queries = X - X = ep.astensor(X) - if isinstance(X, ep.PyTorchTensor): - XX = X.raw - else: - # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch - # without copying to/from the CPU. - XX = X.numpy() # 'float64' is slow except on super expensive GPUs. Allow it with some param? - XX = torch.as_tensor(XX, dtype=torch.float32) - if queries is self.ref_points: - Y = X + XX = torch.as_tensor(X, dtype=torch.float32) + if X is self.ref_points: YY = XX else: - Y = ep.astensor(self.ref_points) - if isinstance(Y, ep.PyTorchTensor): - YY = Y.raw - else: - YY = Y.numpy() - YY = torch.as_tensor(YY, dtype=torch.float32) - + YY = torch.as_tensor(self.ref_points, dtype=torch.float32) p = self.params["p"] if p == numpy.inf: # Requires pykeops 1.4 or later -- cgit v1.2.3 From ce75f66da5a2d7ad2c479355112d48817c5ba68b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 21:38:24 +0200 Subject: Tweak to detect fit_transform --- src/python/gudhi/point_cloud/knn.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index f2cddb38..8b3cdb46 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -11,6 +11,7 @@ import numpy # TODO: https://github.com/facebookresearch/faiss + class KNearestNeighbors: """ Class wrapping several implementations for computing the k nearest neighbors in a point set. 
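The detection logic in this commit leans on eagerpy's uniform wrapper types; a minimal sketch of the round-trip it performs:

    import numpy as np
    import eagerpy as ep

    x = ep.astensor(np.zeros((3, 2)))
    assert isinstance(x, ep.NumPyTensor)  # a numpy input wraps to a NumPyTensor
    x.numpy()  # back to a plain numpy array
    x.raw      # the wrapped native tensor itself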
@@ -82,6 +83,7 @@ class KNearestNeighbors: self.ref_points = X if self.params.get("enable_autodiff", False): import eagerpy as ep + X = ep.astensor(X) if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor): # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch @@ -127,17 +129,19 @@ class KNearestNeighbors: # pykeops does not support autodiff for kmin yet, but when it does in the future, # we may want a special path. import eagerpy as ep + save_return_index = self.return_index self.return_index = True self.return_distance = False self.params["enable_autodiff"] = False try: - # FIXME: how do we test "X is ref_points" then? newX = ep.astensor(X) - if self.params["implementation"] != "keops" or not isinstance(newX, ep.PyTorchTensor): + if self.params["implementation"] != "keops" or ( + not isinstance(newX, ep.PyTorchTensor) and not isinstance(newX, ep.NumPyTensor) + ): newX = newX.numpy() else: - newX = X + newX = newX.raw neighbors = self.transform(newX) finally: self.return_index = save_return_index @@ -159,7 +163,6 @@ class KNearestNeighbors: else: return distances.raw - metric = self.metric k = self.k -- cgit v1.2.3 From e40f81361c6d40e3470f9b3880fceefd837b3da2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 13 Apr 2020 21:41:12 +0200 Subject: pip install eagerpy --- .github/test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt index 4f9dcefb..fb1df134 100644 --- a/.github/test-requirements.txt +++ b/.github/test-requirements.txt @@ -10,3 +10,4 @@ tensorflow torch pykeops hnswlib +eagerpy -- cgit v1.2.3 From f0c5aab988ee966510503a30b0591105594ac67d Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 15:37:31 +0200 Subject: More testing --- src/python/test/test_dtm.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 8709dd07..db3e5df5 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -47,6 +47,13 @@ def test_dtm_compare_euclidean(): assert r6.detach().numpy() == pytest.approx(r0) r6.sum().backward() assert pts2.grad is not None + pts2 = torch.tensor(pts, requires_grad=True) + assert pts2.grad is None + dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True) + r7 = dtm.fit_transform(pts2) + assert r7.detach().numpy() == pytest.approx(r0) + r7.sum().backward() + assert pts2.grad is not None def test_dtm_precomputed(): -- cgit v1.2.3 From b908205e85bbe29c8d18ad1f38e783a1327434d7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 17:00:27 +0200 Subject: EagerPy in cmake --- src/cmake/modules/GUDHI_third_party_libraries.cmake | 1 + src/python/CMakeLists.txt | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index a931b3a1..0abe66b7 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -181,6 +181,7 @@ if( PYTHONINTERP_FOUND ) find_python_module("pybind11") find_python_module("torch") find_python_module("pykeops") + find_python_module("eagerpy") find_python_module_no_version("hnswlib") endif() diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d7a6a4db..99e8b57c 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -88,6 +88,9 @@ if(PYTHONINTERP_FOUND) if(PYKEOPS_FOUND) 
add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}") endif() + if(EAGERPY_FOUND) + add_gudhi_debug_info("EagerPy version ${EAGERPY_VERSION}") + endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ") @@ -410,7 +413,7 @@ if(PYTHONINTERP_FOUND) add_gudhi_py_test(test_time_delay) # DTM - if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND AND EAGERPY_FOUND) add_gudhi_py_test(test_knn) add_gudhi_py_test(test_dtm) endif() -- cgit v1.2.3 From 6d02ca0e077cc9750275abdfc024429cec0ba5a5 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 17:10:58 +0200 Subject: Install Ubuntu's python3-grpcio since the one from PyPI seems broken at the moment. --- Dockerfile_for_circleci_image | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index 20754e2a..c2e8a8f5 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -43,6 +43,7 @@ RUN apt-get install -y make \ python3 \ python3-pip \ python3-tk \ + python3-grpcio \ libfreetype6-dev \ pkg-config \ curl -- cgit v1.2.3 From 9518287cfa2a62948ede2e7d17d5c9f29092e0f4 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 18:27:19 +0200 Subject: Doc improvements --- src/python/gudhi/point_cloud/dtm.py | 12 ++++++++++-- src/python/gudhi/point_cloud/knn.py | 11 ++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 38368f29..58dec536 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -20,7 +20,9 @@ class DistanceToMeasure: Args: k (int): number of neighbors (possibly including the point itself). q (float): order used to compute the distance to measure. Defaults to 2. - kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that metric="neighbors" means that :func:`transform` expects an array with the distances to the k nearest neighbors. + kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that + metric="neighbors" means that :func:`transform` expects an array with the distances + to the k nearest neighbors. """ self.k = k self.q = q @@ -44,7 +46,13 @@ class DistanceToMeasure: def transform(self, X): """ Args: - X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", or distances to the k nearest neighbors if metric is "neighbors" (if the array has more than k columns, the remaining ones are ignored). + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", + or distances to the k nearest neighbors if metric is "neighbors" (if the array has more + than k columns, the remaining ones are ignored). + + Returns: + numpy.array: a 1-d array with, for each point of X, its distance to the measure defined + by the argument of :func:`fit`. 
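To make the "neighbors" metric described here concrete, a sketch taken from the precomputed test earlier in this series (each row holds the distances from one point to its k nearest neighbors):

    import numpy
    from gudhi.point_cloud.dtm import DistanceToMeasure

    dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]])
    dtm = DistanceToMeasure(2, q=1, metric="neighbors")
    print(dtm.fit_transform(dist))  # approximately [2.0, 3.0, 1.0]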
""" if self.params["metric"] == "neighbors": distances = X[:, : self.k] diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 8b3cdb46..d7cf0b2a 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -38,9 +38,9 @@ class KNearestNeighbors: sort_results (bool): if True, then distances and indices of each point are sorted on return, so that the first column contains the closest points. Otherwise, neighbors are returned in an arbitrary order. Defaults to True. - enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.array or similar, this instructs - the function to compute distances in a way that works with automatic differentiation. - This is experimental and not supported for all implementations. + enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.ndarray or tensorflow.Tensor, this + instructs the function to compute distances in a way that works with automatic differentiation. + This is experimental and not supported for all metrics. Defaults to False. kwargs: additional parameters are forwarded to the backends. """ self.k = k @@ -124,6 +124,11 @@ class KNearestNeighbors: """ Args: X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed". + + Returns: + numpy.array: if return_index, an array of shape (len(X), k) with the indices (in the argument + of :func:`fit`) of the k nearest neighbors to the points of X. If return_distance, an array of the + same shape with the distances to those neighbors. If both, a tuple with the two arrays, in this order. """ if self.params.get("enable_autodiff", False): # pykeops does not support autodiff for kmin yet, but when it does in the future, -- cgit v1.2.3 From acb9d5b9d1317d3d8168bc3ac46860d078abba84 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 20:30:29 +0200 Subject: Check that the gradient is not NaN This can easily happen with pytorch, and there is special code to avoid it. --- src/python/test/test_dtm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index db3e5df5..de74c42b 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -46,14 +46,14 @@ def test_dtm_compare_euclidean(): r6 = dtm.fit_transform(pts2) assert r6.detach().numpy() == pytest.approx(r0) r6.sum().backward() - assert pts2.grad is not None + assert pts2.grad is not None and not torch.isnan(pts2.grad).any() pts2 = torch.tensor(pts, requires_grad=True) assert pts2.grad is None dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True) r7 = dtm.fit_transform(pts2) assert r7.detach().numpy() == pytest.approx(r0) r7.sum().backward() - assert pts2.grad is not None + assert pts2.grad is not None and not torch.isnan(pts2.grad).any() def test_dtm_precomputed(): -- cgit v1.2.3 From d302e90dcf4b284e6dc8b3ab21e8a67fb9cf5179 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 16 Apr 2020 15:40:45 +0200 Subject: Update the concept of the simplicial complex We use the key now. It wouldn't be hard to use an unordered_map, but since we usually have an unused field key... 
--- src/Alpha_complex/concept/SimplicialComplexForAlpha.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h index 1c6c3b0c..c20c3201 100644 --- a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h +++ b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h @@ -72,6 +72,24 @@ struct SimplicialComplexForAlpha { /** \brief Return type of an insertion of a simplex */ typedef unspecified Insertion_result_type; + + /** \name Map interface + * Conceptually a `std::unordered_map`. + * @{ */ + /** \brief Data stored for each simplex. + * + * Must be an integer type. */ + typedef unspecified Simplex_key; + /** \brief Returns a constant dummy number that is either negative, + * or at least as large as the number of simplices. Suggested value: -1. */ + Simplex_key null_key (); + /** \brief Returns the number stored for a simplex by `assign_key()`. + * + * If `assign_key()` has not been called, it must return `null_key()`. */ + Simplex_key key ( Simplex_handle sh ); + /** \brief Store a number for a simplex, which can later be retrieved with `key()`. */ + void assign_key(Simplex_handle sh, Simplex_key n); + /** @} */ }; } // namespace alpha_complex -- cgit v1.2.3 From 039382cbd951c8c94ddfd43b5ae228666a5cabed Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 16 Apr 2020 17:28:58 +0200 Subject: Fix doc of Simplex_tree about keys --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 430d1ac4..591a9e37 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -463,7 +463,7 @@ class Simplex_tree { public: /** \brief Returns the key associated to a simplex. * - * The filtration must be initialized. + * If no key has been assigned, returns `null_key()`. * \pre SimplexTreeOptions::store_key */ static Simplex_key key(Simplex_handle sh) { @@ -473,7 +473,6 @@ class Simplex_tree { /** \brief Returns the simplex that has index idx in the filtration. * * The filtration must be initialized. - * \pre SimplexTreeOptions::store_key */ Simplex_handle simplex(Simplex_key idx) const { return filtration_vect_[idx]; @@ -509,8 +508,7 @@ class Simplex_tree { return Dictionary_it(nullptr); } - /** \brief Returns a key different for all keys associated to the - * simplices of the simplicial complex. */ + /** \brief Returns a fixed number not in the interval [0, `num_simplices()`). */ static Simplex_key null_key() { return -1; } @@ -856,11 +854,9 @@ class Simplex_tree { public: /** \brief Initializes the filtrations, i.e. sort the - * simplices according to their order in the filtration and initializes all Simplex_keys. + * simplices according to their order in the filtration. * - * After calling this method, filtration_simplex_range() becomes valid, and each simplex is - * assigned a Simplex_key corresponding to its order in the filtration (from 0 to m-1 for a - * simplicial complex with m simplices). + * After calling this method, filtration_simplex_range() becomes valid. * * Will be automatically called when calling filtration_simplex_range() * if the filtration has never been initialized yet. 
*/ -- cgit v1.2.3 From 17aaa979e4cdfe5faed9b2750d452171de4b67e1 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 17 Apr 2020 22:13:29 +0200 Subject: Simplify distance-to-diagonal in Wasserstein --- src/python/gudhi/wasserstein/wasserstein.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 35315939..5df66cf9 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -15,16 +15,19 @@ try: except ImportError: print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT") -def _proj_on_diag(X): +def _dist_to_diag(X, internal_p): ''' :param X: (n x 2) array encoding the points of a persistent diagram. - :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal + :param internal_p: Ground metric (i.e. norm L^p). + :returns: (n) array encoding the (respective orthogonal) distances of the points to the diagonal + + .. note:: + Assumes that the points are above the diagonal. ''' - Z = (X[:,0] + X[:,1]) / 2. - return np.array([Z , Z]).T + return (X[:, 1] - X[:, 0]) * 2 ** (1.0 / internal_p - 1) -def _build_dist_matrix(X, Y, order=2., internal_p=2.): +def _build_dist_matrix(X, Y, order, internal_p): ''' :param X: (n x 2) numpy.array encoding the (points of the) first diagram. :param Y: (m x 2) numpy.array encoding the second diagram. @@ -36,16 +39,12 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.): and its orthogonal projection onto the diagonal. note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal). ''' - Xdiag = _proj_on_diag(X) - Ydiag = _proj_on_diag(Y) + Cxd = _dist_to_diag(X, internal_p)**order + Cdy = _dist_to_diag(Y, internal_p)**order if np.isinf(internal_p): C = sc.cdist(X,Y, metric='chebyshev')**order - Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order - Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order else: C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order - Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order - Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order Cf = np.hstack((C, Cxd[:,None])) Cdy = np.append(Cdy, 0) @@ -61,8 +60,7 @@ def _perstot(X, order, internal_p): :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). 
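A quick numeric check of the closed form introduced above: for a point (b, d) above the diagonal, its L^p distance to its orthogonal projection ((b+d)/2, (b+d)/2) equals (d - b) * 2^(1/p - 1):

    import numpy as np

    X = np.array([[0.1, 0.9], [0.3, 0.5]])
    internal_p = 2.0
    proj = ((X[:, 0] + X[:, 1]) / 2.0)[:, None] * np.ones(2)  # projections onto the diagonal
    old = np.linalg.norm(X - proj, ord=internal_p, axis=1)    # projection-based distances
    new = (X[:, 1] - X[:, 0]) * 2 ** (1.0 / internal_p - 1)   # closed form
    assert np.allclose(old, new)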
''' - Xdiag = _proj_on_diag(X) - return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order) + return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order) def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): -- cgit v1.2.3 From f93c403b81b4ccb98bfad8e4ef30cdf0e7333f6c Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 18 Apr 2020 23:52:12 +0200 Subject: enable_autodiff for POT wasserstein_distance --- src/python/gudhi/wasserstein/wasserstein.py | 64 +++++++++++++++++++++++----- src/python/test/test_wasserstein_distance.py | 14 ++++-- 2 files changed, 63 insertions(+), 15 deletions(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 5df66cf9..9660b99b 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -53,17 +53,30 @@ def _build_dist_matrix(X, Y, order, internal_p): return Cf -def _perstot(X, order, internal_p): +def _perstot_autodiff(X, order, internal_p): + ''' + Version of _perstot that works on eagerpy tensors. + ''' + return _dist_to_diag(X, internal_p).norms.lp(order) + +def _perstot(X, order, internal_p, enable_autodiff): ''' :param X: (n x 2) numpy.array (points of a given diagram). :param order: exponent for Wasserstein. Default value is 2. :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). + :param enable_autodiff: If X is torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation + transparent to automatic differentiation. + :type enable_autodiff: bool :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). ''' - return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order) + if enable_autodiff: + import eagerpy as ep + return _perstot_autodiff(ep.astensor(X), order, internal_p).raw + else: + return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order) -def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): +def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_autodiff=False): ''' :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). @@ -74,6 +87,9 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): :param order: exponent for Wasserstein; Default value is 2. :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). + :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation + transparent to automatic differentiation. + :type enable_autodiff: bool :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. If matching is set to True, also returns the optimal matching between X and Y. @@ -82,23 +98,30 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): m = len(Y) # handle empty diagrams - if X.size == 0: - if Y.size == 0: + if n == 0: + if m == 0: if not matching: + # What if enable_autodiff? return 0. 
else: return 0., np.array([]) else: if not matching: - return _perstot(Y, order, internal_p) + return _perstot(Y, order, internal_p, enable_autodiff) else: - return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)]) - elif Y.size == 0: + return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)]) + elif m == 0: if not matching: - return _perstot(X, order, internal_p) + return _perstot(X, order, internal_p, enable_autodiff) else: - return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)]) - + return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)]) + + if enable_autodiff: + import eagerpy as ep + X_orig = ep.astensor(X) + Y_orig = ep.astensor(Y) + X = X_orig.numpy() + Y = Y_orig.numpy() M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p) a = np.ones(n+1) # weight vector of the input diagram. Uniform here. a[-1] = m @@ -106,6 +129,7 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): b[-1] = n if matching: + assert not enable_autodiff, "matching and enable_autodiff are currently incompatible" P = ot.emd(a=a,b=b,M=M, numItermax=2000000) ot_cost = np.sum(np.multiply(P,M)) P[-1, -1] = 0 # Remove matching corresponding to the diagonal @@ -115,6 +139,24 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.): match[:,1][match[:,1] >= m] = -1 return ot_cost ** (1./order) , match + if enable_autodiff: + P = ot.emd(a=a,b=b,M=M, numItermax=2000000) + pairs = np.argwhere(P[:-1, :-1]) + diag2 = np.nonzero(P[-1, :-1]) + diag1 = np.nonzero(P[:-1, -1]) + dists = [] + # empty arrays are not handled properly by the helpers, so we avoid calling them + if len(pairs): + dists.append((Y_orig[pairs[:, 1]] - X_orig[pairs[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order)) + if len(diag1): + dists.append(_perstot_autodiff(X_orig[diag1], order, internal_p)) + if len(diag2): + dists.append(_perstot_autodiff(Y_orig[diag2], order, internal_p)) + dists = [ dist.reshape(1) for dist in dists ] + return ep.concatenate(dists).norms.lp(order) + # Should just compute the L^order norm manually? + # We can also concatenate the 3 vectors to compute just one norm. + # Comptuation of the otcost using the ot.emd2 library. # Note: it is the Wasserstein distance to the power q. # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value? 
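Why the autodiff branch above is sound: it takes an order-q norm inside each of the three groups read off the transport plan (matched pairs, X points sent to the diagonal, Y points sent to the diagonal), then an order-q norm of the three results. Nested L^q norms flatten, ||(||u||_q, ||v||_q, ||w||_q)||_q = ||(u, v, w)||_q, which is also what the closing comment means by concatenating the three vectors to compute just one norm. A small illustrative check of the identity:

    import numpy as np

    q = 2.0
    u, v, w = np.random.rand(3), np.random.rand(2), np.random.rand(4)
    nested = np.linalg.norm([np.linalg.norm(u, q), np.linalg.norm(v, q),
                             np.linalg.norm(w, q)], q)
    flat = np.linalg.norm(np.concatenate([u, v, w]), q)
    assert np.isclose(nested, flat)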
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 7e0d0f5f..5bec5bd3 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -73,14 +73,20 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat -def hera_wrap(delta): +def hera_wrap(**extra): def fun(*kargs,**kwargs): - return hera(*kargs,**kwargs,delta=delta) + return hera(*kargs,**kwargs,**extra) + return fun + +def pot_wrap(**extra): + def fun(*kargs,**kwargs): + return pot(*kargs,**kwargs,**extra) return fun def test_wasserstein_distance_pot(): _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) + _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False) def test_wasserstein_distance_hera(): - _basic_wasserstein(hera_wrap(1e-12), 1e-12, test_matching=False) - _basic_wasserstein(hera_wrap(.1), .1, test_matching=False) + _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False) + _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False) -- cgit v1.2.3 From b2a9ba18ce33778abdd9f5032af4bfff04e8bbd2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 19 Apr 2020 09:06:08 +0200 Subject: Unwrap the result --- src/python/gudhi/wasserstein/wasserstein.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 9660b99b..f0c82962 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -71,6 +71,7 @@ def _perstot(X, order, internal_p, enable_autodiff): ''' if enable_autodiff: import eagerpy as ep + return _perstot_autodiff(ep.astensor(X), order, internal_p).raw else: return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order) @@ -118,6 +119,7 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a if enable_autodiff: import eagerpy as ep + X_orig = ep.astensor(X) Y_orig = ep.astensor(Y) X = X_orig.numpy() @@ -140,10 +142,10 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a return ot_cost ** (1./order) , match if enable_autodiff: - P = ot.emd(a=a,b=b,M=M, numItermax=2000000) + P = ot.emd(a=a, b=b, M=M, numItermax=2000000) pairs = np.argwhere(P[:-1, :-1]) - diag2 = np.nonzero(P[-1, :-1]) diag1 = np.nonzero(P[:-1, -1]) + diag2 = np.nonzero(P[-1, :-1]) dists = [] # empty arrays are not handled properly by the helpers, so we avoid calling them if len(pairs): @@ -152,8 +154,8 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a dists.append(_perstot_autodiff(X_orig[diag1], order, internal_p)) if len(diag2): dists.append(_perstot_autodiff(Y_orig[diag2], order, internal_p)) - dists = [ dist.reshape(1) for dist in dists ] - return ep.concatenate(dists).norms.lp(order) + dists = [dist.reshape(1) for dist in dists] + return ep.concatenate(dists).norms.lp(order).raw # Should just compute the L^order norm manually? # We can also concatenate the 3 vectors to compute just one norm. 
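Taken together, the commits above allow usage along the following lines, sketched here under the assumption of a GUDHI build with POT, EagerPy and PyTorch available (it mirrors the test added in the next commit):

    import torch
    from gudhi.wasserstein import wasserstein_distance

    X = torch.tensor([[2.7, 3.7], [9.6, 14.0]], requires_grad=True)
    Y = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
    dist = wasserstein_distance(X, Y, order=2, internal_p=2,
                                enable_autodiff=True)
    dist.backward()  # gradients with respect to the diagram coordinates
    print(X.grad, Y.grad)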
-- cgit v1.2.3 From 1086b8cad7c1ea2a02742dfc44aef036a674f5d3 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 19 Apr 2020 12:17:42 +0200 Subject: Test gradient --- src/python/test/test_wasserstein_distance.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 5bec5bd3..c6d6b346 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -90,3 +90,16 @@ def test_wasserstein_distance_pot(): def test_wasserstein_distance_hera(): _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False) _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False) + +def test_wasserstein_distance_grad(): + import torch + + diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True) + diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) + diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) + assert diag1.grad is None and diag2.grad is None and diag3.grad is None + dist1 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True) + dist2 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True) + dist1.backward() + dist2.backward() + assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any() -- cgit v1.2.3 From 8d9611206603f4f7506fe77a0273c73c9d67716b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 19 Apr 2020 12:30:35 +0200 Subject: Drop redundant test torch.isnan(None) raises an exception anyway --- src/python/test/test_dtm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index de74c42b..859189fa 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -46,14 +46,14 @@ def test_dtm_compare_euclidean(): r6 = dtm.fit_transform(pts2) assert r6.detach().numpy() == pytest.approx(r0) r6.sum().backward() - assert pts2.grad is not None and not torch.isnan(pts2.grad).any() + assert not torch.isnan(pts2.grad).any() pts2 = torch.tensor(pts, requires_grad=True) assert pts2.grad is None dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True) r7 = dtm.fit_transform(pts2) assert r7.detach().numpy() == pytest.approx(r0) r7.sum().backward() - assert pts2.grad is not None and not torch.isnan(pts2.grad).any() + assert not torch.isnan(pts2.grad).any() def test_dtm_precomputed(): -- cgit v1.2.3 From 1fc55e54ed2f24969a691914edee642f97142fa9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 19 Apr 2020 13:43:23 +0200 Subject: Test comparison with persistence_pairs() --- src/python/test/test_simplex_generators.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py index e3bdc094..8a9b4844 100755 --- a/src/python/test/test_simplex_generators.py +++ b/src/python/test/test_simplex_generators.py @@ -24,6 +24,13 @@ def test_flag_generators(): assert np.array_equal(g[2], [0, 4]) assert len(g[3]) == 1 assert np.array_equal(g[3][0], [[7, 6]]) + # Compare trivial cases (where the simplex is the generator) with persistence_pairs. + # This still makes assumptions on the order of vertices in a simplex and could be more robust. 
+ pairs = st.persistence_pairs() + assert {tuple(i) for i in g[0]} == {(i[0][0],) + tuple(i[1]) for i in pairs if len(i[0]) == 1 and len(i[1]) != 0} + assert {(i[0], i[1]) for i in g[1][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) != 0} + assert set(g[2]) == {i[0][0] for i in pairs if len(i[0]) == 1 and len(i[1]) == 0} + assert {(i[0], i[1]) for i in g[3][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) == 0} def test_lower_star_generators(): -- cgit v1.2.3 From 1c1a99074049e4ff04fa28e7d6e1b6fc2067397a Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 10:38:41 +0200 Subject: Add __license__ --- src/python/gudhi/point_cloud/dtm.py | 4 ++++ src/python/gudhi/point_cloud/knn.py | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py index 58dec536..13e16d24 100644 --- a/src/python/gudhi/point_cloud/dtm.py +++ b/src/python/gudhi/point_cloud/dtm.py @@ -9,6 +9,10 @@ from .knn import KNearestNeighbors +__author__ = "Marc Glisse" +__copyright__ = "Copyright (C) 2020 Inria" +__license__ = "MIT" + class DistanceToMeasure: """ diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index d7cf0b2a..4017e498 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -11,6 +11,10 @@ import numpy # TODO: https://github.com/facebookresearch/faiss +__author__ = "Marc Glisse" +__copyright__ = "Copyright (C) 2020 Inria" +__license__ = "MIT" + class KNearestNeighbors: """ @@ -156,7 +160,9 @@ class KNearestNeighbors: assert self.metric == "minkowski" p = self.params["p"] Y = ep.astensor(self.ref_points) - neighbor_pts = Y[neighbors,] + neighbor_pts = Y[ + neighbors, + ] diff = neighbor_pts - X[:, None, :] if isinstance(diff, ep.PyTorchTensor): # https://github.com/jonasrauber/eagerpy/issues/6 -- cgit v1.2.3 From 3a9105e0d3bea5cc64610b7c0c3fb15f0e00bb9d Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 11:37:44 +0200 Subject: Reintroduce _proj_on_diag, with a unit test --- src/python/gudhi/wasserstein/wasserstein.py | 11 +++++++++++ src/python/test/test_wasserstein_distance.py | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 5df66cf9..efc851a0 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -15,6 +15,17 @@ try: except ImportError: print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT") + +# Currently unused, but Théo says it is likely to be used again. +def _proj_on_diag(X): + ''' + :param X: (n x 2) array encoding the points of a persistent diagram. + :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal + ''' + Z = (X[:,0] + X[:,1]) / 2. + return np.array([Z , Z]).T + + def _dist_to_diag(X, internal_p): ''' :param X: (n x 2) array encoding the points of a persistent diagram. 
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 7e0d0f5f..1a4acc1d 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -8,6 +8,7 @@ - YYYY/MM Author: Description of the modification """ +from gudhi.wasserstein.wasserstein import _proj_on_diag from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np @@ -17,6 +18,12 @@ __author__ = "Theo Lacombe" __copyright__ = "Copyright (C) 2019 Inria" __license__ = "MIT" +def test_proj_on_diag(): + dgm = np.array([[1., 1.], [1., 2.], [3., 5.]]) + assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]]) + empty = np.empty((0, 2)) + assert np.array_equal(_proj_on_diag(empty), empty) + def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]) diag2 = np.array([[2.8, 4.45], [9.5, 14.1]]) -- cgit v1.2.3 From 9ef7ba65367ab2ff92bf66b1b8166c5990530b76 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 12:16:15 +0200 Subject: Explicitly pass sort_results=True on some tests --- src/python/test/test_knn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py index 415c9d48..a87ec212 100755 --- a/src/python/test/test_knn.py +++ b/src/python/test/test_knn.py @@ -54,12 +54,12 @@ def test_knn_explicit(): knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False) r = knn.fit_transform(dist) assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) - knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True) + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, sort_results=True) r = knn.fit_transform(dist) assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) # Second time in parallel - knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2) + knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2, sort_results=True) r = knn.fit_transform(dist) assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, n_jobs=2) -- cgit v1.2.3 From bac284bf7f65c40f03ec8e47316d4f0fd0059c91 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 19:12:35 +0200 Subject: Check that dependencies are present before testing --- src/python/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 10dcd161..5ab63e5d 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -401,7 +401,9 @@ if(PYTHONINTERP_FOUND) # Wasserstein if(OT_FOUND AND PYBIND11_FOUND) - add_gudhi_py_test(test_wasserstein_distance) + if(TORCH_FOUND AND EAGERPY_FOUND) + add_gudhi_py_test(test_wasserstein_distance) + endif() add_gudhi_py_test(test_wasserstein_barycenter) endif() -- cgit v1.2.3 From 4ad650bc3184f57e1dda91f6b0a6358830f0562f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 19:42:34 +0200 Subject: Drop one comment --- src/python/gudhi/wasserstein/wasserstein.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py 
b/src/python/gudhi/wasserstein/wasserstein.py index 5b61d176..42c8dc2d 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -167,7 +167,6 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a dists.append(_perstot_autodiff(Y_orig[diag2], order, internal_p)) dists = [dist.reshape(1) for dist in dists] return ep.concatenate(dists).norms.lp(order).raw - # Should just compute the L^order norm manually? # We can also concatenate the 3 vectors to compute just one norm. # Comptuation of the otcost using the ot.emd2 library. -- cgit v1.2.3 From 70fb88a668c2cad837cbdea4863a136a1efc71c3 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 20:39:40 +0200 Subject: Random CircleCI tweak --- .circleci/config.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4f86cb12..40ddc08e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -45,7 +45,6 @@ jobs: python: docker: - image: gudhi/ci_for_gudhi:latest - parallelism: 4 steps: - checkout - run: @@ -62,12 +61,12 @@ jobs: cd build; cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..; cd python; - python3 setup.py build_ext -j 4 --inplace; + python3 setup.py build_ext -j 2 --inplace; make sphinx; cp -R sphinx /tmp/sphinx; python3 setup.py install; python3 setup.py clean --all; - ctest -j 4 --output-on-failure; + ctest -j 2 --output-on-failure; - store_artifacts: path: /tmp/sphinx -- cgit v1.2.3 From 3e713cee177e10536ae8fc231e56fa04769a35ee Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 20 Apr 2020 22:06:38 +0200 Subject: Fix #279 --- src/python/CMakeLists.txt | 129 +++++++++++++++++++++++----------------------- 1 file changed, 65 insertions(+), 64 deletions(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 10dcd161..055d5b23 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -242,6 +242,71 @@ if(PYTHONINTERP_FOUND) install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)") + # Documentation generation is available through sphinx - requires all modules + # Make it first as sphinx test is by far the longest test which is nice when testing in parallel + if(SPHINX_PATH) + if(MATPLOTLIB_FOUND) + if(NUMPY_FOUND) + if(SCIPY_FOUND) + if(SKLEARN_FOUND) + if(OT_FOUND) + if(PYBIND11_FOUND) + if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") + # User warning - Sphinx is a static pages generator, and configured to work fine with user_version + # Images and biblio warnings because not found on developper version + if (GUDHI_PYTHON_PATH STREQUAL "src/python") + set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. 
Images and biblio will miss") + endif() + # sphinx target requires gudhi.so, because conf.py reads gudhi version from it + add_custom_target(sphinx + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" + COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) + + add_test(NAME sphinx_py_test + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) + + # Set missing or not modules + set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") + else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + else(PYBIND11_FOUND) + message("++ Python documentation module will not be compiled because pybind11 was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(PYBIND11_FOUND) + else(OT_FOUND) + message("++ Python documentation module will not be compiled because POT was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(OT_FOUND) + else(SKLEARN_FOUND) + message("++ Python documentation module will not be compiled because scikit-learn was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SKLEARN_FOUND) + else(SCIPY_FOUND) + message("++ Python documentation module will not be compiled because scipy was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SCIPY_FOUND) + else(NUMPY_FOUND) + message("++ Python documentation module will not be compiled because numpy was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(NUMPY_FOUND) + else(MATPLOTLIB_FOUND) + message("++ Python documentation module will not be compiled because matplotlib was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(MATPLOTLIB_FOUND) + else(SPHINX_PATH) + message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SPHINX_PATH) + + # Test examples if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) # Bottleneck and Alpha @@ -419,70 +484,6 @@ if(PYTHONINTERP_FOUND) add_gudhi_py_test(test_dtm) endif() - # Documentation generation is available through sphinx - requires all modules - if(SPHINX_PATH) - if(MATPLOTLIB_FOUND) - if(NUMPY_FOUND) - if(SCIPY_FOUND) - if(SKLEARN_FOUND) - if(OT_FOUND) - if(PYBIND11_FOUND) - if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") - 
# User warning - Sphinx is a static pages generator, and configured to work fine with user_version - # Images and biblio warnings because not found on developper version - if (GUDHI_PYTHON_PATH STREQUAL "src/python") - set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss") - endif() - # sphinx target requires gudhi.so, because conf.py reads gudhi version from it - add_custom_target(sphinx - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx - DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" - COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) - - add_test(NAME sphinx_py_test - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) - - # Set missing or not modules - set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") - else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - else(PYBIND11_FOUND) - message("++ Python documentation module will not be compiled because pybind11 was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(PYBIND11_FOUND) - else(OT_FOUND) - message("++ Python documentation module will not be compiled because POT was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(OT_FOUND) - else(SKLEARN_FOUND) - message("++ Python documentation module will not be compiled because scikit-learn was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(SKLEARN_FOUND) - else(SCIPY_FOUND) - message("++ Python documentation module will not be compiled because scipy was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(SCIPY_FOUND) - else(NUMPY_FOUND) - message("++ Python documentation module will not be compiled because numpy was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(NUMPY_FOUND) - else(MATPLOTLIB_FOUND) - message("++ Python documentation module will not be compiled because matplotlib was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(MATPLOTLIB_FOUND) - else(SPHINX_PATH) - message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(SPHINX_PATH) - - # Set missing or not modules set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES") else(CYTHON_FOUND) -- cgit v1.2.3 From aa90b98bee73ab2aaf39ef91f39f5a750168e5d4 Mon Sep 17 
00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 13:04:15 +0200 Subject: Document several optional dependencies of knn --- src/python/doc/installation.rst | 28 ++++++++++++++++++++++++++++ src/python/gudhi/point_cloud/knn.py | 3 ++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 48425d5e..09a843d5 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -211,6 +211,14 @@ The following examples requires CGAL version ≥ 4.11.0: * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` +EagerPy +======= + +Some Python functions can handle automatic differentiation (possibly only when +a flag `enable_autodiff=True` is used). In order to reduce code duplication, we +use `EagerPy `_ which wraps arrays from +PyTorch, TensorFlow and JAX in a common interface. + Eigen ===== @@ -229,6 +237,13 @@ The following examples require `Eigen `_ version * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` +Hnswlib +======= + +:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package +`Hnswlib `_ as a backend if explicitly +requested, to speed-up queries. + Matplotlib ========== @@ -251,6 +266,13 @@ The following examples require the `Matplotlib `_: * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` +PyKeOps +======= + +:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package +`PyKeOps `_ as a backend if +explicitly requested, to speed-up queries using a GPU. + Python Optimal Transport ======================== @@ -258,6 +280,12 @@ The :doc:`Wasserstein distance ` module requires `POT `_, a library that provides several solvers for optimization problems related to Optimal Transport. +PyTorch +======= + +`PyTorch `_ is currently only used as a dependency of +`PyKeOps`_, and in some tests. + Scikit-learn ============ diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 4017e498..07553d6d 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -44,7 +44,8 @@ class KNearestNeighbors: Otherwise, neighbors are returned in an arbitrary order. Defaults to True. enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.ndarray or tensorflow.Tensor, this instructs the function to compute distances in a way that works with automatic differentiation. - This is experimental and not supported for all metrics. Defaults to False. + This is experimental, not supported for all metrics, and requires the package EagerPy. + Defaults to False. kwargs: additional parameters are forwarded to the backends. 
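A short usage sketch of one of the optional backends documented above, assuming hnswlib is installed and that the backend is selected through the class's implementation parameter (parameter names follow the KNearestNeighbors docstring):

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.rand(100, 3)
    knn = KNearestNeighbors(4, implementation="hnsw", return_index=True,
                            return_distance=False)
    neighbors = knn.fit_transform(pts)  # (100, 4) array of neighbor indices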
""" self.k = k -- cgit v1.2.3 From da2a7a68f8f57495080af37cf981f64228d165a2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 14:06:02 +0200 Subject: Rename local variables --- src/python/gudhi/wasserstein/wasserstein.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 42c8dc2d..3d1caeb3 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -154,17 +154,17 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a if enable_autodiff: P = ot.emd(a=a, b=b, M=M, numItermax=2000000) - pairs = np.argwhere(P[:-1, :-1]) - diag1 = np.nonzero(P[:-1, -1]) - diag2 = np.nonzero(P[-1, :-1]) + pairs_X_Y = np.argwhere(P[:-1, :-1]) + pairs_X_diag = np.nonzero(P[:-1, -1]) + pairs_Y_diag = np.nonzero(P[-1, :-1]) dists = [] # empty arrays are not handled properly by the helpers, so we avoid calling them - if len(pairs): - dists.append((Y_orig[pairs[:, 1]] - X_orig[pairs[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order)) - if len(diag1): - dists.append(_perstot_autodiff(X_orig[diag1], order, internal_p)) - if len(diag2): - dists.append(_perstot_autodiff(Y_orig[diag2], order, internal_p)) + if len(pairs_X_Y): + dists.append((Y_orig[pairs_X_Y[:, 1]] - X_orig[pairs_X_Y[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order)) + if len(pairs_X_diag): + dists.append(_perstot_autodiff(X_orig[pairs_X_diag], order, internal_p)) + if len(pairs_Y_diag): + dists.append(_perstot_autodiff(Y_orig[pairs_Y_diag], order, internal_p)) dists = [dist.reshape(1) for dist in dists] return ep.concatenate(dists).norms.lp(order).raw # We can also concatenate the 3 vectors to compute just one norm. -- cgit v1.2.3 From 51f7b5bb15f351d08af4c26bd1ffdfe979199976 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 16:29:26 +0200 Subject: Test value of computed gradient --- src/python/test/test_wasserstein_distance.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 6bfcb2ee..90d26809 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -105,8 +105,19 @@ def test_wasserstein_distance_grad(): diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) assert diag1.grad is None and diag2.grad is None and diag3.grad is None - dist1 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True) - dist2 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True) - dist1.backward() - dist2.backward() + dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True) + dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True) + dist12.backward() + dist30.backward() assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any() + diag4 = torch.tensor([[0., 10.]], requires_grad=True) + diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True) + dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True) + assert dist45 == 3. 
+ dist45.backward() + assert np.array_equal(diag4.grad, [[-1., -1.]]) + assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]]) + diag6 = torch.tensor([[5., 10.]], requires_grad=True) + pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward() + # https://github.com/jonasrauber/eagerpy/issues/6 + # assert np.array_equal(diag6.grad, [[0., 0.]]) -- cgit v1.2.3 From ba17759cf922d246a0a74ac5cf99f67d48a7d8c3 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 16:52:27 +0200 Subject: Clarify the doc of enable_autodiff --- src/python/gudhi/wasserstein/wasserstein.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 3d1caeb3..0d164eda 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -100,7 +100,10 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation - transparent to automatic differentiation. + transparent to automatic differentiation. This requires the package EagerPy. + + .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y + and lets the various frameworks compute its gradient. It never pulls new points from the diagonal. :type enable_autodiff: bool :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. -- cgit v1.2.3 From a643583a4740fc40cf1e06e6cc1b4d17ca14000f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 17:39:52 +0200 Subject: Document incompatibility of matching=True and enable_autodiff --- src/python/gudhi/wasserstein/wasserstein.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 0d164eda..89ecab1c 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -100,7 +100,8 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_a :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm). :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation - transparent to automatic differentiation. This requires the package EagerPy. + transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible + with `matching=True`. .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y and lets the various frameworks compute its gradient. It never pulls new points from the diagonal. -- cgit v1.2.3 From c5db8c1aec523c0cdf72c75b29e4ba94b51487b8 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 22 Apr 2020 19:46:29 +0200 Subject: Reduce the probability of failure of test_dtm It is expected that hnsw sometimes misses one neighbor, which has an impact on the DTM, especially if the number of neighbors considered is low. 
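For context, the comparison this test performs looks roughly like the sketch below, assuming hnswlib is installed (hnsw is an approximate backend, hence the larger k and the loose relative tolerance):

    import numpy as np
    import pytest
    from gudhi.point_cloud.dtm import DistanceToMeasure

    pts = np.random.rand(1000, 4)
    exact = DistanceToMeasure(6, implementation="ckdtree").fit_transform(pts)
    approx = DistanceToMeasure(6, implementation="hnsw").fit_transform(pts)
    assert approx == pytest.approx(exact, rel=0.1)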
--- src/python/test/test_dtm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py index 859189fa..bff4c267 100755 --- a/src/python/test/test_dtm.py +++ b/src/python/test/test_dtm.py @@ -16,7 +16,7 @@ import torch def test_dtm_compare_euclidean(): pts = numpy.random.rand(1000, 4) - k = 3 + k = 6 dtm = DistanceToMeasure(k, implementation="ckdtree") r0 = dtm.fit_transform(pts) dtm = DistanceToMeasure(k, implementation="sklearn") @@ -27,7 +27,7 @@ def test_dtm_compare_euclidean(): assert r2 == pytest.approx(r0) dtm = DistanceToMeasure(k, implementation="hnsw") r3 = dtm.fit_transform(pts) - assert r3 == pytest.approx(r0) + assert r3 == pytest.approx(r0, rel=0.1) from scipy.spatial.distance import cdist d = cdist(pts, pts) -- cgit v1.2.3 From 0f7fe01852dcf827da35460592bd3a17ca0ab08e Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 23 Apr 2020 13:30:32 +0200 Subject: Fix pasto in the doc --- src/python/gudhi/simplex_tree.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 7728ebfc..93f5b332 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -560,7 +560,7 @@ cdef class SimplexTree: """This function writes the persistence intervals of the simplicial complex in a user given file name. - :param persistence_file: The specific dimension. + :param persistence_file: Name of the file. :type persistence_file: string. :note: intervals_in_dim function requires -- cgit v1.2.3 From 65f6ca41a9cd6574a0ca8fa9b781c787064fe4ed Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 23 Apr 2020 14:40:44 +0200 Subject: Add missing DOI --- biblio/bibliography.bib | 2 ++ 1 file changed, 2 insertions(+) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index b017a07e..07623a31 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -30,6 +30,7 @@ journal = {Foundations of Computational Mathematics}, number = {6}, pages = {1333--1396}, publisher = {Springer-Verlag}, +doi = {10.1007/s10208-017-9370-z}, title = {{Structure and stability of the one-dimensional Mapper}}, volume = {18}, year = {2017} @@ -47,6 +48,7 @@ journal = {Foundations of Computational Mathematics}, number = {1}, pages = {79--103}, publisher = {Springer-Verlag}, +doi = {10.1007/s10208-008-9027-z}, title = {{Extending persistence using Poincar{\'{e}} and Lefschetz duality}}, volume = {9}, year = {2009} -- cgit v1.2.3 From 658a754397287e8de216ae91d3c9a3c492e4db2d Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 24 Apr 2020 09:00:39 +0200 Subject: Fix bibliography for sphinx --- src/python/doc/alpha_complex_user.rst | 11 ++--------- src/python/doc/bottleneck_distance_user.rst | 6 ------ src/python/doc/cubical_complex_user.rst | 7 ------- src/python/doc/index.rst | 7 ------- src/python/doc/nerve_gic_complex_ref.rst | 7 ------- src/python/doc/nerve_gic_complex_user.rst | 7 ------- src/python/doc/persistent_cohomology_user.rst | 7 ------- src/python/doc/rips_complex_user.rst | 7 ------- src/python/doc/simplex_tree_user.rst | 7 ------- src/python/doc/tangential_complex_user.rst | 8 -------- src/python/doc/wasserstein_distance_user.rst | 7 ------- src/python/doc/witness_complex_user.rst | 7 ------- src/python/doc/zbibliography.rst | 10 ++++++++++ 13 files changed, 12 insertions(+), 86 deletions(-) create mode 100644 src/python/doc/zbibliography.rst diff --git a/src/python/doc/alpha_complex_user.rst 
b/src/python/doc/alpha_complex_user.rst index 265a82d2..c65e62c8 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -10,9 +10,8 @@ Definition .. include:: alpha_complex_sum.inc `AlphaComplex` is constructing a :doc:`SimplexTree ` using -`Delaunay Triangulation `_ -:cite:`cgal:hdj-t-19b` from `CGAL `_ (the Computational Geometry Algorithms Library -:cite:`cgal:eb-19b`). +`Delaunay Triangulation `_ +from `CGAL `_ (the Computational Geometry Algorithms Library). Remarks ^^^^^^^ @@ -203,9 +202,3 @@ the program output is: [4, 5, 6] -> 22.74 [3, 6] -> 30.25 -CGAL citations --------------- - -.. bibliography:: ../../biblio/how_to_cite_cgal.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst index 206fcb63..89da89d3 100644 --- a/src/python/doc/bottleneck_distance_user.rst +++ b/src/python/doc/bottleneck_distance_user.rst @@ -66,9 +66,3 @@ The output is: Bottleneck distance approximation = 0.81 Bottleneck distance value = 0.75 -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index e8c94bf6..e4733653 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -158,10 +158,3 @@ Examples. --------- End user programs are available in python/example/ folder. - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst index c153cdfc..13e51047 100644 --- a/src/python/doc/index.rst +++ b/src/python/doc/index.rst @@ -86,10 +86,3 @@ Point cloud utilities ********************* .. include:: point_cloud_sum.inc - -Bibliography -************ - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/nerve_gic_complex_ref.rst b/src/python/doc/nerve_gic_complex_ref.rst index 6a81b7af..abde2e8c 100644 --- a/src/python/doc/nerve_gic_complex_ref.rst +++ b/src/python/doc/nerve_gic_complex_ref.rst @@ -12,10 +12,3 @@ Cover complexes reference manual :show-inheritance: .. automethod:: gudhi.CoverComplex.__init__ - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index f709ce91..9101f45d 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -313,10 +313,3 @@ the program outputs again SC.dot which gives the following visualization after u :alt: Visualization with neato Visualization with neato - -Bibliography ------------- - -.. 
bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index 506fa3a7..4d743aac 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -111,10 +111,3 @@ We provide several example files: run these examples with -h for details on thei * :download:`rips_complex_diagram_persistence_from_distance_matrix_file_example.py <../example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py>` * :download:`random_cubical_complex_persistence_example.py <../example/random_cubical_complex_persistence_example.py>` * :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>` - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index c4bbcfb6..8efb12e6 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -347,10 +347,3 @@ until dimension 1 - one skeleton graph in other words), the output is: points in the persistence diagram will be under the diagonal, and bottleneck distance and persistence graphical tool will not work properly, this is a known issue. - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst index 1b272c35..3df7617f 100644 --- a/src/python/doc/simplex_tree_user.rst +++ b/src/python/doc/simplex_tree_user.rst @@ -66,10 +66,3 @@ The output is: ([1, 2], 4.0) ([1], 0.0) ([2], 4.0) - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst index cf8199cc..3d45473b 100644 --- a/src/python/doc/tangential_complex_user.rst +++ b/src/python/doc/tangential_complex_user.rst @@ -194,11 +194,3 @@ The output is: Tangential contains 4 vertices. Inconsistencies has been fixed. - - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index c24da74d..c443bab5 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -164,10 +164,3 @@ The output is: [[0.27916667 0.55416667] [0.7375 0.7625 ] [0.2375 0.2625 ]] - -Bibliography ------------- - -.. bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst index 799f5444..08dcd288 100644 --- a/src/python/doc/witness_complex_user.rst +++ b/src/python/doc/witness_complex_user.rst @@ -126,10 +126,3 @@ Example2: Computing persistence using strong relaxed witness complex Here is an example of constructing a strong witness complex filtration and computing persistence on it: * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` - -Bibliography ------------- - -.. 
bibliography:: ../../biblio/bibliography.bib - :filter: docname in docnames - :style: unsrt diff --git a/src/python/doc/zbibliography.rst b/src/python/doc/zbibliography.rst new file mode 100644 index 00000000..4c377b46 --- /dev/null +++ b/src/python/doc/zbibliography.rst @@ -0,0 +1,10 @@ +:orphan: + +.. To get rid of WARNING: document isn't included in any toctree + +Bibliography +------------ + +.. bibliography:: ../../biblio/bibliography.bib + :style: unsrt + -- cgit v1.2.3 From 66337063d2ee3770275268c264548e99db3ec7f0 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 24 Apr 2020 19:11:05 +0200 Subject: Code review: plain instead of unsrt for biblio - concatenate biblio files - undo cgal citation removal --- src/cmake/modules/GUDHI_user_version_target.cmake | 6 +++++- src/python/doc/alpha_complex_user.rst | 3 ++- src/python/doc/zbibliography.rst | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake index 257d1939..9cf648e3 100644 --- a/src/cmake/modules/GUDHI_user_version_target.cmake +++ b/src/cmake/modules/GUDHI_user_version_target.cmake @@ -26,8 +26,12 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E # Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation string(TIMESTAMP GUDHI_VERSION_YEAR "%Y") configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY) -file(COPY "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/") file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/") + +# append cgal citation inside bibliography - sphinx cannot deal with more than one bib file +file(READ "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" CGAL_CITATION_CONTENT) +file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib" "${CGAL_CITATION_CONTENT}") + # Copy biblio directory for user version add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index c65e62c8..a3b35c10 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -11,7 +11,8 @@ Definition `AlphaComplex` is constructing a :doc:`SimplexTree ` using `Delaunay Triangulation `_ -from `CGAL `_ (the Computational Geometry Algorithms Library). +:cite:`cgal:hdj-t-19b` from `CGAL `_ (the Computational Geometry Algorithms Library +:cite:`cgal:eb-19b`). Remarks ^^^^^^^ diff --git a/src/python/doc/zbibliography.rst b/src/python/doc/zbibliography.rst index 4c377b46..e23fcf25 100644 --- a/src/python/doc/zbibliography.rst +++ b/src/python/doc/zbibliography.rst @@ -6,5 +6,5 @@ Bibliography ------------ .. 
bibliography:: ../../biblio/bibliography.bib - :style: unsrt + :style: plain -- cgit v1.2.3 From 9e9511152a0495d123091d04af264e187fc6ab21 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Sat, 25 Apr 2020 11:02:14 +0200 Subject: Fix #259 --- src/python/gudhi/persistence_graphical_tools.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index cc3db467..03fc9066 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -109,9 +109,6 @@ def plot_persistence_barcode( plt.rc('text', usetex=True) plt.rc('font', family='serif') - - persistence = _array_handler(persistence) - if persistence_file != "": if path.isfile(persistence_file): # Reset persistence @@ -126,6 +123,8 @@ def plot_persistence_barcode( print("file " + persistence_file + " not found.") return None + persistence = _array_handler(persistence) + if max_barcodes != 1000: print("Deprecated parameter. It has been replaced by max_intervals") max_intervals = max_barcodes @@ -255,8 +254,6 @@ def plot_persistence_diagram( plt.rc('text', usetex=True) plt.rc('font', family='serif') - persistence = _array_handler(persistence) - if persistence_file != "": if path.isfile(persistence_file): # Reset persistence @@ -271,6 +268,8 @@ def plot_persistence_diagram( print("file " + persistence_file + " not found.") return None + persistence = _array_handler(persistence) + if max_plots != 1000: print("Deprecated parameter. It has been replaced by max_intervals") max_intervals = max_plots @@ -425,8 +424,6 @@ def plot_persistence_density( plt.rc('text', usetex=True) plt.rc('font', family='serif') - persistence = _array_handler(persistence) - if persistence_file != "": if dimension is None: # All dimension case @@ -440,6 +437,7 @@ def plot_persistence_density( return None if len(persistence) > 0: + persistence = _array_handler(persistence) persistence_dim = np.array( [ (dim_interval[1][0], dim_interval[1][1]) -- cgit v1.2.3 From ae80ba10d9bf333a418b255e72c0be2a3c7e73ae Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Sun, 26 Apr 2020 09:16:31 +0200 Subject: Fix alpha complex user sphinx warnings as sphinx was confusing bullet lists and bold font syntax --- src/python/doc/alpha_complex_user.rst | 39 +++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index a3b35c10..60a2f94e 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -89,25 +89,28 @@ In order to build the alpha complex, first, a Simplex tree is built from the cel Filtration value computation algorithm ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - **for** i : dimension :math:`\rightarrow` 0 **do** - **for all** :math:`\sigma` of dimension i - **if** filtration(:math:`\sigma`) is NaN **then** - filtration(:math:`\sigma`) = :math:`\alpha^2(\sigma)` - **end if** +.. 
code-block:: bash + + for i : dimension → 0 do + for all σ of dimension i + if filtration(σ) is NaN then + filtration(σ)=α2(σ) + end if + for all τ face of σ do // propagate alpha filtration value + if filtration(τ) is not NaN then + filtration(τ) = min( filtration(τ), filtration(σ) ) + else + if τ is not Gabriel for σ then + filtration(τ) = filtration(σ) + end if + end if + end for + end for + end for + + make_filtration_non_decreasing() + prune_above_filtration() - *//propagate alpha filtration value* - - **for all** :math:`\tau` face of :math:`\sigma` - **if** filtration(:math:`\tau`) is not NaN **then** - filtration(:math:`\tau`) = filtration(:math:`\sigma`) - **end if** - **end for** - **end for** - **end for** - - make_filtration_non_decreasing() - - prune_above_filtration() Dimension 2 ^^^^^^^^^^^ -- cgit v1.2.3 From f47b9607519b5c8c89bbe40341cf5bcc1382f5ef Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Sun, 26 Apr 2020 10:08:29 +0200 Subject: Fix barycenter sphinx warnings --- src/python/doc/alpha_complex_user.rst | 2 +- src/python/gudhi/wasserstein/barycenter.py | 53 +++++++++++++----------------- 2 files changed, 24 insertions(+), 31 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 60a2f94e..02d85389 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -94,7 +94,7 @@ Filtration value computation algorithm for i : dimension → 0 do for all σ of dimension i if filtration(σ) is NaN then - filtration(σ)=α2(σ) + filtration(σ)=α²(σ) end if for all τ face of σ do // propagate alpha filtration value if filtration(τ) is not NaN then diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py index de7aea81..1cf8edb3 100644 --- a/src/python/gudhi/wasserstein/barycenter.py +++ b/src/python/gudhi/wasserstein/barycenter.py @@ -18,8 +18,7 @@ from gudhi.wasserstein import wasserstein_distance def _mean(x, m): ''' :param x: a list of 2D-points, off diagonal, x_0... x_{k-1} - :param m: total amount of points taken into account, - that is we have (m-k) copies of diagonal + :param m: total amount of points taken into account, that is we have (m-k) copies of diagonal :returns: the weighted mean of x with (m-k) copies of the diagonal ''' k = len(x) @@ -33,37 +32,31 @@ def _mean(x, m): def lagrangian_barycenter(pdiagset, init=None, verbose=False): ''' - :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` - (`n` can variate), encoding a set of - persistence diagrams with only finite coordinates. + :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` (`n` can variate), encoding a set of persistence + diagrams with only finite coordinates. :param init: The initial value for barycenter estimate. - If ``None``, init is made on a random diagram from the dataset. - Otherwise, it can be an ``int`` - (then initialization is made on ``pdiagset[init]``) - or a `(n x 2)` ``numpy.array`` enconding - a persistence diagram with `n` points. + If ``None``, init is made on a random diagram from the dataset. + Otherwise, it can be an ``int`` (then initialization is made on ``pdiagset[init]``) + or a `(n x 2)` ``numpy.array`` enconding a persistence diagram with `n` points. :type init: ``int``, or (n x 2) ``np.array`` - :param verbose: if ``True``, returns additional information about the - barycenter. + :param verbose: if ``True``, returns additional information about the barycenter. 
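For orientation, a minimal usage sketch of the function whose docstring is reflowed here, assuming a GUDHI build with POT available (the two toy diagrams are made up for illustration):

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter

    dg1 = np.array([[0.2, 0.5], [0.4, 1.0]])
    dg2 = np.array([[0.25, 0.45], [0.5, 0.9]])
    Y, log = lagrangian_barycenter([dg1, dg2], init=0, verbose=True)
    print(log["energy"], log["nb_iter"], log["groupings"])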
:type verbose: boolean - :returns: If not verbose (default), a ``numpy.array`` encoding - the barycenter estimate of pdiagset - (local minimum of the energy function). - If ``pdiagset`` is empty, returns ``None``. - If verbose, returns a couple ``(Y, log)`` - where ``Y`` is the barycenter estimate, - and ``log`` is a ``dict`` that contains additional informations: - - - `"groupings"`, a list of list of pairs ``(i,j)``. - Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates - that ``pdiagset[k][i]`` is matched to ``Y[j]`` - if ``i = -1`` or ``j = -1``, it means they - represent the diagonal. - - - `"energy"`, ``float`` representing the Frechet energy value obtained. - It is the mean of squared distances of observations to the output. - - - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm. + :returns: If not verbose (default), a ``numpy.array`` encoding the barycenter estimate of pdiagset + (local minimum of the energy function). + If ``pdiagset`` is empty, returns ``None``. + If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate, + and ``log`` is a ``dict`` that contains additional informations: + + - `"groupings"`, a list of list of pairs ``(i,j)``. + + Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that ``pdiagset[k][i]`` is matched to ``Y[j]`` + if ``i = -1`` or ``j = -1``, it means they represent the diagonal. + + - `"energy"`, ``float`` representing the Frechet energy value obtained. + + It is the mean of squared distances of observations to the output. + + - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm. ''' X = pdiagset # to shorten notations, not a copy m = len(X) # number of diagrams we are averaging -- cgit v1.2.3 From 88043e6b9da458eee7bdb0b9793f94a4e7d0aaa0 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Sun, 26 Apr 2020 10:24:30 +0200 Subject: vim code block has a better highlighting code --- src/python/doc/alpha_complex_user.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 02d85389..ec218969 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -89,7 +89,7 @@ In order to build the alpha complex, first, a Simplex tree is built from the cel Filtration value computation algorithm ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. code-block:: bash +.. 
code-block:: vim for i : dimension → 0 do for all σ of dimension i -- cgit v1.2.3 From 484732c8ad30721ba4fa596bcb8a3835ad3bc431 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 27 Apr 2020 07:06:16 +0200 Subject: lint pseudo code --- src/python/doc/alpha_complex_user.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index ec218969..de706de9 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -94,7 +94,7 @@ Filtration value computation algorithm for i : dimension → 0 do for all σ of dimension i if filtration(σ) is NaN then - filtration(σ)=α²(σ) + filtration(σ) = α²(σ) end if for all τ face of σ do // propagate alpha filtration value if filtration(τ) is not NaN then -- cgit v1.2.3 From 87311ec2d59211320e763bc9bc531858b489ff7e Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 28 Apr 2020 13:28:10 -0400 Subject: added call methods + other fixes --- .../diagram_vectorizations_distances_kernels.py | 98 +++++++--------------- src/python/gudhi/representations/kernel_methods.py | 88 +++++++++++++++---- src/python/gudhi/representations/metrics.py | 97 +++++++++++++++++---- src/python/gudhi/representations/preprocessing.py | 60 +++++++++++++ src/python/gudhi/representations/vector_methods.py | 84 +++++++++++++++++++ 5 files changed, 326 insertions(+), 101 deletions(-) diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index de22d9e7..ab7d8a16 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -9,26 +9,23 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet TopologicalVector, DiagramScaler, BirthPersistenceTransform,\ PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\ - SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel + SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel, WassersteinDistance -D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]]) -diags = [D] +D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]]) -diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags) -diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags) -diags = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]).fit_transform(diags) +proc1, proc2, proc3 = DiagramSelector(use=True, point_type="finite"), DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]), DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]) +D1 = proc3(proc2(proc1(D1))) -D = diags[0] -plt.scatter(D[:,0],D[:,1]) +plt.scatter(D1[:,0], D1[:,1]) plt.plot([0.,1.],[0.,1.]) plt.title("Test Persistence Diagram for vector methods") plt.show() LS = Landscape(resolution=1000) -L = LS.fit_transform(diags) -plt.plot(L[0][:1000]) -plt.plot(L[0][1000:2000]) -plt.plot(L[0][2000:3000]) +L = LS(D1) +plt.plot(L[:1000]) +plt.plot(L[1000:2000]) +plt.plot(L[2000:3000]) plt.title("Landscape") plt.show() @@ -36,50 +33,39 @@ def pow(n): return lambda x: np.power(x[1]-x[0],n) SH = Silhouette(resolution=1000, weight=pow(2)) -sh = SH.fit_transform(diags) -plt.plot(sh[0]) +plt.plot(SH(D1)) plt.title("Silhouette") plt.show() BC = BettiCurve(resolution=1000) -bc = BC.fit_transform(diags) 
-plt.plot(bc[0]) +plt.plot(BC(D1)) plt.title("Betti Curve") plt.show() CP = ComplexPolynomial(threshold=-1, polynomial_type="T") -cp = CP.fit_transform(diags) -print("Complex polynomial is " + str(cp[0,:])) +print("Complex polynomial is " + str(CP(D1))) TV = TopologicalVector(threshold=-1) -tv = TV.fit_transform(diags) -print("Topological vector is " + str(tv[0,:])) +print("Topological vector is " + str(TV(D1))) PI = PersistenceImage(bandwidth=.1, weight=lambda x: x[1], im_range=[0,1,0,1], resolution=[100,100]) -pi = PI.fit_transform(diags) -plt.imshow(np.flip(np.reshape(pi[0], [100,100]), 0)) +plt.imshow(np.flip(np.reshape(PI(D1), [100,100]), 0)) plt.title("Persistence Image") plt.show() ET = Entropy(mode="scalar") -et = ET.fit_transform(diags) -print("Entropy statistic is " + str(et[0,:])) +print("Entropy statistic is " + str(ET(D1))) ET = Entropy(mode="vector", normalized=False) -et = ET.fit_transform(diags) -plt.plot(et[0]) +plt.plot(ET(D1)) plt.title("Entropy function") plt.show() -D = np.array([[1.,5.],[3.,6.],[2.,7.]]) -diags2 = [D] +D2 = np.array([[1.,5.],[3.,6.],[2.,7.]]) +D2 = proc3(proc2(proc1(D2))) -diags2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags2) - -D = diags[0] -plt.scatter(D[:,0],D[:,1]) -D = diags2[0] -plt.scatter(D[:,0],D[:,1]) +plt.scatter(D1[:,0], D1[:,1]) +plt.scatter(D2[:,0], D2[:,1]) plt.plot([0.,1.],[0.,1.]) plt.title("Test Persistence Diagrams for kernel methods") plt.show() @@ -88,56 +74,34 @@ def arctan(C,p): return lambda x: C*np.arctan(np.power(x[1], p)) PWG = PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.)) -X = PWG.fit(diags) -Y = PWG.transform(diags2) -print("PWG kernel is " + str(Y[0][0])) +print("PWG kernel is " + str(PWG(D1, D2))) PWG = PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.)) -X = PWG.fit(diags) -Y = PWG.transform(diags2) -print("Approximate PWG kernel is " + str(Y[0][0])) +print("Approximate PWG kernel is " + str(PWG(D1, D2))) PSS = PersistenceScaleSpaceKernel(bandwidth=1.) -X = PSS.fit(diags) -Y = PSS.transform(diags2) -print("PSS kernel is " + str(Y[0][0])) +print("PSS kernel is " + str(PSS(D1, D2))) PSS = PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2]))) -X = PSS.fit(diags) -Y = PSS.transform(diags2) -print("Approximate PSS kernel is " + str(Y[0][0])) +print("Approximate PSS kernel is " + str(PSS(D1, D2))) sW = SlicedWassersteinDistance(num_directions=100) -X = sW.fit(diags) -Y = sW.transform(diags2) -print("SW distance is " + str(Y[0][0])) +print("SW distance is " + str(sW(D1, D2))) SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.) 
-X = SW.fit(diags) -Y = SW.transform(diags2) -print("SW kernel is " + str(Y[0][0])) +print("SW kernel is " + str(SW(D1, D2))) W = WassersteinDistance(order=2, internal_p=2, mode="pot") -X = W.fit(diags) -Y = W.transform(diags2) -print("Wasserstein distance (POT) is " + str(Y[0][0])) +print("Wasserstein distance (POT) is " + str(W(D1, D2))) W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001) -X = W.fit(diags) -Y = W.transform(diags2) -print("Wasserstein distance (hera) is " + str(Y[0][0])) +print("Wasserstein distance (hera) is " + str(W(D1, D2))) W = BottleneckDistance(epsilon=.001) -X = W.fit(diags) -Y = W.transform(diags2) -print("Bottleneck distance is " + str(Y[0][0])) +print("Bottleneck distance is " + str(W(D1, D2))) PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.) -X = PF.fit(diags) -Y = PF.transform(diags2) -print("PF kernel is " + str(Y[0][0])) +print("PF kernel is " + str(PF(D1, D2))) PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2]))) -X = PF.fit(diags) -Y = PF.transform(diags2) -print("Approximate PF kernel is " + str(Y[0][0])) +print("Approximate PF kernel is " + str(PF(D1, D2))) diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py index 50186d63..edd1382a 100644 --- a/src/python/gudhi/representations/kernel_methods.py +++ b/src/python/gudhi/representations/kernel_methods.py @@ -10,14 +10,14 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import pairwise_distances, pairwise_kernels -from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, sklearn_wrapper, pairwise_persistence_diagram_distances, sliced_wasserstein_distance, persistence_fisher_distance +from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance from .preprocessing import Padding ############################################# # Kernel methods ############################ ############################################# -def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.): +def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details. @@ -25,7 +25,7 @@ def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_appr D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate). D2: (m x 2) numpy.array encoding the second diagram. bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved - weight: weight function for the persistence diagram points. This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y]. + weight: weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y]. 
kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance). Returns: @@ -42,7 +42,7 @@ def persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_appr E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth)) return np.sum(np.multiply(W, E)) -def persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): +def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details. @@ -58,32 +58,32 @@ def persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.): DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0) DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0) weight_pss = lambda x: 1 if x[1] >= x[0] else -1 - return 0.5 * persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth) + return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth) -def pairwise_persistence_diagram_kernels(X, Y=None, metric="sliced_wasserstein", **kwargs): +def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", **kwargs): """ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2). Parameters: X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only. - metric: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. Returns: numpy array of shape (nxm): kernel matrix. 
""" XX = np.reshape(np.arange(len(X)), [-1,1]) YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1]) - if metric == "sliced_wasserstein": + if kernel == "sliced_wasserstein": return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"]) - elif metric == "persistence_fisher": + elif kernel == "persistence_fisher": return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"]) - elif metric == "persistence_scale_space": - return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_scale_space_kernel, X, Y, **kwargs)) - elif metric == "persistence_weighted_gaussian": - return pairwise_kernels(XX, YY, metric=sklearn_wrapper(persistence_weighted_gaussian_kernel, X, Y, **kwargs)) + elif kernel == "persistence_scale_space": + return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs)) + elif kernel == "persistence_weighted_gaussian": + return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs)) else: - return pairwise_kernels(XX, YY, metric=sklearn_wrapper(metric, **kwargs)) + return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(metric, **kwargs)) class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): """ @@ -121,7 +121,20 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values. """ - return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions) + + def __call__(self, diag1, diag2): + """ + Apply SlicedWassersteinKernel on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: sliced Wasserstein kernel value. + """ + return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)) / self.bandwidth class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin): """ @@ -160,7 +173,20 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values. """ - return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx) + + def __call__(self, diag1, diag2): + """ + Apply PersistenceWeightedGaussianKernel on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: persistence weighted Gaussian kernel value. 
+ """ + return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth) class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): """ @@ -197,7 +223,20 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values. """ - return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) + + def __call__(self, diag1, diag2): + """ + Apply PersistenceScaleSpaceKernel on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: persistence scale space kernel value. + """ + return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) class PersistenceFisherKernel(BaseEstimator, TransformerMixin): """ @@ -236,5 +275,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin): Returns: numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values. """ - return pairwise_persistence_diagram_kernels(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx) + return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx) + + def __call__(self, diag1, diag2): + """ + Apply PersistenceFisherKernel on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: persistence Fisher kernel value. + """ + return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)) / self.bandwidth_fisher diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 59440b1a..a4bf19a6 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -17,7 +17,7 @@ from .preprocessing import Padding # Metrics ################################### ############################################# -def sliced_wasserstein_distance(D1, D2, num_directions): +def _sliced_wasserstein_distance(D1, D2, num_directions): """ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. 
@@ -42,7 +42,7 @@ def sliced_wasserstein_distance(D1, D2, num_directions): L1 = np.sum(np.abs(A-B), axis=0) return np.mean(L1) -def compute_persistence_diagram_projections(X, num_directions): +def _compute_persistence_diagram_projections(X, num_directions): """ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams. @@ -51,14 +51,14 @@ def compute_persistence_diagram_projections(X, num_directions): num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation. Returns: - XX (list of n numpy arrays of shape (2*numx2)): list of projected persistence diagrams. + list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams. """ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X] return XX -def sliced_wasserstein_distance_on_projections(D1, D2): +def _sliced_wasserstein_distance_on_projections(D1, D2): """ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details. @@ -76,7 +76,7 @@ def sliced_wasserstein_distance_on_projections(D1, D2): L1 = np.sum(np.abs(A-B), axis=0) return np.mean(L1) -def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): +def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): """ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. @@ -118,7 +118,7 @@ def persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.): vectorj = vectorj/vectorj_sum return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) ) -def sklearn_wrapper(metric, X, Y, **kwargs): +def _sklearn_wrapper(metric, X, Y, **kwargs): """ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments. """ @@ -133,7 +133,7 @@ def sklearn_wrapper(metric, X, Y, **kwargs): PAIRWISE_DISTANCE_FUNCTIONS = { "wasserstein": hera_wasserstein_distance, "hera_wasserstein": hera_wasserstein_distance, - "persistence_fisher": persistence_fisher_distance, + "persistence_fisher": _persistence_fisher_distance, } def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs): @@ -143,7 +143,7 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa Parameters: X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. 
Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. - metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a symmetric function taking two numpy arrays of shape (nx2) and (mx2) as inputs. Returns: numpy array of shape (nxm): distance matrix @@ -153,25 +153,25 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa if metric == "bottleneck": try: from .. import bottleneck_distance - return pairwise_distances(XX, YY, metric=sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) + return pairwise_distances(XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs)) except ImportError: print("Gudhi built without CGAL") raise elif metric == "pot_wasserstein": try: from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance - return pairwise_distances(XX, YY, metric=sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) + return pairwise_distances(XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs)) except ImportError: print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'") raise elif metric == "sliced_wasserstein": - Xproj = compute_persistence_diagram_projections(X, **kwargs) - Yproj = None if Y is None else compute_persistence_diagram_projections(Y, **kwargs) - return pairwise_distances(XX, YY, metric=sklearn_wrapper(sliced_wasserstein_distance_on_projections, Xproj, Yproj)) + Xproj = _compute_persistence_diagram_projections(X, **kwargs) + Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs) + return pairwise_distances(XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj)) elif type(metric) == str: - return pairwise_distances(XX, YY, metric=sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs)) + return pairwise_distances(XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs)) else: - return pairwise_distances(XX, YY, metric=sklearn_wrapper(metric, X, Y, **kwargs)) + return pairwise_distances(XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs)) class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): """ @@ -209,6 +209,19 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): """ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions) + def __call__(self, diag1, diag2): + """ + Apply SlicedWassersteinDistance on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: sliced Wasserstein distance. 
+ """ + return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) + class BottleneckDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams. @@ -246,6 +259,24 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon) return Xfit + def __call__(self, diag1, diag2): + """ + Apply BottleneckDistance on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: bottleneck distance. + """ + try: + from .. import bottleneck_distance + return bottleneck_distance(diag1, diag2, e=self.epsilon) + except ImportError: + print("Gudhi built without CGAL") + raise + class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details. @@ -283,6 +314,19 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin): """ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) + def __call__(self, diag1, diag2): + """ + Apply PersistenceFisherDistance on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: persistence Fisher distance. + """ + return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) + class WassersteinDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams. @@ -325,5 +369,26 @@ class WassersteinDistance(BaseEstimator, TransformerMixin): if self.metric == "hera_wasserstein": Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta) else: - Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p) + Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False) return Xfit + + def __call__(self, diag1, diag2): + """ + Apply WassersteinDistance on a single pair of persistence diagrams and outputs the result. + + Parameters: + diag1 (n x 2 numpy array): first input persistence diagram. + diag2 (n x 2 numpy array): second input persistence diagram. + + Returns: + float: Wasserstein distance. 
+ """ + if self.metric == "hera_wasserstein": + return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta) + else: + try: + from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance + return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False) + except ImportError: + print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'") + raise diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py index a39b00e4..a8545349 100644 --- a/src/python/gudhi/representations/preprocessing.py +++ b/src/python/gudhi/representations/preprocessing.py @@ -54,6 +54,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin): Xfit.append(new_diag) return Xfit + def __call__(self, diag): + """ + Apply BirthPersistenceTransform on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + n x 2 numpy array: transformed persistence diagram. + """ + return self.fit_transform([diag])[0] + class Clamping(BaseEstimator, TransformerMixin): """ This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams. @@ -142,6 +154,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin): Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1]))) return Xfit + def __call__(self, diag): + """ + Apply DiagramScaler on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + n x 2 numpy array: transformed persistence diagram. + """ + return self.fit_transform([diag])[0] + class Padding(BaseEstimator, TransformerMixin): """ This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points. @@ -186,6 +210,18 @@ class Padding(BaseEstimator, TransformerMixin): Xfit = X return Xfit + def __call__(self, diag): + """ + Apply Padding on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + n x 2 numpy array: padded persistence diagram. + """ + return self.fit_transform([diag])[0] + class ProminentPoints(BaseEstimator, TransformerMixin): """ This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate. @@ -259,6 +295,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin): Xfit = X return Xfit + def __call__(self, diag): + """ + Apply ProminentPoints on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + n x 2 numpy array: thresholded persistence diagram. + """ + return self.fit_transform([diag])[0] + class DiagramSelector(BaseEstimator, TransformerMixin): """ This is a class for extracting finite or essential points in persistence diagrams. 
@@ -303,3 +351,15 @@ class DiagramSelector(BaseEstimator, TransformerMixin): else: Xfit = X return Xfit + + def __call__(self, diag): + """ + Apply DiagramSelector on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + n x 2 numpy array: extracted persistence diagram. + """ + return self.fit_transform([diag])[0] diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py index fe26dbe2..46fee086 100644 --- a/src/python/gudhi/representations/vector_methods.py +++ b/src/python/gudhi/representations/vector_methods.py @@ -81,6 +81,18 @@ class PersistenceImage(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply PersistenceImage on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**):: output persistence image. + """ + return self.fit_transform([diag])[0,:] + class Landscape(BaseEstimator, TransformerMixin): """ This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details. @@ -170,6 +182,18 @@ class Landscape(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply Landscape on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape. + """ + return self.fit_transform([diag])[0,:] + class Silhouette(BaseEstimator, TransformerMixin): """ This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details. @@ -248,6 +272,18 @@ class Silhouette(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply Silhouette on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (**resolution**): output persistence silhouette. + """ + return self.fit_transform([diag])[0,:] + class BettiCurve(BaseEstimator, TransformerMixin): """ This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details. @@ -308,6 +344,18 @@ class BettiCurve(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply BettiCurve on a single persistence diagram and outputs the result. 
+ + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (**resolution**): output Betti curve. + """ + return self.fit_transform([diag])[0,:] + class Entropy(BaseEstimator, TransformerMixin): """ This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros. @@ -378,6 +426,18 @@ class Entropy(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply Entropy on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy. + """ + return self.fit_transform([diag])[0,:] + class TopologicalVector(BaseEstimator, TransformerMixin): """ This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated to a persistence diagram is the sorted vector of a slight modification of the pairwise distances between the persistence diagram points. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details. @@ -431,6 +491,18 @@ class TopologicalVector(BaseEstimator, TransformerMixin): return Xfit + def __call__(self, diag): + """ + Apply TopologicalVector on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (**threshold**): output topological vector. + """ + return self.fit_transform([diag])[0,:] + class ComplexPolynomial(BaseEstimator, TransformerMixin): """ This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details. @@ -490,3 +562,15 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin): coeff = np.array(coeff[::-1])[1:] Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])] return Xfit + + def __call__(self, diag): + """ + Apply ComplexPolynomial on a single persistence diagram and outputs the result. + + Parameters: + diag (n x 2 numpy array): input persistence diagram. + + Returns: + numpy array with shape (**threshold**): output complex vector of coefficients. 
+ """ + return self.fit_transform([diag])[0,:] -- cgit v1.2.3 From c2b6d95f0b01ca913ddc704350cbfe37bcf13c3a Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Tue, 28 Apr 2020 19:28:24 -0400 Subject: update output --- .../include/gudhi/Bitmap_cubical_complex_base.h | 5 ++-- src/python/gudhi/cubical_complex.pyx | 33 ++++++++++++++++++---- src/python/gudhi/periodic_cubical_complex.pyx | 33 ++++++++++++++++++---- .../include/Persistent_cohomology_interface.h | 6 ++-- src/python/test/test_cubical_complex.py | 7 ++++- 5 files changed, 69 insertions(+), 15 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 6441c129..248ebdb6 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -110,8 +110,9 @@ class Bitmap_cubical_complex_base { virtual inline std::vector get_coboundary_of_a_cell(std::size_t cell) const; /** - * This function computes the index of one of the top-dimensional cubes (chosen arbitrarily) associated - * to a given simplex handle. Note that the input parameter is not necessarily a cube, it might also + * This function finds a top-dimensional cell that is incident to the input cell and has + * the same filtration value. In case several cells are suitable, an arbitrary one is + * returned. Note that the input parameter is not necessarily a cube, it might also * be an edge or vertex of a cube. On the other hand, the output is always indicating the position of * a cube in the data structure. **/ diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 69d0f0b6..884b0664 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -187,18 +187,41 @@ cdef class CubicalComplex: top-dimensional cells have the same filtration value, we arbitrarily return one of the two when calling the function on one of their common faces. - :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension. - :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of eah row being: (homological dimension, - index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if - the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1. + :returns: The top-dimensional cells/cofaces of the positive and negative cells, + together with the corresponding homological dimension, in two lists of numpy arrays of integers. + The first list contains the regular persistence pairs, grouped by dimension. + It contains numpy arrays of shape [number_of_persistence_points, 2]. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell, + index of negative top-dimensional cell). + The second list contains the essential features, grouped by dimension. + It contains numpy arrays of shape [number_of_persistence_points, 1]. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell). 
""" cdef vector[vector[int]] persistence_result if self.pcohptr != NULL: + output = [[],[]] persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() + pr = np.array(persistence_result) + + ess_ind = np.argwhere(pr[:,2] == -1)[:,0] + ess = pr[ess_ind] + max_h = max(ess[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(ess[:,0] == h)[:,0] + output[1].append(ess[hidxs][:,1]) + + reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind) + reg = pr[reg_ind] + max_h = max(reg[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(reg[:,0] == h)[:,0] + output[0].append(reg[hidxs][:,1:]) else: print("cofaces_of_persistence_pairs function requires persistence function" " to be launched first.") - return np.array(persistence_result) + return output def betti_numbers(self): """This function returns the Betti numbers of the complex. diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index 78565cf8..3cf2ff01 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -192,18 +192,41 @@ cdef class PeriodicCubicalComplex: top-dimensional cells have the same filtration value, we arbitrarily return one of the two when calling the function on one of their common faces. - :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension. - :rtype: numpy array of integers of shape [number_of_persistence_points, 3], the integers of eah row being: (homological dimension, - index of positive top-dimensional cell, index of negative top-dimensional cell). If the homological feature is essential, i.e., if - the death time is +infinity, then the index of the corresponding negative top-dimensional cell is -1. + :returns: The top-dimensional cells/cofaces of the positive and negative cells, + together with the corresponding homological dimension, in two lists of numpy arrays of integers. + The first list contains the regular persistence pairs, grouped by dimension. + It contains numpy arrays of shape [number_of_persistence_points, 2]. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell, + index of negative top-dimensional cell). + The second list contains the essential features, grouped by dimension. + It contains numpy arrays of shape [number_of_persistence_points, 1]. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell). """ cdef vector[vector[int]] persistence_result if self.pcohptr != NULL: + output = [[],[]] persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() + pr = np.array(persistence_result) + + ess_ind = np.argwhere(pr[:,2] == -1)[:,0] + ess = pr[ess_ind] + max_h = max(ess[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(ess[:,0] == h)[:,0] + output[1].append(ess[hidxs][:,1]) + + reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind) + reg = pr[reg_ind] + max_h = max(reg[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(reg[:,0] == h)[:,0] + output[0].append(reg[hidxs][:,1:]) else: print("cofaces_of_persistence_pairs function requires persistence function" " to be launched first.") - return np.array(persistence_result) + return output def betti_numbers(self): """This function returns the Betti numbers of the complex. 
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h
index 59024212..32e6ee9c 100644
--- a/src/python/include/Persistent_cohomology_interface.h
+++ b/src/python/include/Persistent_cohomology_interface.h
@@ -16,6 +16,7 @@
 #include <vector>
 #include <utility>  // for std::pair
 #include <algorithm>  // for sort
+#include <unordered_map>
 
 namespace Gudhi {
 
@@ -80,8 +81,9 @@ persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp>
-    std::map<std::size_t, unsigned> order;
-    std::sort(max_splx.begin(), max_splx.end());
-    for (unsigned int i = 0; i < max_splx.size(); i++) order.insert(std::make_pair(max_splx[i], i));
+    std::unordered_map<std::size_t, unsigned> order;
+    //std::sort(max_splx.begin(), max_splx.end());
+    for (unsigned int i = 0; i < max_splx.size(); i++) order.emplace(max_splx[i], i);
 
     std::vector<std::vector<int>> persistence_pairs;
     for (auto pair : pairs) {
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index dd7653c2..5c59db8f 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -151,4 +151,9 @@ def test_connected_sublevel_sets():
 def test_cubical_generators():
     cub = CubicalComplex(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
     cub.persistence()
-    assert np.array_equal(cub.cofaces_of_persistence_pairs(), np.array([[1, 7, 4], [0, 8, -1]]))
+    g = cub.cofaces_of_persistence_pairs()
+    assert len(g[0]) == 2
+    assert len(g[1]) == 1
+    assert np.array_equal(g[0][0], np.empty(shape=[0,2]))
+    assert np.array_equal(g[0][1], np.array([[7, 4]]))
+    assert np.array_equal(g[1][0], np.array([8]))
-- 
cgit v1.2.3


From 31080642b9029446efe85dabcf49145d9a7519b6 Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Wed, 29 Apr 2020 19:34:54 +0200
Subject: Make size() return size_t

It probably returns the biggest integer used in the whole module, so it
doesn't make sense for it to use a smaller type.
---
 src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index 1eb77c9c..e6a78a6d 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -197,7 +197,7 @@ class Bitmap_cubical_complex_base {
   /**
    * Returns number of all cubes in the data structure.
    **/
-  inline unsigned size() const { return this->data.size(); }
+  inline std::size_t size() const { return this->data.size(); }
 
   /**
    * Writing to stream operator. By using it we get the values T of cells in order in which they are stored in the
-- 
cgit v1.2.3


From 2b5586fd60848b159fb4fa4481e61bab0e0cd766 Mon Sep 17 00:00:00 2001
From: MathieuCarriere
Date: Wed, 29 Apr 2020 18:31:24 -0400
Subject: small modifs

---
 .../include/gudhi/Bitmap_cubical_complex_base.h |  4 +--
 src/python/gudhi/cubical_complex.pyx            | 42 +++++++++++-----------
 .../include/Persistent_cohomology_interface.h   |  7 +++-
 3 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index 248ebdb6..eaf8a0b6 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -116,7 +116,7 @@ class Bitmap_cubical_complex_base {
    * be an edge or vertex of a cube. 
On the other hand, the output is always indicating the position of * a cube in the data structure. **/ - inline int get_top_dimensional_coface_of_a_cell(int splx); + inline int get_top_dimensional_coface_of_a_cell(size_t splx); /** * This procedure compute incidence numbers between cubes. For a cube \f$A\f$ of @@ -612,7 +612,7 @@ void Bitmap_cubical_complex_base::setup_bitmap_based_on_top_dimensional_cells } template -int Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(int splx) { +int Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(size_t splx) { if (this->get_dimension_of_a_cell(splx) == this->dimension()){return splx;} else{ for (auto v : this->get_coboundary_of_a_cell(splx)){ diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 884b0664..b16a037f 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -199,28 +199,28 @@ cdef class CubicalComplex: The indices of the arrays in the list correspond to the homological dimensions, and the integers of each row in each array correspond to: (index of positive top-dimensional cell). """ + + assert self.pcohptr != NULL, "cofaces_of_persistence_pairs function requires persistence function to be launched first." + cdef vector[vector[int]] persistence_result - if self.pcohptr != NULL: - output = [[],[]] - persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() - pr = np.array(persistence_result) - - ess_ind = np.argwhere(pr[:,2] == -1)[:,0] - ess = pr[ess_ind] - max_h = max(ess[:,0])+1 - for h in range(max_h): - hidxs = np.argwhere(ess[:,0] == h)[:,0] - output[1].append(ess[hidxs][:,1]) - - reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind) - reg = pr[reg_ind] - max_h = max(reg[:,0])+1 - for h in range(max_h): - hidxs = np.argwhere(reg[:,0] == h)[:,0] - output[0].append(reg[hidxs][:,1:]) - else: - print("cofaces_of_persistence_pairs function requires persistence function" - " to be launched first.") + output = [[],[]] + persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs() + pr = np.array(persistence_result) + + ess_ind = np.argwhere(pr[:,2] == -1)[:,0] + ess = pr[ess_ind] + max_h = max(ess[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(ess[:,0] == h)[:,0] + output[1].append(ess[hidxs][:,1]) + + reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind) + reg = pr[reg_ind] + max_h = max(reg[:,0])+1 + for h in range(max_h): + hidxs = np.argwhere(reg[:,0] == h)[:,0] + output[0].append(reg[hidxs][:,1:]) + return output def betti_numbers(self): diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index c4e60a27..cec18546 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -68,11 +68,16 @@ persistent_cohomology::Persistent_cohomology> cofaces_of_cubical_persistence_pairs() { // Warning: this function is meant to be used with CubicalComplex only!! 
- auto pairs = persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); // Gather all top-dimensional cells and store their simplex handles -- cgit v1.2.3 From b2177e897b575e0c8d17b8ae5ed3259541a06bea Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Wed, 29 Apr 2020 19:16:50 -0400 Subject: small modifs --- src/python/doc/representations.rst | 2 +- src/python/example/diagram_vectorizations_distances_kernels.py | 4 +++- src/python/gudhi/representations/kernel_methods.py | 3 ++- src/python/gudhi/representations/metrics.py | 9 ++++----- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index 11dcbcf9..041e3247 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -10,7 +10,7 @@ Representations manual This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. -A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. +A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes. 
A small example is provided diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py index ab7d8a16..c4a71a7a 100755 --- a/src/python/example/diagram_vectorizations_distances_kernels.py +++ b/src/python/example/diagram_vectorizations_distances_kernels.py @@ -13,7 +13,9 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]]) -proc1, proc2, proc3 = DiagramSelector(use=True, point_type="finite"), DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]), DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]) +proc1 = DiagramSelector(use=True, point_type="finite") +proc2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]) +proc3 = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]) D1 = proc3(proc2(proc1(D1))) plt.scatter(D1[:,0], D1[:,1]) diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py index edd1382a..596f4f07 100644 --- a/src/python/gudhi/representations/kernel_methods.py +++ b/src/python/gudhi/representations/kernel_methods.py @@ -67,7 +67,8 @@ def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", Parameters: X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only. - kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric. + **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module. Returns: numpy array of shape (nxm): kernel matrix. 
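A quick sketch of the interface documented above, with arbitrary diagrams; passing num_directions through **kwargs to the sliced Wasserstein kernel is an assumption:

    import numpy as np
    from gudhi.representations.kernel_methods import pairwise_persistence_diagram_kernels

    # Two arbitrary finite diagrams; a list of numpy arrays is the expected input.
    diags = [np.array([[0., 4.], [1., 2.]]), np.array([[0., 3.]])]
    # Y=None computes the square kernel matrix of the list against itself;
    # extra keyword arguments are forwarded to the kernel function.
    K = pairwise_persistence_diagram_kernels(diags, kernel="sliced_wasserstein", num_directions=10)
    print(K.shape)  # (2, 2)
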
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index a4bf19a6..ce416fb1 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -32,11 +32,9 @@ def _sliced_wasserstein_distance(D1, D2, num_directions): thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1] lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0) approx1 = np.matmul(D1, lines) - diag_proj1 = (1./2) * np.ones((2,2)) - approx_diag1 = np.matmul(np.matmul(D1, diag_proj1), lines) + approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines) approx2 = np.matmul(D2, lines) - diag_proj2 = (1./2) * np.ones((2,2)) - approx_diag2 = np.matmul(np.matmul(D2, diag_proj2), lines) + approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines) A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0) B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0) L1 = np.sum(np.abs(A-B), axis=0) @@ -143,7 +141,8 @@ def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwa Parameters: X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams. Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only. - metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a symmetric function taking two numpy arrays of shape (nx2) and (mx2) as inputs. + metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays. + **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module. Returns: numpy array of shape (nxm): distance matrix -- cgit v1.2.3 From a51f4f177e29ad5b01e58c9d8dd2560fb9b4fb19 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Thu, 30 Apr 2020 00:52:52 -0400 Subject: int to size_t --- .../include/gudhi/Bitmap_cubical_complex_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index eaf8a0b6..e0c567ae 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -116,7 +116,7 @@ class Bitmap_cubical_complex_base { * be an edge or vertex of a cube. On the other hand, the output is always indicating the position of * a cube in the data structure. **/ - inline int get_top_dimensional_coface_of_a_cell(size_t splx); + inline size_t get_top_dimensional_coface_of_a_cell(size_t splx); /** * This procedure compute incidence numbers between cubes. 
For a cube \f$A\f$ of @@ -612,7 +612,7 @@ void Bitmap_cubical_complex_base::setup_bitmap_based_on_top_dimensional_cells } template -int Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(size_t splx) { +size_t Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(size_t splx) { if (this->get_dimension_of_a_cell(splx) == this->dimension()){return splx;} else{ for (auto v : this->get_coboundary_of_a_cell(splx)){ -- cgit v1.2.3 From 8edcb434b45ef07828f8111dedcbed024f469314 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Sun, 3 May 2020 09:04:17 +0200 Subject: Fix TBB Warning: tbb/task_scheduler_init.h is deprecated. --- .../example/rips_persistence_via_boundary_matrix.cpp | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp index db456f70..8c5742aa 100644 --- a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp +++ b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp @@ -17,10 +17,6 @@ #include -#ifdef GUDHI_USE_TBB -#include -#endif - #include #include @@ -67,11 +63,6 @@ int main(int argc, char * argv[]) { std::clog << "The complex contains " << st.num_simplices() << " simplices \n"; std::clog << " and has dimension " << st.dimension() << " \n"; -#ifdef GUDHI_USE_TBB - // Unnecessary, but clarifies which operations are parallel. - tbb::task_scheduler_init ts; -#endif - // Sort the simplices in the order of the filtration st.initialize_filtration(); int count = 0; @@ -81,10 +72,6 @@ int main(int argc, char * argv[]) { // Convert to a more convenient representation. Gudhi::Hasse_complex<> hcpx(st); -#ifdef GUDHI_USE_TBB - ts.terminate(); -#endif - // Free some space. delete &st; -- cgit v1.2.3 From ac7917ab2cbece048e554e32cc653c14440dbcc0 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 3 May 2020 20:43:11 +0200 Subject: Fewer copies and no GIL for hera Now the input arrays are not copied as long as they use a float64 data type, even if they are not contiguous. That's not important here, but I wanted an example of how to do it. More importantly, no need to hold the GIL. I was too lazy to benchmark to see if that changed anything... 
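A sketch of what this commit claims, assuming the module's usual keyword names (order, internal_p, delta) and that non-contiguous float64 views are accepted without a copy:

    import numpy as np
    from gudhi.hera import wasserstein_distance

    # Sorting each row guarantees birth <= death; slicing two of three columns
    # yields non-contiguous float64 views, which should not be copied.
    d1 = np.sort(np.random.rand(10, 3), axis=1)[:, :2]
    d2 = np.sort(np.random.rand(8, 3), axis=1)[:, :2]
    print(wasserstein_distance(d1, d2, order=1., internal_p=2., delta=0.01))
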
--- src/python/gudhi/hera.cc | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 0d562b4c..50d49c77 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -11,14 +11,24 @@ #include #include -#include +#include +#include #include // Hera -#include +#include namespace py = pybind11; -typedef py::array_t Dgm; +typedef py::array_t Dgm; + +// Get m[i,0] and m[i,1] as a pair +auto pairify(void* p, ssize_t h, ssize_t w) { + return [=](ssize_t i){ + char* birth = (char*)p + i * h; + char* death = birth + w; + return std::make_pair(*(double*)birth, *(double*)death); + }; +} double wasserstein_distance( Dgm d1, Dgm d2, @@ -27,16 +37,18 @@ double wasserstein_distance( { py::buffer_info buf1 = d1.request(); py::buffer_info buf2 = d2.request(); + + py::gil_scoped_release release; + // shape (n,2) or (0) for empty if((buf1.ndim!=2 || buf1.shape[1]!=2) && (buf1.ndim!=1 || buf1.shape[0]!=0)) throw std::runtime_error("Diagram 1 must be an array of size n x 2"); if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0)) throw std::runtime_error("Diagram 2 must be an array of size n x 2"); - typedef std::array Point; - auto p1 = (Point*)buf1.ptr; - auto p2 = (Point*)buf2.ptr; - auto diag1 = boost::make_iterator_range(p1, p1+buf1.shape[0]); - auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]); + auto cnt1 = boost::counting_range(0, buf1.shape[0]); + auto diag1 = boost::adaptors::transform(cnt1, pairify(buf1.ptr, buf1.strides[0], buf1.strides[1])); + auto cnt2 = boost::counting_range(0, buf2.shape[0]); + auto diag2 = boost::adaptors::transform(cnt2, pairify(buf2.ptr, buf2.strides[0], buf2.strides[1])); hera::AuctionParams params; params.wasserstein_power = wasserstein_power; -- cgit v1.2.3 From d2a9aed9ada419b7715a77322ad17ddf3535d133 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 19:23:40 +0200 Subject: Try to build with conda as brew fails --- azure-pipelines.yml | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 95b15db2..fccb7d57 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -4,35 +4,36 @@ jobs: displayName: "Build and test" timeoutInMinutes: 0 cancelTimeoutInMinutes: 60 - + pool: + vmImage: macOS-10.14 strategy: matrix: - macOSrelease: - imageName: 'macos-10.14' - CMakeBuildType: Release - customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal' + Python36: + python.version: '3.6' + Python37: + python.version: '3.7' + Python38: + python.version: '3.8' - pool: - vmImage: $(imageName) - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.7' - architecture: 'x64' + - bash: echo "##vso[task.prependpath]$CONDA/bin" + displayName: Add conda to PATH + + - bash: conda create --yes --quiet --name gudhi_build_env + displayName: Create Anaconda environment - - script: | - $(customInstallation) + - bash: | + source activate gudhi_build_env + conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION git submodule update --init - python -m pip install --upgrade pip python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt displayName: 'Install build dependencies' - - script: | + - bash: | mkdir build cd build cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON 
-DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. make make doxygen - ctest -j 8 --output-on-failure -E sphinx # remove sphinx build as it fails + ctest -j 8 --output-on-failure # -E sphinx remove sphinx build as it fails displayName: 'Build, test and documentation generation' -- cgit v1.2.3 From 5d03351f22f2511e3f5159f19f54b21bf2a04d61 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 19:29:02 +0200 Subject: sudo ? --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index fccb7d57..b50bd91a 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,7 @@ jobs: - bash: echo "##vso[task.prependpath]$CONDA/bin" displayName: Add conda to PATH - - bash: conda create --yes --quiet --name gudhi_build_env + - bash: sudo conda create --yes --quiet --name gudhi_build_env displayName: Create Anaconda environment - bash: | -- cgit v1.2.3 From 7bd1941307033193da4c1cfcef873e69ca7f68f3 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 19:30:55 +0200 Subject: sudo ? --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b50bd91a..0fea11f6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -24,7 +24,7 @@ jobs: - bash: | source activate gudhi_build_env - conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION + sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION git submodule update --init python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt -- cgit v1.2.3 From 5ad8f41550d94988214fbf128a179d918635c3cf Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 4 May 2020 20:13:05 +0200 Subject: Add some nogil for cython --- src/python/gudhi/alpha_complex.pyx | 17 +++++--- src/python/gudhi/bottleneck.pyx | 20 ++++++--- src/python/gudhi/rips_complex.pyx | 17 ++++---- src/python/gudhi/simplex_tree.pxd | 89 +++++++++++++++++++------------------- src/python/gudhi/simplex_tree.pyx | 14 ++++-- 5 files changed, 88 insertions(+), 69 deletions(-) diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx index e04dc652..d75e374a 100644 --- a/src/python/gudhi/alpha_complex.pyx +++ b/src/python/gudhi/alpha_complex.pyx @@ -27,11 +27,11 @@ __license__ = "GPL v3" cdef extern from "Alpha_complex_interface.h" namespace "Gudhi": cdef cppclass Alpha_complex_interface "Gudhi::alpha_complex::Alpha_complex_interface": - Alpha_complex_interface(vector[vector[double]] points) except + + Alpha_complex_interface(vector[vector[double]] points) nogil except + # bool from_file is a workaround for cython to find the correct signature - Alpha_complex_interface(string off_file, bool from_file) except + - vector[double] get_point(int vertex) except + - void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) except + + Alpha_complex_interface(string off_file, bool from_file) nogil except + + vector[double] get_point(int vertex) nogil except + + void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) nogil except + # AlphaComplex python interface cdef class AlphaComplex: @@ -70,6 +70,7 @@ cdef class AlphaComplex: # The real cython constructor def __cinit__(self, points = None, off_file = ''): + cdef vector[vector[double]] pts if off_file: if os.path.isfile(off_file): self.thisptr = new 
Alpha_complex_interface( @@ -80,7 +81,9 @@ cdef class AlphaComplex: if points is None: # Empty Alpha construction points=[] - self.thisptr = new Alpha_complex_interface(points) + pts = points + with nogil: + self.thisptr = new Alpha_complex_interface(pts) def __dealloc__(self): @@ -113,6 +116,8 @@ cdef class AlphaComplex: :rtype: SimplexTree """ stree = SimplexTree() + cdef double mas = max_alpha_square cdef intptr_t stree_int_ptr=stree.thisptr - self.thisptr.create_simplex_tree(stree_int_ptr, max_alpha_square) + with nogil: + self.thisptr.create_simplex_tree(stree_int_ptr, mas) return stree diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx index af011e88..6a88895e 100644 --- a/src/python/gudhi/bottleneck.pyx +++ b/src/python/gudhi/bottleneck.pyx @@ -17,8 +17,8 @@ __copyright__ = "Copyright (C) 2016 Inria" __license__ = "GPL v3" cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram": - double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double) - double bottleneck(vector[pair[double, double]], vector[pair[double, double]]) + double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double) nogil + double bottleneck(vector[pair[double, double]], vector[pair[double, double]]) nogil def bottleneck_distance(diagram_1, diagram_2, e=None): """This function returns the point corresponding to a given vertex. @@ -40,9 +40,17 @@ def bottleneck_distance(diagram_1, diagram_2, e=None): :rtype: float :returns: the bottleneck distance. """ + cdef vector[pair[double, double]] dgm1 = diagram_1 + cdef vector[pair[double, double]] dgm2 = diagram_2 + cdef double eps + cdef double ret if e is None: - # Default value is the smallest double value (not 0, 0 is for exact version) - return bottleneck(diagram_1, diagram_2) + with nogil: + # Default value is the smallest double value (not 0, 0 is for exact version) + ret = bottleneck(dgm1, dgm2) else: - # Can be 0 for exact version - return bottleneck(diagram_1, diagram_2, e) + eps = e + with nogil: + # Can be 0 for exact version + ret = bottleneck(dgm1, dgm2, eps) + return ret diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx index deb8057a..72e82c79 100644 --- a/src/python/gudhi/rips_complex.pyx +++ b/src/python/gudhi/rips_complex.pyx @@ -23,12 +23,12 @@ __license__ = "MIT" cdef extern from "Rips_complex_interface.h" namespace "Gudhi": cdef cppclass Rips_complex_interface "Gudhi::rips_complex::Rips_complex_interface": - Rips_complex_interface() - void init_points(vector[vector[double]] values, double threshold) - void init_matrix(vector[vector[double]] values, double threshold) - void init_points_sparse(vector[vector[double]] values, double threshold, double sparse) - void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse) - void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) except + + Rips_complex_interface() nogil + void init_points(vector[vector[double]] values, double threshold) nogil + void init_matrix(vector[vector[double]] values, double threshold) nogil + void init_points_sparse(vector[vector[double]] values, double threshold, double sparse) nogil + void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse) nogil + void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) nogil except + # RipsComplex python interface cdef class RipsComplex: @@ -97,6 +97,7 @@ cdef class RipsComplex: """ 
stree = SimplexTree() cdef intptr_t stree_int_ptr=stree.thisptr - self.thisref.create_simplex_tree(stree_int_ptr, - max_dimension) + cdef int maxdim = max_dimension + with nogil: + self.thisref.create_simplex_tree(stree_int_ptr, maxdim) return stree diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 1d4ed926..e748ac40 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -25,57 +25,56 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": pass cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface::Complex_simplex_iterator": - Simplex_tree_simplices_iterator() - Simplex_tree_simplex_handle& operator*() - Simplex_tree_simplices_iterator operator++() - bint operator!=(Simplex_tree_simplices_iterator) + Simplex_tree_simplices_iterator() nogil + Simplex_tree_simplex_handle& operator*() nogil + Simplex_tree_simplices_iterator operator++() nogil + bint operator!=(Simplex_tree_simplices_iterator) nogil cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface::Skeleton_simplex_iterator": - Simplex_tree_skeleton_iterator() - Simplex_tree_simplex_handle& operator*() - Simplex_tree_skeleton_iterator operator++() - bint operator!=(Simplex_tree_skeleton_iterator) + Simplex_tree_skeleton_iterator() nogil + Simplex_tree_simplex_handle& operator*() nogil + Simplex_tree_skeleton_iterator operator++() nogil + bint operator!=(Simplex_tree_skeleton_iterator) nogil cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface": - Simplex_tree() - double simplex_filtration(vector[int] simplex) - void assign_simplex_filtration(vector[int] simplex, double filtration) - void initialize_filtration() - int num_vertices() - int num_simplices() - void set_dimension(int dimension) - int dimension() - int upper_bound_dimension() - bool find_simplex(vector[int] simplex) - bool insert(vector[int] simplex, double filtration) - vector[pair[vector[int], double]] get_star(vector[int] simplex) - vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, - int dimension) - void expansion(int max_dim) except + - void remove_maximal_simplex(vector[int] simplex) - bool prune_above_filtration(double filtration) - bool make_filtration_non_decreasing() - void compute_extended_filtration() - vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) + Simplex_tree() nogil + double simplex_filtration(vector[int] simplex) nogil + void assign_simplex_filtration(vector[int] simplex, double filtration) nogil + void initialize_filtration() nogil + int num_vertices() nogil + int num_simplices() nogil + void set_dimension(int dimension) nogil + int dimension() nogil + int upper_bound_dimension() nogil + bool find_simplex(vector[int] simplex) nogil + bool insert(vector[int] simplex, double filtration) nogil + vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil + vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil + void expansion(int max_dim) nogil except + + void remove_maximal_simplex(vector[int] simplex) nogil + bool prune_above_filtration(double filtration) nogil + bool make_filtration_non_decreasing() nogil + void compute_extended_filtration() nogil + vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil # Iterators over Simplex 
tree - pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) - Simplex_tree_simplices_iterator get_simplices_iterator_begin() - Simplex_tree_simplices_iterator get_simplices_iterator_end() - vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() - vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() - Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) - Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) + pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil + Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil + Simplex_tree_simplices_iterator get_simplices_iterator_end() nogil + vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() nogil + vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() nogil + Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) nogil + Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) nogil cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface>": - Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) - void compute_persistence(int homology_coeff_field, double min_persistence) - vector[pair[int, pair[double, double]]] get_persistence() - vector[int] betti_numbers() - vector[int] persistent_betti_numbers(double from_value, double to_value) - vector[pair[double,double]] intervals_in_dimension(int dimension) - void write_output_diagram(string diagram_file_name) except + - vector[pair[vector[int], vector[int]]] persistence_pairs() - pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() - pair[vector[vector[int]], vector[vector[int]]] flag_generators() + Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil + void compute_persistence(int homology_coeff_field, double min_persistence) nogil + vector[pair[int, pair[double, double]]] get_persistence() nogil + vector[int] betti_numbers() nogil + vector[int] persistent_betti_numbers(double from_value, double to_value) nogil + vector[pair[double,double]] intervals_in_dimension(int dimension) nogil + void write_output_diagram(string diagram_file_name) nogil except + + vector[pair[vector[int], vector[int]]] persistence_pairs() nogil + pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() nogil + pair[vector[vector[int]], vector[vector[int]]] flag_generators() nogil diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 55115cca..e8e4943c 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -33,7 +33,7 @@ cdef class SimplexTree: cdef public intptr_t thisptr # Get the pointer casted as it should be - cdef Simplex_tree_interface_full_featured* get_ptr(self): + cdef Simplex_tree_interface_full_featured* get_ptr(self) nogil: return (self.thisptr) cdef Simplex_tree_persistence_interface * pcohptr @@ -343,7 +343,9 @@ cdef class SimplexTree: :param max_dim: The maximal dimension. :type max_dim: int. 
""" - self.get_ptr().expansion(max_dim) + cdef int maxdim = max_dim + with nogil: + self.get_ptr().expansion(maxdim) def make_filtration_non_decreasing(self): """This function ensures that each simplex has a higher filtration @@ -449,8 +451,12 @@ cdef class SimplexTree: """ if self.pcohptr != NULL: del self.pcohptr - self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max) - self.pcohptr.compute_persistence(homology_coeff_field, min_persistence) + cdef bool pdm = persistence_dim_max + cdef int coef = homology_coeff_field + cdef double minp = min_persistence + with nogil: + self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), pdm) + self.pcohptr.compute_persistence(coef, minp) def betti_numbers(self): """This function returns the Betti numbers of the simplicial complex. -- cgit v1.2.3 From 62139c92181b7f405ce0e36ef6b46777cee85b34 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 22:22:26 +0200 Subject: Add conda build requirements --- azure-pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0fea11f6..97c84136 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -25,6 +25,7 @@ jobs: - bash: | source activate gudhi_build_env sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION + sudo conda install --yes -c conda-forge doxygen eigen boost-cpp=1.70.0 cgal-cpp>=5.0 git submodule update --init python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt -- cgit v1.2.3 From b880228fb423aeb3d662416fbb477d3ced100e08 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 22:31:13 +0200 Subject: Need to activate conda env to build --- azure-pipelines.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 97c84136..2fcff411 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -10,10 +10,10 @@ jobs: matrix: Python36: python.version: '3.6' - Python37: - python.version: '3.7' - Python38: - python.version: '3.8' + #Python37: + # python.version: '3.7' + #Python38: + # python.version: '3.8' steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" @@ -26,11 +26,12 @@ jobs: source activate gudhi_build_env sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION sudo conda install --yes -c conda-forge doxygen eigen boost-cpp=1.70.0 cgal-cpp>=5.0 - git submodule update --init python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt displayName: 'Install build dependencies' - bash: | + source activate gudhi_build_env + git submodule update --init mkdir build cd build cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. 
-- cgit v1.2.3 From 71d958891cc638b26541ca5cf6c569b43332d2b6 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 4 May 2020 22:39:10 +0200 Subject: conda update and release cmake version --- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2fcff411..b3b0ea7f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -10,6 +10,7 @@ jobs: matrix: Python36: python.version: '3.6' + CMakeBuildType: Release #Python37: # python.version: '3.7' #Python38: @@ -25,6 +26,7 @@ jobs: - bash: | source activate gudhi_build_env sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION + sudo conda update --yes --quiet -n base -c defaults conda sudo conda install --yes -c conda-forge doxygen eigen boost-cpp=1.70.0 cgal-cpp>=5.0 python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt -- cgit v1.2.3 From 03b8322e9ded09cc879867008d32baa3a91a45e5 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 5 May 2020 07:05:45 +0200 Subject: brew install cgal & Cie instead of conda install because of link issue --- azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b3b0ea7f..3ab2f112 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -11,6 +11,7 @@ jobs: Python36: python.version: '3.6' CMakeBuildType: Release + customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal' #Python37: # python.version: '3.7' #Python38: @@ -26,10 +27,9 @@ jobs: - bash: | source activate gudhi_build_env sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION - sudo conda update --yes --quiet -n base -c defaults conda - sudo conda install --yes -c conda-forge doxygen eigen boost-cpp=1.70.0 cgal-cpp>=5.0 python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt + $(customInstallation) displayName: 'Install build dependencies' - bash: | source activate gudhi_build_env -- cgit v1.2.3 From 8da9158e9a2ffb128eb1b5b05d4e8574ff70d771 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 5 May 2020 07:48:16 +0200 Subject: Remove sphinx test and matrix --- azure-pipelines.yml | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3ab2f112..7b5334a7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -6,16 +6,10 @@ jobs: cancelTimeoutInMinutes: 60 pool: vmImage: macOS-10.14 - strategy: - matrix: - Python36: - python.version: '3.6' - CMakeBuildType: Release - customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal' - #Python37: - # python.version: '3.7' - #Python38: - # python.version: '3.8' + variables: + pythonVersion: '3.6' + cmakeBuildType: Release + customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal' steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" @@ -26,7 +20,7 @@ jobs: - bash: | source activate gudhi_build_env - sudo conda install --yes --quiet --name gudhi_build_env python=$PYTHON_VERSION + sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion) python -m pip install --user -r .github/build-requirements.txt python -m pip install --user -r .github/test-requirements.txt $(customInstallation) @@ -36,8 +30,8 @@ jobs: git submodule update --init 
mkdir build cd build - cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. - make + cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. + make -j 4 make doxygen - ctest -j 8 --output-on-failure # -E sphinx remove sphinx build as it fails + ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails displayName: 'Build, test and documentation generation' -- cgit v1.2.3 From 99549c20e9173b536ac816ab683bc13025f182a2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 5 May 2020 11:07:53 +0200 Subject: fix use of threads and n_jobs in Parallel --- src/python/gudhi/point_cloud/knn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 07553d6d..34e80b5d 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -200,8 +200,8 @@ class KNearestNeighbors: from joblib import Parallel, delayed, effective_n_jobs from sklearn.utils import gen_even_slices - slices = gen_even_slices(len(X), effective_n_jobs(-1)) - parallel = Parallel(backend="threading", n_jobs=-1) + slices = gen_even_slices(len(X), effective_n_jobs(n_jobs)) + parallel = Parallel(prefer="threads", n_jobs=n_jobs) if self.params.get("sort_results", True): def func(M): @@ -242,8 +242,8 @@ class KNearestNeighbors: else: func = lambda M: numpy.partition(M, k - 1)[:, 0:k] - slices = gen_even_slices(len(X), effective_n_jobs(-1)) - parallel = Parallel(backend="threading", n_jobs=-1) + slices = gen_even_slices(len(X), effective_n_jobs(n_jobs)) + parallel = Parallel(prefer="threads", n_jobs=n_jobs) distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices)) return distances return None -- cgit v1.2.3 From dac92c5ae9da6aa21fdcd261737e08d6898dbbdc Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 6 May 2020 12:54:21 +0200 Subject: Avoid reading outside of allocated region The result was unused, but better be safe. --- src/python/gudhi/hera.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 50d49c77..63bbb075 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -45,10 +45,12 @@ double wasserstein_distance( throw std::runtime_error("Diagram 1 must be an array of size n x 2"); if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0)) throw std::runtime_error("Diagram 2 must be an array of size n x 2"); + ssize_t stride11 = buf1.ndim == 2 ? buf1.strides[1] : 0; + ssize_t stride21 = buf2.ndim == 2 ? 
buf2.strides[1] : 0; auto cnt1 = boost::counting_range(0, buf1.shape[0]); - auto diag1 = boost::adaptors::transform(cnt1, pairify(buf1.ptr, buf1.strides[0], buf1.strides[1])); + auto diag1 = boost::adaptors::transform(cnt1, pairify(buf1.ptr, buf1.strides[0], stride11)); auto cnt2 = boost::counting_range(0, buf2.shape[0]); - auto diag2 = boost::adaptors::transform(cnt2, pairify(buf2.ptr, buf2.strides[0], buf2.strides[1])); + auto diag2 = boost::adaptors::transform(cnt2, pairify(buf2.ptr, buf2.strides[0], stride21)); hera::AuctionParams params; params.wasserstein_power = wasserstein_power; -- cgit v1.2.3 From 5c5e2c3075235079fda94fc6a159cc5275f85a0c Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 6 May 2020 14:13:14 +0200 Subject: Refactor the numpy -> C++ range conversion If we want to reuse it for bottleneck... --- src/python/gudhi/hera.cc | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 63bbb075..5aec1806 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -22,7 +22,7 @@ namespace py = pybind11; typedef py::array_t Dgm; // Get m[i,0] and m[i,1] as a pair -auto pairify(void* p, ssize_t h, ssize_t w) { +static auto pairify(void* p, ssize_t h, ssize_t w) { return [=](ssize_t i){ char* birth = (char*)p + i * h; char* death = birth + w; @@ -30,28 +30,29 @@ auto pairify(void* p, ssize_t h, ssize_t w) { }; } +inline auto numpy_to_range_of_pairs(py::array_t dgm) { + py::buffer_info buf = dgm.request(); + // shape (n,2) or (0) for empty + if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0)) + throw std::runtime_error("Diagram must be an array of size n x 2"); + // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it. + ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0; + auto cnt = boost::counting_range(0, buf.shape[0]); + return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1)); + // Be careful that the returned range cannot contain references to dead temporaries. +} + double wasserstein_distance( Dgm d1, Dgm d2, double wasserstein_power, double internal_p, double delta) { - py::buffer_info buf1 = d1.request(); - py::buffer_info buf2 = d2.request(); + // I *think* the call to request() has to be before releasing the GIL. + auto diag1 = numpy_to_range_of_pairs(d1); + auto diag2 = numpy_to_range_of_pairs(d2); py::gil_scoped_release release; - // shape (n,2) or (0) for empty - if((buf1.ndim!=2 || buf1.shape[1]!=2) && (buf1.ndim!=1 || buf1.shape[0]!=0)) - throw std::runtime_error("Diagram 1 must be an array of size n x 2"); - if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0)) - throw std::runtime_error("Diagram 2 must be an array of size n x 2"); - ssize_t stride11 = buf1.ndim == 2 ? buf1.strides[1] : 0; - ssize_t stride21 = buf2.ndim == 2 ? buf2.strides[1] : 0; - auto cnt1 = boost::counting_range(0, buf1.shape[0]); - auto diag1 = boost::adaptors::transform(cnt1, pairify(buf1.ptr, buf1.strides[0], stride11)); - auto cnt2 = boost::counting_range(0, buf2.shape[0]); - auto diag2 = boost::adaptors::transform(cnt2, pairify(buf2.ptr, buf2.strides[0], stride21)); - hera::AuctionParams params; params.wasserstein_power = wasserstein_power; // hera encodes infinity as -1... 
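Since the GIL is released around the computation, several hera calls can genuinely run in parallel from Python threads; a sketch, assuming the module's default parameters:

    import numpy as np
    from concurrent.futures import ThreadPoolExecutor
    from gudhi.hera import wasserstein_distance

    diags = [np.sort(np.random.rand(50, 2), axis=1) for _ in range(8)]
    with ThreadPoolExecutor() as pool:
        # Each call releases the GIL, so the worker threads actually run in parallel.
        dists = list(pool.map(lambda d: wasserstein_distance(diags[0], d), diags))
    print(dists)
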
-- cgit v1.2.3 From 47e5ac79af3a354358515c0213b28848f878fde6 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 6 May 2020 22:59:36 +0200 Subject: Reimplement the bottleneck python wrapper with pybind11 --- src/python/CMakeLists.txt | 33 ++++++++++--------- src/python/gudhi/bottleneck.cc | 51 +++++++++++++++++++++++++++++ src/python/gudhi/bottleneck.pyx | 48 --------------------------- src/python/gudhi/hera.cc | 32 +----------------- src/python/include/pybind11_diagram_utils.h | 39 ++++++++++++++++++++++ src/python/setup.py.in | 19 +++++++++-- 6 files changed, 125 insertions(+), 97 deletions(-) create mode 100644 src/python/gudhi/bottleneck.cc delete mode 100644 src/python/gudhi/bottleneck.pyx create mode 100644 src/python/include/pybind11_diagram_utils.h diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d712e189..976a8b52 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -34,6 +34,7 @@ endfunction( add_gudhi_debug_info ) if(PYTHONINTERP_FOUND) if(PYBIND11_FOUND) add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}") + set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") endif() if(CYTHON_FOUND) @@ -46,7 +47,6 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'reader_utils', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'witness_complex', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'strong_witness_complex', ") - set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'nerve_gic', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'subsampling', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'tangential_complex', ") @@ -120,24 +120,25 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ") endif (EIGEN3_FOUND) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'off_reader', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'simplex_tree', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'rips_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'cubical_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'periodic_cubical_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'reader_utils', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'witness_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'strong_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'simplex_tree', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'rips_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'cubical_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'periodic_cubical_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'reader_utils', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'strong_witness_complex', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'bottleneck', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE 
"${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ") endif () if(CGAL_FOUND) diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc new file mode 100644 index 00000000..577e5e0b --- /dev/null +++ b/src/python/gudhi/bottleneck.cc @@ -0,0 +1,51 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Marc Glisse + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include + +#include + +double bottleneck(Dgm d1, Dgm d2, double epsilon) +{ + // I *think* the call to request() has to be before releasing the GIL. + auto diag1 = numpy_to_range_of_pairs(d1); + auto diag2 = numpy_to_range_of_pairs(d2); + + py::gil_scoped_release release; + + return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, epsilon); +} + +PYBIND11_MODULE(bottleneck, m) { + m.attr("__license__") = "GPL v3"; + m.def("bottleneck_distance", &bottleneck, + py::arg("diagram_1"), py::arg("diagram_2"), + py::arg("e") = (std::numeric_limits::min)(), + R"pbdoc( + This function returns the point corresponding to a given vertex. + + :param diagram_1: The first diagram. + :type diagram_1: vector[pair[double, double]] + :param diagram_2: The second diagram. + :type diagram_2: vector[pair[double, double]] + :param e: If `e` is 0, this uses an expensive algorithm to compute the + exact distance. + If `e` is not 0, it asks for an additive `e`-approximation, and + currently also allows a small multiplicative error (the last 2 or 3 + bits of the mantissa may be wrong). This version of the algorithm takes + advantage of the limited precision of `double` and is usually a lot + faster to compute, whatever the value of `e`. + + Thus, by default, `e` is the smallest positive double. + :type e: float + :rtype: float + :returns: the bottleneck distance. + )pbdoc"); +} diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx deleted file mode 100644 index af011e88..00000000 --- a/src/python/gudhi/bottleneck.pyx +++ /dev/null @@ -1,48 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
-# Author(s): Vincent Rouvreau -# -# Copyright (C) 2016 Inria -# -# Modification(s): -# - YYYY/MM Author: Description of the modification - -from cython cimport numeric -from libcpp.vector cimport vector -from libcpp.utility cimport pair -import os - -__author__ = "Vincent Rouvreau" -__copyright__ = "Copyright (C) 2016 Inria" -__license__ = "GPL v3" - -cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram": - double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double) - double bottleneck(vector[pair[double, double]], vector[pair[double, double]]) - -def bottleneck_distance(diagram_1, diagram_2, e=None): - """This function returns the point corresponding to a given vertex. - - :param diagram_1: The first diagram. - :type diagram_1: vector[pair[double, double]] - :param diagram_2: The second diagram. - :type diagram_2: vector[pair[double, double]] - :param e: If `e` is 0, this uses an expensive algorithm to compute the - exact distance. - If `e` is not 0, it asks for an additive `e`-approximation, and - currently also allows a small multiplicative error (the last 2 or 3 - bits of the mantissa may be wrong). This version of the algorithm takes - advantage of the limited precision of `double` and is usually a lot - faster to compute, whatever the value of `e`. - - Thus, by default, `e` is the smallest positive double. - :type e: float - :rtype: float - :returns: the bottleneck distance. - """ - if e is None: - # Default value is the smallest double value (not 0, 0 is for exact version) - return bottleneck(diagram_1, diagram_2) - else: - # Can be 0 for exact version - return bottleneck(diagram_1, diagram_2, e) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 5aec1806..ea80a9a8 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -8,39 +8,9 @@ * - YYYY/MM Author: Description of the modification */ -#include -#include - -#include -#include - #include // Hera -#include - -namespace py = pybind11; -typedef py::array_t Dgm; - -// Get m[i,0] and m[i,1] as a pair -static auto pairify(void* p, ssize_t h, ssize_t w) { - return [=](ssize_t i){ - char* birth = (char*)p + i * h; - char* death = birth + w; - return std::make_pair(*(double*)birth, *(double*)death); - }; -} - -inline auto numpy_to_range_of_pairs(py::array_t dgm) { - py::buffer_info buf = dgm.request(); - // shape (n,2) or (0) for empty - if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0)) - throw std::runtime_error("Diagram must be an array of size n x 2"); - // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it. - ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0; - auto cnt = boost::counting_range(0, buf.shape[0]); - return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1)); - // Be careful that the returned range cannot contain references to dead temporaries. -} +#include double wasserstein_distance( Dgm d1, Dgm d2, diff --git a/src/python/include/pybind11_diagram_utils.h b/src/python/include/pybind11_diagram_utils.h new file mode 100644 index 00000000..d9627258 --- /dev/null +++ b/src/python/include/pybind11_diagram_utils.h @@ -0,0 +1,39 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ * Author(s): Marc Glisse + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include +#include + +#include +#include + +namespace py = pybind11; +typedef py::array_t Dgm; + +// Get m[i,0] and m[i,1] as a pair +static auto pairify(void* p, ssize_t h, ssize_t w) { + return [=](ssize_t i){ + char* birth = (char*)p + i * h; + char* death = birth + w; + return std::make_pair(*(double*)birth, *(double*)death); + }; +} + +inline auto numpy_to_range_of_pairs(py::array_t dgm) { + py::buffer_info buf = dgm.request(); + // shape (n,2) or (0) for empty + if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0)) + throw std::runtime_error("Diagram must be an array of size n x 2"); + // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it. + ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0; + auto cnt = boost::counting_range(0, buf.shape[0]); + return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1)); + // Be careful that the returned range cannot contain references to dead temporaries. +} diff --git a/src/python/setup.py.in b/src/python/setup.py.in index f968bd59..852da910 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -18,7 +18,8 @@ __author__ = "Vincent Rouvreau" __copyright__ = "Copyright (C) 2016 Inria" __license__ = "MIT" -modules = [@GUDHI_PYTHON_MODULES_TO_COMPILE@] +cython_modules = [@GUDHI_CYTHON_MODULES@] +pybind11_modules = [@GUDHI_PYBIND11_MODULES@] source_dir='@CMAKE_CURRENT_SOURCE_DIR@/gudhi/' extra_compile_args=[@GUDHI_PYTHON_EXTRA_COMPILE_ARGS@] @@ -30,7 +31,7 @@ runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@] # Create ext_modules list from module list ext_modules = [] -for module in modules: +for module in cython_modules: ext_modules.append(Extension( 'gudhi.' + module, sources = [source_dir + module + '.pyx',], @@ -55,6 +56,20 @@ ext_modules.append(Extension( extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], )) +if "bottleneck" in pybind11_modules: + ext_modules.append(Extension( + 'gudhi.bottleneck', + sources = [source_dir + 'bottleneck.cc'], + language = 'c++', + include_dirs = include_dirs + + [pybind11.get_include(False), pybind11.get_include(True)], + extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], + extra_link_args=extra_link_args, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=runtime_library_dirs, + )) + setup( name = 'gudhi', packages=find_packages(), # find_namespace_packages(include=["gudhi*"]) -- cgit v1.2.3 From d61bfd349274456f8d7e0ccd64839a2d84eea0a0 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 7 May 2020 08:40:55 +0200 Subject: doc --- src/python/gudhi/bottleneck.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc index 577e5e0b..732cb9a8 100644 --- a/src/python/gudhi/bottleneck.cc +++ b/src/python/gudhi/bottleneck.cc @@ -32,9 +32,9 @@ PYBIND11_MODULE(bottleneck, m) { This function returns the point corresponding to a given vertex. :param diagram_1: The first diagram. - :type diagram_1: vector[pair[double, double]] + :type diagram_1: numpy array of shape (m,2) :param diagram_2: The second diagram. - :type diagram_2: vector[pair[double, double]] + :type diagram_2: numpy array of shape (n,2) :param e: If `e` is 0, this uses an expensive algorithm to compute the exact distance. 
If `e` is not 0, it asks for an additive `e`-approximation, and @@ -42,7 +42,6 @@ PYBIND11_MODULE(bottleneck, m) { bits of the mantissa may be wrong). This version of the algorithm takes advantage of the limited precision of `double` and is usually a lot faster to compute, whatever the value of `e`. - Thus, by default, `e` is the smallest positive double. :type e: float :rtype: float -- cgit v1.2.3 From acc76eb90b8cfe3f8cbb8d30f101c7f879ab61c4 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 7 May 2020 20:10:46 +0200 Subject: Warn for initialize_filtration --- src/python/gudhi/simplex_tree.pyx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index 55115cca..b23885b4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -101,6 +101,8 @@ cdef class SimplexTree: .. deprecated:: 3.2.0 """ + import warnings + warnings.warn("Since Gudhi 3.2, calling SimplexTree.initialize_filtration is unnecessary.", DeprecationWarning) self.get_ptr().initialize_filtration() def num_vertices(self): -- cgit v1.2.3 From 778c0af7dea0c103db85986fe2e2eb5fddd7588f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 8 May 2020 10:14:50 +0200 Subject: Loop on pybind11 modules --- src/python/setup.py.in | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/src/python/setup.py.in b/src/python/setup.py.in index 852da910..b9f4e3f0 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -46,23 +46,15 @@ for module in cython_modules: ext_modules = cythonize(ext_modules) -ext_modules.append(Extension( - 'gudhi.hera', - sources = [source_dir + 'hera.cc'], - language = 'c++', - include_dirs = include_dirs + - ['@HERA_WASSERSTEIN_INCLUDE_DIR@', - pybind11.get_include(False), pybind11.get_include(True)], - extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], - )) - -if "bottleneck" in pybind11_modules: +for module in pybind11_modules: + my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)] + if module == 'hera': + my_include_dirs = ['@HERA_WASSERSTEIN_INCLUDE_DIR@'] + my_include_dirs ext_modules.append(Extension( - 'gudhi.bottleneck', - sources = [source_dir + 'bottleneck.cc'], + 'gudhi.' + module, + sources = [source_dir + module + '.cc'], language = 'c++', - include_dirs = include_dirs + - [pybind11.get_include(False), pybind11.get_include(True)], + include_dirs = my_include_dirs, extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@], extra_link_args=extra_link_args, libraries=libraries, -- cgit v1.2.3 From 5040c75893cb864f5e780b6644b8097f7beeb3a6 Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Mon, 11 May 2020 10:45:02 +0900 Subject: document and comments added, weights modified --- src/python/doc/rips_complex_ref.rst | 51 +++++++++++++++++++++++++++++++ src/python/gudhi/weighted_rips_complex.py | 18 ++++++----- src/python/test/test_weighted_rips.py | 2 +- 3 files changed, 63 insertions(+), 8 deletions(-) diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst index 22b5616c..8fc7e1b0 100644 --- a/src/python/doc/rips_complex_ref.rst +++ b/src/python/doc/rips_complex_ref.rst @@ -12,3 +12,54 @@ Rips complex reference manual :show-inheritance: .. automethod:: gudhi.RipsComplex.__init__ + +====================================== +Weighted Rips complex reference manual +====================================== + +.. 
autoclass:: gudhi.WeightedRipsComplex
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+   .. automethod:: gudhi.WeightedRipsComplex.__init__
+
+Basic examples
+--------------
+
+The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices.
+
+.. testcode::
+
+    from gudhi.weighted_rips_complex import WeightedRipsComplex
+    dist = [[], [1]]
+    weights = [1, 100]
+    w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+    st = w_rips.create_simplex_tree(max_dimension=2)
+    print(list(st.get_filtration()))
+
+The output is:
+
+.. testoutput::
+
+    [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)]
+
+Combining with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook `_.
+
+.. testcode::
+
+    import numpy as np
+    from scipy.spatial.distance import cdist
+    from gudhi.point_cloud.dtm import DistanceToMeasure
+    from gudhi.weighted_rips_complex import WeightedRipsComplex
+    pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+    dist = cdist(pts, pts)
+    dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+    r = dtm.fit_transform(dist)
+    w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+    st = w_rips.create_simplex_tree(max_dimension=2)
+    print(st.persistence())
+
+.. testoutput::
+
+    [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))]
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index 83fa82c5..7401c428 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -11,23 +11,26 @@ from gudhi import SimplexTree
 
 class WeightedRipsComplex:
     """
-    Class to generate a weighted Rips complex from a distance matrix and weights on vertices.
+    Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
+    in the way described in the paper 'DTM-based filtrations' https://arxiv.org/abs/1811.04757.
+    Note that the filtration value of a vertex is twice its weight, for consistency with
+    RipsComplex; this differs from the definition in the paper.
     """
     def __init__(self,
                  distance_matrix,
-                 weights="diagonal",
+                 weights=None,
                  max_filtration=float('inf')):
         """
         Args:
-            distance_matrix (list of list of float): distance matrix (full square or lower triangular).
-            weights (list of float): (one half of) weight for each vertex.
+            distance_matrix (Sequence[Sequence[float]]): distance matrix (full square or lower triangular).
+            weights (Sequence[float]): (one half of) the weight of each vertex.
             max_filtration (float): specifies the maximal filtration value to be considered.
""" self.distance_matrix = distance_matrix - if weights == "diagonal": - self.weights = [distance_matrix[i][i] for i in range(len(distance_matrix))] - else: + if weights is not None: self.weights = weights + else: + self.weights = [0] * len(distance_matrix) self.max_filtration = max_filtration def create_simplex_tree(self, max_dimension): @@ -47,6 +50,7 @@ class WeightedRipsComplex: for i in range(num_pts): for j in range(i): value = max(2*F[i], 2*F[j], dist[i][j] + F[i] + F[j]) + # max is needed when F is not 1-Lipschitz if value <= self.max_filtration: st.insert([i,j], filtration=value) diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py index d3721115..59ec022a 100644 --- a/src/python/test/test_weighted_rips.py +++ b/src/python/test/test_weighted_rips.py @@ -51,7 +51,7 @@ def test_compatibility_with_filtered_rips(): assert st.num_vertices() == 4 def test_dtm_rips_complex(): - pts = np.array([[2.0, 2], [0, 1], [3, 4]]) + pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]]) dist = cdist(pts,pts) dtm = DistanceToMeasure(2, q=2, metric="precomputed") r = dtm.fit_transform(dist) -- cgit v1.2.3 From 0ed4c3bba47d1375acb49596db2c863c38e9a090 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 May 2020 08:39:11 +0200 Subject: Fix #299 --- src/python/doc/alpha_complex_sum.inc | 28 ++++---- src/python/doc/cubical_complex_user.rst | 4 +- src/python/doc/fileformats.rst | 2 - src/python/doc/installation.rst | 84 +++++++++++++--------- src/python/doc/nerve_gic_complex_user.rst | 2 +- src/python/doc/persistence_graphical_tools_sum.inc | 22 +++--- .../doc/persistence_graphical_tools_user.rst | 9 +-- src/python/doc/point_cloud.rst | 2 + src/python/doc/point_cloud_sum.inc | 21 +++--- src/python/doc/representations_sum.inc | 22 +++--- src/python/doc/wasserstein_distance_user.rst | 15 +++- src/python/gudhi/persistence_graphical_tools.py | 18 ++--- src/python/gudhi/point_cloud/knn.py | 4 ++ src/python/gudhi/point_cloud/timedelay.py | 5 +- src/python/gudhi/representations/metrics.py | 4 +- 15 files changed, 135 insertions(+), 107 deletions(-) diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc index 9e6414d0..74331333 100644 --- a/src/python/doc/alpha_complex_sum.inc +++ b/src/python/doc/alpha_complex_sum.inc @@ -1,17 +1,17 @@ .. table:: :widths: 30 40 30 - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | - | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | | - | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 | - | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | | - | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 `_) | - | | simplex is then said to be Gabriel), and as the minimum of the | | - | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | - | | Gabriel otherwise. | | - | | | | - | | For performances reasons, it is advised to use CGAL ≥ 5.0.0. 
| | - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` | - +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------+-------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | + | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | | + | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 | + | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | | + | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 `_) | + | | simplex is then said to be Gabriel), and as the minimum of the | | + | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | + | | Gabriel otherwise. | | + | | | | + | | For performances reasons, it is advised to use CGAL :math:`\geq` 5.0.0. | | + +----------------------------------------------------------------+-------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` | + +----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index e4733653..e6e61d75 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -91,7 +91,7 @@ Currently one input from a text file is used. It uses a format inspired from the we allow any filtration values. As a consequence one cannot use ``-1``'s to indicate missing cubes. If you have missing cubes in your complex, please set their filtration to :math:`+\infty` (aka. ``inf`` in the file). -The file format is described in details in :ref:`Perseus file format` file format section. +The file format is described in details in `Perseus file format `__ section. .. testcode:: @@ -120,7 +120,7 @@ conditions are imposed in all directions, then complex :math:`\mathcal{K}` becam various constructors from the file Bitmap_cubical_complex_periodic_boundary_conditions_base.h to construct cubical complex with periodic boundary conditions. -One can also use Perseus style input files (see :doc:`Perseus `) for the specific periodic case: +One can also use Perseus style input files (see `Perseus file format `__) for the specific periodic case: .. 
testcode:: diff --git a/src/python/doc/fileformats.rst b/src/python/doc/fileformats.rst index 345dfdba..ae1b00f3 100644 --- a/src/python/doc/fileformats.rst +++ b/src/python/doc/fileformats.rst @@ -80,8 +80,6 @@ Here is a simple sample file in the 3D case:: 1. 1. 1. -.. _Perseus file format: - Perseus ******* diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 09a843d5..d72e91b5 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -12,8 +12,8 @@ The easiest way to install the Python version of GUDHI is using Compiling ********* -The library uses c++14 and requires `Boost `_ ≥ 1.56.0, -`CMake `_ ≥ 3.1 to generate makefiles, +The library uses c++14 and requires `Boost `_ :math:`\geq` 1.56.0, +`CMake `_ :math:`\geq` 3.1 to generate makefiles, `NumPy `_, `Cython `_ and `pybind11 `_ to compile the GUDHI Python module. @@ -21,7 +21,7 @@ It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2017. On `Windows `_ , only Python -≥ 3.5 are available because of the required Visual Studio version. +:math:`\geq` 3.5 are available because of the required Visual Studio version. On other systems, if you have several Python/python installed, the version 2.X will be used by default, but you can force it by adding @@ -30,7 +30,8 @@ will be used by default, but you can force it by adding GUDHI Python module compilation =============================== -To build the GUDHI Python module, run the following commands in a terminal: +After making sure that the `Compilation dependencies`_ are properly installed, +one can build the GUDHI Python module, by running the following commands in a terminal: .. code-block:: bash @@ -188,8 +189,14 @@ Run the following commands in a terminal: Optional third-party library **************************** +Compilation dependencies +======================== + +These third party dependencies are detected by `CMake `_. +They have to be installed before performing the `GUDHI Python module compilation`_. + CGAL -==== +---- Some GUDHI modules (cf. :doc:`modules list `), and few examples require `CGAL `_, a C++ library that provides easy @@ -200,7 +207,7 @@ The procedure to install this library according to your operating system is detailed `here `_. -The following examples requires CGAL version ≥ 4.11.0: +The following examples requires CGAL version :math:`\geq` 4.11.0: .. only:: builder_html @@ -211,23 +218,15 @@ The following examples requires CGAL version ≥ 4.11.0: * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` -EagerPy -======= - -Some Python functions can handle automatic differentiation (possibly only when -a flag `enable_autodiff=True` is used). In order to reduce code duplication, we -use `EagerPy `_ which wraps arrays from -PyTorch, TensorFlow and JAX in a common interface. - Eigen -===== +----- Some GUDHI modules (cf. :doc:`modules list `), and few examples require `Eigen `_, a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms. -The following examples require `Eigen `_ version ≥ 3.1.0: +The following examples require `Eigen `_ version :math:`\geq` 3.1.0: .. 
only:: builder_html @@ -237,15 +236,39 @@ The following examples require `Eigen `_ version * :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>` * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` +Threading Building Blocks +------------------------- + +`Intel® TBB `_ lets you easily write +parallel C++ programs that take full advantage of multicore performance, that +are portable and composable, and that have future-proof scalability. + +Having Intel® TBB installed is recommended to parallelize and accelerate some +GUDHI computations. + +Run time dependencies +===================== + +These third party dependencies are detected by Python `import` mechanism at run time. +They can be installed when required. + +EagerPy +------- + +Some Python functions can handle automatic differentiation (possibly only when +a flag `enable_autodiff=True` is used). In order to reduce code duplication, we +use `EagerPy `_ which wraps arrays from +PyTorch, TensorFlow and JAX in a common interface. + Hnswlib -======= +------- :class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package `Hnswlib `_ as a backend if explicitly requested, to speed-up queries. Matplotlib -========== +---------- The :doc:`persistence graphical tools ` module requires `Matplotlib `_, a Python 2D plotting @@ -267,49 +290,46 @@ The following examples require the `Matplotlib `_: * :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>` PyKeOps -======= +------- :class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package `PyKeOps `_ as a backend if explicitly requested, to speed-up queries using a GPU. Python Optimal Transport -======================== +------------------------ The :doc:`Wasserstein distance ` module requires `POT `_, a library that provides several solvers for optimization problems related to Optimal Transport. PyTorch -======= +------- `PyTorch `_ is currently only used as a dependency of `PyKeOps`_, and in some tests. Scikit-learn -============ +------------ The :doc:`persistence representations ` module require `scikit-learn `_, a Python-based ecosystem of open-source software for machine learning. +:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package +`scikit-learn `_ as a backend if explicitly +requested. + SciPy -===== +----- The :doc:`persistence graphical tools ` and :doc:`Wasserstein distance ` modules require `SciPy `_, a Python-based ecosystem of open-source software for mathematics, science, and engineering. -Threading Building Blocks -========================= - -`Intel® TBB `_ lets you easily write -parallel C++ programs that take full advantage of multicore performance, that -are portable and composable, and that have future-proof scalability. - -Having Intel® TBB installed is recommended to parallelize and accelerate some -GUDHI computations. +:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package +`SciPy `_ as a backend if explicitly requested. 
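For illustration, a minimal sketch of such an explicit backend request, assuming the backend is selected through an ``implementation`` keyword (with values such as "ckdtree" for SciPy, "sklearn", "hnsw" for Hnswlib, "keops" for PyKeOps) and that the class follows the scikit-learn fit/transform style; none of these names are confirmed by the patch above:

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.rand(100, 3)
    # "implementation" is assumed to name the backend; "ckdtree" would request SciPy.
    knn = KNearestNeighbors(k=3, return_index=True, return_distance=True,
                            implementation="ckdtree")
    # With both return flags set, the query is assumed to yield the pair
    # (indices, distances), each of shape (100, 3).
    neighbors, distances = knn.fit_transform(pts)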
Bug reports and contributions ***************************** diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index 9101f45d..d5c5438d 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -13,7 +13,7 @@ Visualizations of the simplicial complexes can be done with either neato (from `graphviz `_), `geomview `_, `KeplerMapper `_. -Input point clouds are assumed to be OFF files (cf. :doc:`fileformats`). +Input point clouds are assumed to be OFF files (cf. `OFF file format `__). Covers ------ diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc index b68d3d7e..0f41b420 100644 --- a/src/python/doc/persistence_graphical_tools_sum.inc +++ b/src/python/doc/persistence_graphical_tools_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ - | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | - | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | - | | | :Since: GUDHI 2.0.0 | - | | Note that these functions return the matplotlib axis, allowing | | - | | for further modifications (title, aspect, etc.) | :License: MIT | - | | | | - | | | :Requires: matplotlib, numpy and scipy | - +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+ - | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` | - +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------+-----------------------------------------------------------------------+----------------------------------------------------------+ + | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | + | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | + | | | :Since: GUDHI 2.0.0 | + | | Note that these functions return the matplotlib axis, allowing | | + | | for further modifications (title, aspect, etc.) 
| :License: MIT | + | | | | + | | | :Requires: `Matplotlib `__ | + +-----------------------------------------------------------------+-----------------------------------------------------------------------+----------------------------------------------------------+ + | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` | + +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst index 91e52703..fce628b1 100644 --- a/src/python/doc/persistence_graphical_tools_user.rst +++ b/src/python/doc/persistence_graphical_tools_user.rst @@ -12,9 +12,6 @@ Definition Show persistence as a barcode ----------------------------- -.. note:: - this function requires matplotlib and numpy to be available - This function can display the persistence result as a barcode: .. plot:: @@ -36,9 +33,6 @@ This function can display the persistence result as a barcode: Show persistence as a diagram ----------------------------- -.. note:: - this function requires matplotlib and numpy to be available - This function can display the persistence result as a diagram: .. plot:: @@ -73,8 +67,7 @@ of shape (N x 2) encoding a persistence diagram (in a given dimension). Persistence density ------------------- -.. note:: - this function requires matplotlib, numpy and scipy to be available +:Requires: `SciPy `__ If you want more information on a specific dimension, for instance: diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index 192f70db..523a9dfa 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -16,6 +16,8 @@ File Readers Subsampling ----------- +:Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 + .. automodule:: gudhi.subsampling :members: :special-members: diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc index d4761aba..4315cea6 100644 --- a/src/python/doc/point_cloud_sum.inc +++ b/src/python/doc/point_cloud_sum.inc @@ -1,15 +1,12 @@ .. table:: :widths: 30 40 30 - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, | :Authors: Vincent Rouvreau, Marc Glisse, Masatoshi Takenouchi | - | | :math:`(y_1, y_2, \ldots, y_d)` | find neighbors, embed time series in higher dimension, etc. | | - | | | :Since: GUDHI 2.0.0 | - | | | | - | | | :License: MIT (`GPL v3 `_, BSD-3-Clause, Apache-2.0) | - | | Parts of this package require CGAL. 
| | - | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | - | | | | - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | * :doc:`point_cloud` | - +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+ + | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, | :Authors: Vincent Rouvreau, Marc Glisse, Masatoshi Takenouchi | + | | :math:`(y_1, y_2, \ldots, y_d)` | find neighbors, embed time series in higher dimension, etc. | | + | | | :Since: GUDHI 2.0.0 | + | | | | + | | | :License: MIT (`GPL v3 `_, BSD-3-Clause, Apache-2.0) | + +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+ + | * :doc:`point_cloud` | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index eac89b9d..cdad4716 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+ - | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | - | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | | - | | | :Since: GUDHI 3.1.0 | - | | | | - | | | :License: MIT | - | | | | - | | | :Requires: scikit-learn | - +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+ - | * :doc:`representations` | - +------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------+----------------------------------------------------------------+--------------------------------------------------------------+ + | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | + | img/sklearn-tda.png | diagrams, compatible with scikit-learn. 
| | + | | | :Since: GUDHI 3.1.0 | + | | | | + | | | :License: MIT | + | | | | + | | | :Requires: `Scikit-learn `__ | + +------------------------------------------------------------------+----------------------------------------------------------------+--------------------------------------------------------------+ + | * :doc:`representations` | + +------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index c443bab5..2d2e2ae7 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -17,12 +17,21 @@ are measured in norm p, for :math:`1 \leq p \leq \infty`. Distance Functions ------------------ -This first implementation uses the Python Optimal Transport library and is based -on ideas from "Large Scale Computation of Means and Cluster for Persistence + +Optimal Transport +***************** + +:Requires: `Python Optimal Transport `__ (POT) :math:`\geq` 0.5.1 + +This first implementation uses the `Python Optimal Transport `__ +library and is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport" :cite:`10.5555/3327546.3327645`. .. autofunction:: gudhi.wasserstein.wasserstein_distance +Hera +**** + This other implementation comes from `Hera `_ (BSD-3-Clause) which is based on "Geometry Helps to Compare Persistence Diagrams" @@ -94,6 +103,8 @@ The output is: Barycenters ----------- +:Requires: `Python Optimal Transport `__ (POT) :math:`\geq` 0.5.1 + A Frechet mean (or barycenter) is a generalization of the arithmetic mean in a non linear space such as the one of persistence diagrams. Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index cc3db467..e36af304 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -72,11 +72,11 @@ def plot_persistence_barcode( """This function plots the persistence bar code from persistence values list , a np.array of shape (N x 2) (representing a diagram in a single homology dimension), - or from a :doc:`persistence file `. + or from a `persistence diagram `__ file. :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). - :param persistence_file: A :doc:`persistence file ` style name + :param persistence_file: A `persistence diagram `__ file style name (reset persistence if both are set). :type persistence_file: string :param alpha: barcode transparency value (0.0 transparent through 1.0 @@ -214,11 +214,11 @@ def plot_persistence_diagram( ): """This function plots the persistence diagram from persistence values list, a np.array of shape (N x 2) representing a diagram in a single - homology dimension, or from a :doc:`persistence file `. + homology dimension, or from a `persistence diagram `__ file`. :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). 
- :param persistence_file: A :doc:`persistence file ` style name + :param persistence_file: A `persistence diagram `__ file style name (reset persistence if both are set). :type persistence_file: string :param alpha: plot transparency value (0.0 transparent through 1.0 @@ -369,17 +369,19 @@ def plot_persistence_density( """This function plots the persistence density from persistence values list, np.array of shape (N x 2) representing a diagram in a single homology dimension, - or from a :doc:`persistence file `. Be - aware that this function does not distinguish the dimension, it is + or from a `persistence diagram `__ file. + Be aware that this function does not distinguish the dimension, it is up to you to select the required one. This function also does not handle degenerate data set (scipy correlation matrix inversion can fail). + :Requires: `SciPy `__ + :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). - :param persistence_file: A :doc:`persistence file ` - style name (reset persistence if both are set). + :param persistence_file: A `persistence diagram `__ + file style name (reset persistence if both are set). :type persistence_file: string :param nbins: Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents (default is 300) diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 34e80b5d..19363097 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -19,6 +19,10 @@ __license__ = "MIT" class KNearestNeighbors: """ Class wrapping several implementations for computing the k nearest neighbors in a point set. + + :Requires: `PyKeOps `__, `SciPy `__, + `Scikit-learn `__, and/or `Hnswlib `__ + in function of the selected `implementation`. """ def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs): diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py index f01df442..5292e752 100644 --- a/src/python/gudhi/point_cloud/timedelay.py +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -10,9 +10,8 @@ import numpy as np class TimeDelayEmbedding: - """Point cloud transformation class. - Embeds time-series data in the R^d according to [Takens' Embedding Theorem] - (https://en.wikipedia.org/wiki/Takens%27s_theorem) and obtains the + """Point cloud transformation class. Embeds time-series data in the R^d according to + `Takens' Embedding Theorem `_ and obtains the coordinates of each point. Parameters diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index ce416fb1..0a6dd680 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -223,7 +223,9 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin): class BottleneckDistance(BaseEstimator, TransformerMixin): """ - This is a class for computing the bottleneck distance matrix from a list of persistence diagrams. + This is a class for computing the bottleneck distance matrix from a list of persistence diagrams. 
+ + :Requires: `CGAL `__ :math:`\geq` 4.11.0 """ def __init__(self, epsilon=None): """ -- cgit v1.2.3 From 627772e4c5bc7038b0814182dbb918b08356c892 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 11 May 2020 08:42:40 +0200 Subject: Fixed by @tlacombe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Lacombe --- src/python/gudhi/wasserstein/barycenter.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py index 1cf8edb3..7eeeae7a 100644 --- a/src/python/gudhi/wasserstein/barycenter.py +++ b/src/python/gudhi/wasserstein/barycenter.py @@ -52,9 +52,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that ``pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal. - - `"energy"`, ``float`` representing the Frechet energy value obtained. - - It is the mean of squared distances of observations to the output. + - `"energy"`, ``float`` representing the Frechet energy value obtained. It is the mean of squared distances of observations to the output. - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm. ''' @@ -149,4 +147,3 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): return Y, log else: return Y - -- cgit v1.2.3 From 779e4c4e8225e279ef8322988d4d06a6c2e06529 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 11 May 2020 08:43:06 +0200 Subject: Fixed by @tlacombe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Lacombe --- src/python/gudhi/wasserstein/barycenter.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py index 7eeeae7a..d67bcde7 100644 --- a/src/python/gudhi/wasserstein/barycenter.py +++ b/src/python/gudhi/wasserstein/barycenter.py @@ -47,10 +47,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False): If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate, and ``log`` is a ``dict`` that contains additional informations: - - `"groupings"`, a list of list of pairs ``(i,j)``. - - Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that ``pdiagset[k][i]`` is matched to ``Y[j]`` - if ``i = -1`` or ``j = -1``, it means they represent the diagonal. + - `"groupings"`, a list of list of pairs ``(i,j)``. Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that `pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal. - `"energy"`, ``float`` representing the Frechet energy value obtained. It is the mean of squared distances of observations to the output. 
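To make the return values documented in this docstring concrete, a minimal usage sketch of ``lagrangian_barycenter`` (the two input diagrams are arbitrary illustrations):

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter

    dg1 = np.array([[0.2, 0.5], [0.2, 1.0]])
    dg2 = np.array([[0.3, 0.6], [0.1, 0.4]])
    # With verbose=True the function returns the couple (Y, log) described above.
    Y, log = lagrangian_barycenter(pdiagset=[dg1, dg2], verbose=True)
    # log["groupings"][k] lists the pairs (i, j) matching pdiagset[k][i] with Y[j]
    # (i = -1 or j = -1 standing for the diagonal); log["energy"] is the Frechet
    # energy reached; log["nb_iter"] is the number of iterations before convergence.
    print(Y)
    print(log["energy"], log["nb_iter"])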
-- cgit v1.2.3 From 7e85b0451c686f043b61cde2e5f78674cf8de248 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 May 2020 09:31:49 +0200 Subject: Double underscore is not the correct syntax --- src/python/doc/alpha_complex_sum.inc | 28 +++++++++++----------- src/python/doc/bottleneck_distance_sum.inc | 22 ++++++++--------- src/python/doc/cubical_complex_user.rst | 4 ++-- src/python/doc/nerve_gic_complex_sum.inc | 26 ++++++++++---------- src/python/doc/nerve_gic_complex_user.rst | 2 +- src/python/doc/persistence_graphical_tools_sum.inc | 22 ++++++++--------- .../doc/persistence_graphical_tools_user.rst | 2 +- src/python/doc/point_cloud.rst | 2 +- src/python/doc/representations_sum.inc | 22 ++++++++--------- src/python/doc/tangential_complex_sum.inc | 22 ++++++++--------- src/python/doc/wasserstein_distance_user.rst | 6 ++--- src/python/doc/witness_complex_sum.inc | 28 +++++++++++----------- src/python/gudhi/persistence_graphical_tools.py | 14 +++++------ src/python/gudhi/point_cloud/knn.py | 4 ++-- src/python/gudhi/representations/metrics.py | 2 +- 15 files changed, 103 insertions(+), 103 deletions(-) diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc index 74331333..3aba0d71 100644 --- a/src/python/doc/alpha_complex_sum.inc +++ b/src/python/doc/alpha_complex_sum.inc @@ -1,17 +1,17 @@ .. table:: :widths: 30 40 30 - +----------------------------------------------------------------+-------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | - | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | | - | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 | - | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | | - | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 `_) | - | | simplex is then said to be Gabriel), and as the minimum of the | | - | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | - | | Gabriel otherwise. | | - | | | | - | | For performances reasons, it is advised to use CGAL :math:`\geq` 5.0.0. | | - +----------------------------------------------------------------+-------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` | - +----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ + | .. 
figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau | + | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | | + | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 | + | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | | + | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 `_) | + | | simplex is then said to be Gabriel), and as the minimum of the | | + | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen `_ :math:`\geq` 3.1.0 and `CGAL `_ :math:`\geq` 4.11.0 | + | | Gabriel otherwise. | | + | | | | + | | For performances reasons, it is advised to use CGAL :math:`\geq` 5.0.0. | | + +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ + | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` | + +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/bottleneck_distance_sum.inc b/src/python/doc/bottleneck_distance_sum.inc index 0de4625c..77dc368d 100644 --- a/src/python/doc/bottleneck_distance_sum.inc +++ b/src/python/doc/bottleneck_distance_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi | - | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. It's the shortest distance b for which there exists a | | - | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 | - | | diagonal points) such that any couple of matched points are at | | - | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 `_) | - | the longest edge | norm in :math:`\mathbb{R}^2`. | | - | | | :Requires: `CGAL `__ :math:`\geq` 4.11.0 | - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | * :doc:`bottleneck_distance_user` | | - +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+ + | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi | + | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. 
It's the shortest distance b for which there exists a | | + | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 | + | | diagonal points) such that any couple of matched points are at | | + | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 `_) | + | the longest edge | norm in :math:`\mathbb{R}^2`. | | + | | | :Requires: `CGAL `_ :math:`\geq` 4.11.0 | + +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+ + | * :doc:`bottleneck_distance_user` | | + +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst index e6e61d75..3fd4e27a 100644 --- a/src/python/doc/cubical_complex_user.rst +++ b/src/python/doc/cubical_complex_user.rst @@ -91,7 +91,7 @@ Currently one input from a text file is used. It uses a format inspired from the we allow any filtration values. As a consequence one cannot use ``-1``'s to indicate missing cubes. If you have missing cubes in your complex, please set their filtration to :math:`+\infty` (aka. ``inf`` in the file). -The file format is described in details in `Perseus file format `__ section. +The file format is described in details in `Perseus file format `_ section. .. testcode:: @@ -120,7 +120,7 @@ conditions are imposed in all directions, then complex :math:`\mathcal{K}` becam various constructors from the file Bitmap_cubical_complex_periodic_boundary_conditions_base.h to construct cubical complex with periodic boundary conditions. -One can also use Perseus style input files (see `Perseus file format `__) for the specific periodic case: +One can also use Perseus style input files (see `Perseus file format `_) for the specific periodic case: .. testcode:: diff --git a/src/python/doc/nerve_gic_complex_sum.inc b/src/python/doc/nerve_gic_complex_sum.inc index 7fe55aff..7db6c124 100644 --- a/src/python/doc/nerve_gic_complex_sum.inc +++ b/src/python/doc/nerve_gic_complex_sum.inc @@ -1,16 +1,16 @@ .. table:: :widths: 30 40 30 - +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+ - | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière | - | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | | - | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 | - | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | | - | | the image of a scalar-valued function defined on the data. 
| :License: MIT (`GPL v3 `_) | - | | | | - | | | :Requires: `CGAL `__ :math:`\geq` 4.11.0 | - | | | | - | | | | - +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+ - | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` | - +----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+ + | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière | + | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | | + | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 | + | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | | + | | the image of a scalar-valued function defined on the data. | :License: MIT (`GPL v3 `_) | + | | | | + | | | :Requires: `CGAL `_ :math:`\geq` 4.11.0 | + | | | | + | | | | + +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+ + | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` | + +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst index d5c5438d..0e67fc78 100644 --- a/src/python/doc/nerve_gic_complex_user.rst +++ b/src/python/doc/nerve_gic_complex_user.rst @@ -13,7 +13,7 @@ Visualizations of the simplicial complexes can be done with either neato (from `graphviz `_), `geomview `_, `KeplerMapper `_. -Input point clouds are assumed to be OFF files (cf. `OFF file format `__). +Input point clouds are assumed to be OFF files (cf. `OFF file format `_). Covers ------ diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc index 0f41b420..7ff63ae2 100644 --- a/src/python/doc/persistence_graphical_tools_sum.inc +++ b/src/python/doc/persistence_graphical_tools_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +-----------------------------------------------------------------+-----------------------------------------------------------------------+----------------------------------------------------------+ - | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | - | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | - | | | :Since: GUDHI 2.0.0 | - | | Note that these functions return the matplotlib axis, allowing | | - | | for further modifications (title, aspect, etc.) 
| :License: MIT | - | | | | - | | | :Requires: `Matplotlib `__ | - +-----------------------------------------------------------------+-----------------------------------------------------------------------+----------------------------------------------------------+ - | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` | - +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+ + | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe | + | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | | + | | | :Since: GUDHI 2.0.0 | + | | Note that these functions return the matplotlib axis, allowing | | + | | for further modifications (title, aspect, etc.) | :License: MIT | + | | | | + | | | :Requires: `Matplotlib `_ | + +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+ + | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` | + +-----------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst index fce628b1..b5a38eb1 100644 --- a/src/python/doc/persistence_graphical_tools_user.rst +++ b/src/python/doc/persistence_graphical_tools_user.rst @@ -67,7 +67,7 @@ of shape (N x 2) encoding a persistence diagram (in a given dimension). Persistence density ------------------- -:Requires: `SciPy `__ +:Requires: `SciPy `_ If you want more information on a specific dimension, for instance: diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index 523a9dfa..ffd8f85b 100644 --- a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -16,7 +16,7 @@ File Readers Subsampling ----------- -:Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 +:Requires: `Eigen `_ :math:`\geq` 3.1.0 and `CGAL `_ :math:`\geq` 4.11.0 .. automodule:: gudhi.subsampling :members: diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index cdad4716..323a0920 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +------------------------------------------------------------------+----------------------------------------------------------------+--------------------------------------------------------------+ - | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | - | img/sklearn-tda.png | diagrams, compatible with scikit-learn. 
| | - | | | :Since: GUDHI 3.1.0 | - | | | | - | | | :License: MIT | - | | | | - | | | :Requires: `Scikit-learn `__ | - +------------------------------------------------------------------+----------------------------------------------------------------+--------------------------------------------------------------+ - | * :doc:`representations` | - +------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+ + | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière | + | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | | + | | | :Since: GUDHI 3.1.0 | + | | | | + | | | :License: MIT | + | | | | + | | | :Requires: `Scikit-learn `_ | + +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+ + | * :doc:`representations` | + +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/tangential_complex_sum.inc b/src/python/doc/tangential_complex_sum.inc index 45ce2a66..22314a2d 100644 --- a/src/python/doc/tangential_complex_sum.inc +++ b/src/python/doc/tangential_complex_sum.inc @@ -1,14 +1,14 @@ .. table:: :widths: 30 40 30 - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin | - | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | | - | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Since: GUDHI 2.0.0 | - | | an unknown manifold. The running time depends only linearly on the | | - | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :License: MIT (`GPL v3 `_) | - | | dimension :math:`k`. | | - | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 | - +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+ - | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` | - +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ + | .. 
figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin | + | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | | + | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Since: GUDHI 2.0.0 | + | | an unknown manifold. The running time depends only linearly on the | | + | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :License: MIT (`GPL v3 `_) | + | | dimension :math:`k`. | | + | | | :Requires: `Eigen `_ :math:`\geq` 3.1.0 and `CGAL `_ :math:`\geq` 4.11.0 | + +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ + | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` | + +----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 2d2e2ae7..96ec7872 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -21,9 +21,9 @@ Distance Functions Optimal Transport ***************** -:Requires: `Python Optimal Transport `__ (POT) :math:`\geq` 0.5.1 +:Requires: `Python Optimal Transport `_ (POT) :math:`\geq` 0.5.1 -This first implementation uses the `Python Optimal Transport `__ +This first implementation uses the `Python Optimal Transport `_ library and is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport" :cite:`10.5555/3327546.3327645`. @@ -103,7 +103,7 @@ The output is: Barycenters ----------- -:Requires: `Python Optimal Transport `__ (POT) :math:`\geq` 0.5.1 +:Requires: `Python Optimal Transport `_ (POT) :math:`\geq` 0.5.1 A Frechet mean (or barycenter) is a generalization of the arithmetic mean in a non linear space such as the one of persistence diagrams. diff --git a/src/python/doc/witness_complex_sum.inc b/src/python/doc/witness_complex_sum.inc index 34d4df4a..4416fec0 100644 --- a/src/python/doc/witness_complex_sum.inc +++ b/src/python/doc/witness_complex_sum.inc @@ -1,18 +1,18 @@ .. table:: :widths: 30 40 30 - +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ - | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich | - | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | | - | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 | - | :figclass: align-center | The data structure is described in | | - | | :cite:`boissonnatmariasimplextreealgorithmica`. 
| :License: MIT (`GPL v3 `_ for Euclidean versions only) | - | | | | - | | | :Requires: `Eigen `__ :math:`\geq` 3.1.0 and `CGAL `__ :math:`\geq` 4.11.0 for Euclidean versions only | - +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ - | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` | - | | * :doc:`strong_witness_complex_ref` | - | | * :doc:`euclidean_witness_complex_ref` | - | | * :doc:`euclidean_strong_witness_complex_ref` | - +-------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich | + | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | | + | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 | + | :figclass: align-center | The data structure is described in | | + | | :cite:`boissonnatmariasimplextreealgorithmica`. | :License: MIT (`GPL v3 `_ for Euclidean versions only) | + | | | | + | | | :Requires: `Eigen `_ :math:`\geq` 3.1.0 and `CGAL `_ :math:`\geq` 4.11.0 for Euclidean versions only | + +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` | + | | * :doc:`strong_witness_complex_ref` | + | | * :doc:`euclidean_witness_complex_ref` | + | | * :doc:`euclidean_strong_witness_complex_ref` | + +-------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py index e36af304..d59e51a0 100644 --- a/src/python/gudhi/persistence_graphical_tools.py +++ b/src/python/gudhi/persistence_graphical_tools.py @@ -72,11 +72,11 @@ def plot_persistence_barcode( """This function plots the persistence bar code from persistence values list , a np.array of shape (N x 2) (representing a diagram in a single homology dimension), - or from a `persistence diagram `__ file. + or from a `persistence diagram `_ file. :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). 
- :param persistence_file: A `persistence diagram `__ file style name + :param persistence_file: A `persistence diagram `_ file style name (reset persistence if both are set). :type persistence_file: string :param alpha: barcode transparency value (0.0 transparent through 1.0 @@ -214,11 +214,11 @@ def plot_persistence_diagram( ): """This function plots the persistence diagram from persistence values list, a np.array of shape (N x 2) representing a diagram in a single - homology dimension, or from a `persistence diagram `__ file`. + homology dimension, or from a `persistence diagram `_ file`. :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). - :param persistence_file: A `persistence diagram `__ file style name + :param persistence_file: A `persistence diagram `_ file style name (reset persistence if both are set). :type persistence_file: string :param alpha: plot transparency value (0.0 transparent through 1.0 @@ -369,18 +369,18 @@ def plot_persistence_density( """This function plots the persistence density from persistence values list, np.array of shape (N x 2) representing a diagram in a single homology dimension, - or from a `persistence diagram `__ file. + or from a `persistence diagram `_ file. Be aware that this function does not distinguish the dimension, it is up to you to select the required one. This function also does not handle degenerate data set (scipy correlation matrix inversion can fail). - :Requires: `SciPy `__ + :Requires: `SciPy `_ :param persistence: Persistence intervals values list. Can be grouped by dimension or not. :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death). - :param persistence_file: A `persistence diagram `__ + :param persistence_file: A `persistence diagram `_ file style name (reset persistence if both are set). :type persistence_file: string :param nbins: Evaluate a gaussian kde on a regular grid of nbins x diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py index 19363097..86008bc3 100644 --- a/src/python/gudhi/point_cloud/knn.py +++ b/src/python/gudhi/point_cloud/knn.py @@ -20,8 +20,8 @@ class KNearestNeighbors: """ Class wrapping several implementations for computing the k nearest neighbors in a point set. - :Requires: `PyKeOps `__, `SciPy `__, - `Scikit-learn `__, and/or `Hnswlib `__ + :Requires: `PyKeOps `_, `SciPy `_, + `Scikit-learn `_, and/or `Hnswlib `_ in function of the selected `implementation`. """ diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py index 0a6dd680..8a32f7e9 100644 --- a/src/python/gudhi/representations/metrics.py +++ b/src/python/gudhi/representations/metrics.py @@ -225,7 +225,7 @@ class BottleneckDistance(BaseEstimator, TransformerMixin): """ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams. 
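As a rough usage sketch (assuming the scikit-learn style fit/transform interface implied by BaseEstimator and TransformerMixin, the `epsilon` parameter of the `__init__` below, and a CGAL-enabled build), the class could be exercised as::

    import numpy as np
    from gudhi.representations.metrics import BottleneckDistance

    # two toy persistence diagrams, each an (n x 2) array of (birth, death) pairs
    diagrams = [np.array([[0.0, 1.0], [0.5, 2.0]]),
                np.array([[0.1, 1.2]])]
    bd = BottleneckDistance(epsilon=0.001)    # epsilon=None requests the exact distance
    dist_matrix = bd.fit_transform(diagrams)  # pairwise bottleneck distance matrix
    print(dist_matrix.shape)                  # (2, 2)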
- :Requires: `CGAL `__ :math:`\geq` 4.11.0 + :Requires: `CGAL `_ :math:`\geq` 4.11.0 """ def __init__(self, epsilon=None): """ -- cgit v1.2.3 From 9bfee982ae6fa6d4ca64b16d4c37e6eadf27c27a Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 May 2020 11:10:12 +0200 Subject: Fix duplicate link --- src/python/doc/alpha_complex_user.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index de706de9..373853c8 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -11,8 +11,8 @@ Definition `AlphaComplex` is constructing a :doc:`SimplexTree ` using `Delaunay Triangulation `_ -:cite:`cgal:hdj-t-19b` from `CGAL `_ (the Computational Geometry Algorithms Library -:cite:`cgal:eb-19b`). +:cite:`cgal:hdj-t-19b` from the `Computational Geometry Algorithms Library `_ +(CGAL Library :cite:`cgal:eb-19b`). Remarks ^^^^^^^ -- cgit v1.2.3 From a9fa1ba093b13f847dd3921d0c3d2d44342a4dcd Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 11 May 2020 17:06:50 +0200 Subject: Update src/python/doc/installation.rst Co-authored-by: Marc Glisse --- src/python/doc/installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index d72e91b5..de09c5b3 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -207,7 +207,7 @@ The procedure to install this library according to your operating system is detailed `here `_. -The following examples requires CGAL version :math:`\geq` 4.11.0: +The following examples require CGAL version :math:`\geq` 4.11.0: .. only:: builder_html -- cgit v1.2.3 From 0c64c706fa2c298cac079c00f71ef95061f9e6f8 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 May 2020 17:14:22 +0200 Subject: doc review --- src/python/doc/alpha_complex_user.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst index 373853c8..d49f45b4 100644 --- a/src/python/doc/alpha_complex_user.rst +++ b/src/python/doc/alpha_complex_user.rst @@ -12,7 +12,7 @@ Definition `AlphaComplex` is constructing a :doc:`SimplexTree ` using `Delaunay Triangulation `_ :cite:`cgal:hdj-t-19b` from the `Computational Geometry Algorithms Library `_ -(CGAL Library :cite:`cgal:eb-19b`). +:cite:`cgal:eb-19b`. 
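A minimal sketch of the construction just described, using the standard Python interface with toy input points::

    import gudhi

    # the filtration value of each simplex is the square of the radius of its
    # smallest empty circumscribing ball in the Delaunay triangulation
    alpha = gudhi.AlphaComplex(points=[[1.0, 1.0], [7.0, 0.0], [4.0, 6.0], [9.0, 6.0]])
    simplex_tree = alpha.create_simplex_tree()
    print(simplex_tree.num_vertices(), simplex_tree.num_simplices())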
Remarks ^^^^^^^ -- cgit v1.2.3 From f94c2e1b7ba982fda62239f5c6b378bda867cd40 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 11 May 2020 19:56:06 +0200 Subject: More DOI in the biblio and update references from a preprint to the published version --- biblio/bibliography.bib | 8 +++++++- src/Persistent_cohomology/doc/Intro_persistent_cohomology.h | 2 +- src/common/doc/main_page.md | 2 +- src/python/doc/persistent_cohomology_sum.inc | 2 +- src/python/doc/persistent_cohomology_user.rst | 2 +- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index 99a15c5e..3ea2f59f 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -13,7 +13,9 @@ pages = {1--39}, publisher = {JMLR.org}, title = {{Statistical analysis and parameter selection for Mapper}}, volume = {19}, -year = {2018} +year = {2018}, +url = {http://jmlr.org/papers/v19/17-291.html}, +doi = {10.5555/3291125.3291137} } @inproceedings{Dey13, @@ -22,6 +24,7 @@ year = {2018} booktitle = {Proceedings of the Twenty-ninth Annual Symposium on Computational Geometry}, year = {2013}, pages = {107--116}, + doi = {10.1145/2462356.2462387} } @article{Carriere16, @@ -832,6 +835,7 @@ book{hatcher2002algebraic, number = {4}, year = {2010}, pages = {367-405}, + doi = {10.1007/s10208-010-9066-0}, ee = {http://dx.doi.org/10.1007/s10208-010-9066-0}, bibsource = {DBLP, http://dblp.uni-trier.de} } @@ -927,6 +931,7 @@ language={English} booktitle = {Symposium on Computational Geometry}, year = {2014}, pages = {345}, + doi = {10.1145/2582112.2582165}, ee = {http://doi.acm.org/10.1145/2582112.2582165}, bibsource = {DBLP, http://dblp.uni-trier.de} } @@ -1241,6 +1246,7 @@ year = "2011" title={Fr{\'e}chet means for distributions of persistence diagrams}, author={Turner, Katharine and Mileyko, Yuriy and Mukherjee, Sayan and Harer, John}, journal={Discrete \& Computational Geometry}, + doi={10.1007/s00454-014-9604-7}, volume={52}, number={1}, pages={44--70}, diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h index 46b784d8..b4f9fd2c 100644 --- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h +++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h @@ -21,7 +21,7 @@ namespace persistent_cohomology { \author Clément Maria Computation of persistent cohomology using the algorithm of - \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:journals/corr/abs-1208-5018 + \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:conf/compgeom/DeyFW14 and the Compressed Annotation Matrix implementation of \cite DBLP:conf/esa/BoissonnatDM13 diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md index 6ea10b88..a33d98cd 100644 --- a/src/common/doc/main_page.md +++ b/src/common/doc/main_page.md @@ -312,7 +312,7 @@ theory is essentially composed of three elements: topological spaces, their homology groups and an evolution scheme. Computation of persistent cohomology using the algorithm of \cite DBLP:journals/dcg/SilvaMV11 and - \cite DBLP:journals/corr/abs-1208-5018 and the Compressed Annotation Matrix implementation of + \cite DBLP:conf/compgeom/DeyFW14 and the Compressed Annotation Matrix implementation of \cite DBLP:conf/esa/BoissonnatDM13 . 
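For readers following the algorithmic references above, a sketch of how this computation is typically driven from Python (a Rips filtration stands in for any filtered complex; coefficients in Z/2Z)::

    import gudhi

    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    rips = gudhi.RipsComplex(points=points, max_edge_length=2.0)
    simplex_tree = rips.create_simplex_tree(max_dimension=2)
    # persistent cohomology of the filtration, over the field Z/2Z
    diagram = simplex_tree.persistence(homology_coeff_field=2, min_persistence=0.0)
    print(diagram)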
diff --git a/src/python/doc/persistent_cohomology_sum.inc b/src/python/doc/persistent_cohomology_sum.inc index 0effb50f..a1ff2eee 100644 --- a/src/python/doc/persistent_cohomology_sum.inc +++ b/src/python/doc/persistent_cohomology_sum.inc @@ -12,7 +12,7 @@ | | | | | | Computation of persistent cohomology using the algorithm of | | | | :cite:`DBLP:journals/dcg/SilvaMV11` and | | - | | :cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed | | + | | :cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed | | | | Annotation Matrix implementation of | | | | :cite:`DBLP:conf/esa/BoissonnatDM13`. | | | | | | diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst index 4d743aac..a3f294b2 100644 --- a/src/python/doc/persistent_cohomology_user.rst +++ b/src/python/doc/persistent_cohomology_user.rst @@ -21,7 +21,7 @@ Definition Computation of persistent cohomology using the algorithm of :cite:`DBLP:journals/dcg/SilvaMV11` and -:cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed Annotation Matrix implementation of +:cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed Annotation Matrix implementation of :cite:`DBLP:conf/esa/BoissonnatDM13`. The theory of homology consists in attaching to a topological space a sequence of (homology) groups, capturing global -- cgit v1.2.3 From 6c17494e02721ca826750155bac14c7f91a173fa Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Tue, 12 May 2020 09:37:32 +0900 Subject: reference and comments added --- biblio/bibliography.bib | 26 ++++++++++++++++++++++++++ src/python/CMakeLists.txt | 4 +++- src/python/doc/rips_complex_ref.rst | 4 +++- src/python/gudhi/weighted_rips_complex.py | 6 +++--- src/python/test/test_weighted_rips.py | 4 ++-- 5 files changed, 37 insertions(+), 7 deletions(-) diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib index 99a15c5e..f405b9bb 100644 --- a/biblio/bibliography.bib +++ b/biblio/bibliography.bib @@ -1247,3 +1247,29 @@ year = "2011" year={2014}, publisher={Springer} } + +@inproceedings{dtmfiltrations, + author = {Hirokazu Anai and + Fr{\'{e}}d{\'{e}}ric Chazal and + Marc Glisse and + Yuichi Ike and + Hiroya Inakoshi and + Rapha{\"{e}}l Tinarrage and + Yuhei Umeda}, + editor = {Gill Barequet and + Yusu Wang}, + title = {DTM-Based Filtrations}, + booktitle = {35th International Symposium on Computational Geometry, SoCG 2019, + June 18-21, 2019, Portland, Oregon, {USA}}, + series = {LIPIcs}, + volume = {129}, + pages = {58:1--58:15}, + publisher = {Schloss Dagstuhl - Leibniz-Zentrum f{\"{u}}r Informatik}, + year = {2019}, + url = {https://doi.org/10.4230/LIPIcs.SoCG.2019.58}, + doi = {10.4230/LIPIcs.SoCG.2019.58}, + timestamp = {Tue, 11 Feb 2020 15:52:14 +0100}, + biburl = {https://dblp.org/rec/conf/compgeom/AnaiCGIITU19.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index adf4923b..0aa55467 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -487,7 +487,9 @@ if(PYTHONINTERP_FOUND) endif() # Weighted Rips - add_gudhi_py_test(test_weighted_rips) + if(SCIPY_FOUND) + add_gudhi_py_test(test_weighted_rips) + endif() # Set missing or not modules set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES") diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst index 8fc7e1b0..3c25564a 100644 --- a/src/python/doc/rips_complex_ref.rst +++ b/src/python/doc/rips_complex_ref.rst @@ -25,7 +25,7 @@ Weighted Rips 
complex reference manual .. automethod:: gudhi.WeightedRipsComplex.__init__ Basic examples -------------- +-------------- The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices. @@ -60,6 +60,8 @@ Combining with DistanceToMeasure, one can compute the DTM-filtration of a point st = w_rips.create_simplex_tree(max_dimension=2) print(st.persistence()) +The output is: + .. testoutput:: [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))] diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py index 7401c428..bccac1ff 100644 --- a/src/python/gudhi/weighted_rips_complex.py +++ b/src/python/gudhi/weighted_rips_complex.py @@ -12,9 +12,9 @@ from gudhi import SimplexTree class WeightedRipsComplex: """ Class to generate a weighted Rips complex from a distance matrix and weights on vertices, - in the way described in the paper 'DTM-based filtrations' https://arxiv.org/abs/1811.04757. - Remark that the filtration value of a vertex is twice of its weight for the consistency with - RipsComplex, which is different from the definition in the paper. + in the way described in :cite:`dtmfiltrations`. + Remark that all the filtration values of vertices are twice of the given weights for the consistency + with RipsComplex, which is different from the definition in the paper. """ def __init__(self, distance_matrix, diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py index 59ec022a..7ef48333 100644 --- a/src/python/test/test_weighted_rips.py +++ b/src/python/test/test_weighted_rips.py @@ -35,8 +35,8 @@ def test_compatibility_with_rips(): ([0, 2], 1.0), ([1, 3], 1.0), ([2, 3], 1.0), - ([1, 2], 1.4142135623730951), - ([0, 3], 1.4142135623730951), + ([1, 2], sqrt(2)), + ([0, 3], sqrt(2)), ] def test_compatibility_with_filtered_rips(): -- cgit v1.2.3 From a9c1e13e7f994e5c8d9f1c3d0311a5815df1e67d Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Tue, 12 May 2020 11:10:16 +0900 Subject: document fixed --- src/python/doc/rips_complex_ref.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst index 3c25564a..8946d156 100644 --- a/src/python/doc/rips_complex_ref.rst +++ b/src/python/doc/rips_complex_ref.rst @@ -22,7 +22,7 @@ Weighted Rips complex reference manual :undoc-members: :show-inheritance: - .. automethod:: gudhi.WeightedRipsComplex.__init__ + .. automethod:: gudhi.weighted_rips_complex.WeightedRipsComplex.__init__ Basic examples -------------- -- cgit v1.2.3 From 23547c0cbbe9e42b4dfadec3a116751302fd19ab Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Tue, 12 May 2020 11:41:03 +0900 Subject: document fixed --- src/python/doc/rips_complex_ref.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst index 8946d156..1f73f95b 100644 --- a/src/python/doc/rips_complex_ref.rst +++ b/src/python/doc/rips_complex_ref.rst @@ -17,7 +17,7 @@ Rips complex reference manual Weighted Rips complex reference manual ====================================== -.. autoclass:: gudhi.WeightedRipsComplex +.. 
autoclass:: gudhi.WeightedRipsComplex
+.. autoclass:: gudhi.weighted_rips_complex.WeightedRipsComplex
    :members:
    :undoc-members:
    :show-inheritance:
-- cgit v1.2.3

From 2c4049895bb2844c2ad1b43b9df51ad5b259fc39 Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Tue, 12 May 2020 13:09:40 +0900
Subject: a test in a document fixed

---
 src/python/doc/rips_complex_ref.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst
index 1f73f95b..a5b4ffed 100644
--- a/src/python/doc/rips_complex_ref.rst
+++ b/src/python/doc/rips_complex_ref.rst
@@ -36,7 +36,7 @@ The following example computes the weighted Rips filtration associated with a di
     weights = [1, 100]
     w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
     st = w_rips.create_simplex_tree(max_dimension=2)
-    print(st.get_filtration())
+    print(list(st.get_filtration()))

 The output is:

-- cgit v1.2.3

From c60caee5623d0b1ef55e7b2a5854604080419df1 Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Tue, 12 May 2020 15:06:55 +0900
Subject: comment modified

---
 src/python/gudhi/weighted_rips_complex.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index bccac1ff..0541572b 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -13,8 +13,8 @@ class WeightedRipsComplex:
     """
     Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
     in the way described in :cite:`dtmfiltrations`.
-    Remark that all the filtration values of vertices are twice of the given weights for the consistency
-    with RipsComplex, which is different from the definition in the paper.
+    Remark that all the filtration values are doubled compared to the definition in the paper
+    for the consistency with RipsComplex.
     """
     def __init__(self,
                  distance_matrix,
-- cgit v1.2.3

From fd7112b7e665d495543d9647f675a14f75061bbf Mon Sep 17 00:00:00 2001
From: yuichi-ike
Date: Wed, 13 May 2020 09:54:47 +0900
Subject: documents modified

---
 src/python/doc/rips_complex_ref.rst  | 42 ------------------------------
 src/python/doc/rips_complex_sum.inc  |  3 +++
 src/python/doc/rips_complex_user.rst | 48 ++++++++++++++++++++++++++++++++++++
 3 files changed, 51 insertions(+), 42 deletions(-)

diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst
index a5b4ffed..9ae3c49c 100644
--- a/src/python/doc/rips_complex_ref.rst
+++ b/src/python/doc/rips_complex_ref.rst
@@ -23,45 +23,3 @@ Weighted Rips complex reference manual
    :show-inheritance:

    .. automethod:: gudhi.weighted_rips_complex.WeightedRipsComplex.__init__
-
-Basic examples
---------------
-
-The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices.
-
-.. testcode::
-
-    from gudhi.weighted_rips_complex import WeightedRipsComplex
-    dist = [[], [1]]
-    weights = [1, 100]
-    w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
-    st = w_rips.create_simplex_tree(max_dimension=2)
-    print(list(st.get_filtration()))
-
-The output is:
-
-.. testoutput::
-
-    [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)]
-
-Combining with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook `_.
-
-.. 
testcode:: - - import numpy as np - from scipy.spatial.distance import cdist - from gudhi.point_cloud.dtm import DistanceToMeasure - from gudhi.weighted_rips_complex import WeightedRipsComplex - pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]]) - dist = cdist(pts,pts) - dtm = DistanceToMeasure(2, q=2, metric="precomputed") - r = dtm.fit_transform(dist) - w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r) - st = w_rips.create_simplex_tree(max_dimension=2) - print(st.persistence()) - -The output is: - -.. testoutput:: - - [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))] diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc index 6feb74cd..f7580714 100644 --- a/src/python/doc/rips_complex_sum.inc +++ b/src/python/doc/rips_complex_sum.inc @@ -11,6 +11,9 @@ | | | | | | This complex can be built from a point cloud and a distance function, | | | | or from a distance matrix. | | + | | | | + | | Weighted Rips complex constructs a simplicial complex from a distance | | + | | matrix and weights on vertices. | | +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+ | * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` | +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index 8efb12e6..adb002a8 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -347,3 +347,51 @@ until dimension 1 - one skeleton graph in other words), the output is: points in the persistence diagram will be under the diagonal, and bottleneck distance and persistence graphical tool will not work properly, this is a known issue. + +Weighted Rips Complex +--------------------- + +Example from a distance matrix and weights +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices. + +.. testcode:: + + from gudhi.weighted_rips_complex import WeightedRipsComplex + dist = [[], [1]] + weights = [1, 100] + w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights) + st = w_rips.create_simplex_tree(max_dimension=2) + print(list(st.get_filtration())) + +The output is: + +.. testoutput:: + + [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)] + +Example from a point cloud combined with DistanceToMeasure +---------------------------------------------------------- + +Combining with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook `_. + +.. testcode:: + + import numpy as np + from scipy.spatial.distance import cdist + from gudhi.point_cloud.dtm import DistanceToMeasure + from gudhi.weighted_rips_complex import WeightedRipsComplex + pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]]) + dist = cdist(pts,pts) + dtm = DistanceToMeasure(2, q=2, metric="precomputed") + r = dtm.fit_transform(dist) + w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r) + st = w_rips.create_simplex_tree(max_dimension=2) + print(st.persistence()) + +The output is: + +.. 
testoutput:: + + [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))] -- cgit v1.2.3 From 7b4ffb762edae9036cbec12b34eeb64f2cffd0e7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 13 May 2020 18:12:58 +0200 Subject: Rephrase comment about cubes --- .../include/gudhi/Bitmap_cubical_complex_base.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index e0c567ae..99487dc3 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -112,9 +112,9 @@ class Bitmap_cubical_complex_base { /** * This function finds a top-dimensional cell that is incident to the input cell and has * the same filtration value. In case several cells are suitable, an arbitrary one is - * returned. Note that the input parameter is not necessarily a cube, it might also - * be an edge or vertex of a cube. On the other hand, the output is always indicating the position of - * a cube in the data structure. + * returned. Note that the input parameter can be a cell of any dimension (vertex, edge, etc). + * On the other hand, the output is always indicating the position of + * a top-dimensional cube in the data structure. **/ inline size_t get_top_dimensional_coface_of_a_cell(size_t splx); -- cgit v1.2.3 From 5c3e042628b7db2b82d92f644f7ab0fc409a357b Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 13 May 2020 18:45:28 +0200 Subject: BOOST_UNREACHABLE_RETURN + comment --- .../include/gudhi/Bitmap_cubical_complex_base.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 99487dc3..5927bbec 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -13,6 +13,8 @@ #include +#include + #include #include #include @@ -115,6 +117,7 @@ class Bitmap_cubical_complex_base { * returned. Note that the input parameter can be a cell of any dimension (vertex, edge, etc). * On the other hand, the output is always indicating the position of * a top-dimensional cube in the data structure. + * \pre The filtration values are assigned as per `impose_lower_star_filtration()`. **/ inline size_t get_top_dimensional_coface_of_a_cell(size_t splx); @@ -621,7 +624,7 @@ size_t Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(size } } } - return splx; + BOOST_UNREACHABLE_RETURN(-2); } template -- cgit v1.2.3 From b2118cde83056b43cea095f5208d37744c9f088f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 13 May 2020 18:51:16 +0200 Subject: compute_persistence --- src/python/gudhi/cubical_complex.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index b16a037f..9ebd0b30 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -200,7 +200,7 @@ cdef class CubicalComplex: integers of each row in each array correspond to: (index of positive top-dimensional cell). """ - assert self.pcohptr != NULL, "cofaces_of_persistence_pairs function requires persistence function to be launched first." 
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()" cdef vector[vector[int]] persistence_result output = [[],[]] -- cgit v1.2.3 From 7bbc1ae35d492123c517a54a9595188938e52dff Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 13 May 2020 19:32:21 +0200 Subject: More size_t --- .../include/Persistent_cohomology_interface.h | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index cec18546..e5a3dfba 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -13,6 +13,7 @@ #include +#include #include #include // for std::pair #include // for sort @@ -81,32 +82,31 @@ persistent_cohomology::Persistent_cohomology::get_persistent_pairs(); // Gather all top-dimensional cells and store their simplex handles - std::vector max_splx; for (auto splx : stptr_->top_dimensional_cells_range()){ - max_splx.push_back(splx); - } + std::vector max_splx; + for (auto splx : stptr_->top_dimensional_cells_range()) + max_splx.push_back(splx); // Sort these simplex handles and compute the ordering function - // This function allows to go directly from the simplex handle to the position of the corresponding top-dimensional cell in the input data - std::unordered_map order; - //std::sort(max_splx.begin(), max_splx.end()); + // This function allows to go directly from the simplex handle to the position of the corresponding top-dimensional cell in the input data + std::unordered_map order; + //std::sort(max_splx.begin(), max_splx.end()); for (unsigned int i = 0; i < max_splx.size(); i++) order.emplace(max_splx[i], i); std::vector> persistence_pairs; for (auto pair : pairs) { int h = stptr_->dimension(get<0>(pair)); // Recursively get the top-dimensional cell / coface associated to the persistence generator - int face0 = stptr_->get_top_dimensional_coface_of_a_cell(get<0>(pair)); + std::size_t face0 = stptr_->get_top_dimensional_coface_of_a_cell(get<0>(pair)); // Retrieve the index of the corresponding top-dimensional cell in the input data int splx0 = order[face0]; int splx1 = -1; - if (isfinite(stptr_->filtration(get<1>(pair)))){ - // Recursively get the top-dimensional cell / coface associated to the persistence generator - int face1 = stptr_->get_top_dimensional_coface_of_a_cell(get<1>(pair)); - // Retrieve the index of the corresponding top-dimensional cell in the input data - splx1 = order[face1]; + if (get<1>(pair) != stptr_->null_simplex()){ + // Recursively get the top-dimensional cell / coface associated to the persistence generator + std::size_t face1 = stptr_->get_top_dimensional_coface_of_a_cell(get<1>(pair)); + // Retrieve the index of the corresponding top-dimensional cell in the input data + splx1 = order[face1]; } - std::vector vect{ h, splx0, splx1}; - persistence_pairs.push_back(vect); + persistence_pairs.push_back({ h, splx0, splx1 }); } return persistence_pairs; } -- cgit v1.2.3 From b0ae08e93fdba8a1faec56c2230b6f542653c49e Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 13 May 2020 20:17:26 +0200 Subject: Trailing whitespace --- .../include/gudhi/Bitmap_cubical_complex_base.h | 8 ++--- src/python/gudhi/cubical_complex.pyx | 34 +++++++++++----------- src/python/gudhi/periodic_cubical_complex.pyx | 34 +++++++++++----------- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git 
a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h index 5927bbec..58d9208d 100644 --- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h +++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h @@ -112,8 +112,8 @@ class Bitmap_cubical_complex_base { virtual inline std::vector get_coboundary_of_a_cell(std::size_t cell) const; /** - * This function finds a top-dimensional cell that is incident to the input cell and has - * the same filtration value. In case several cells are suitable, an arbitrary one is + * This function finds a top-dimensional cell that is incident to the input cell and has + * the same filtration value. In case several cells are suitable, an arbitrary one is * returned. Note that the input parameter can be a cell of any dimension (vertex, edge, etc). * On the other hand, the output is always indicating the position of * a top-dimensional cube in the data structure. @@ -617,12 +617,12 @@ void Bitmap_cubical_complex_base::setup_bitmap_based_on_top_dimensional_cells template size_t Bitmap_cubical_complex_base::get_top_dimensional_coface_of_a_cell(size_t splx) { if (this->get_dimension_of_a_cell(splx) == this->dimension()){return splx;} - else{ + else{ for (auto v : this->get_coboundary_of_a_cell(splx)){ if(this->get_cell_data(v) == this->get_cell_data(splx)){ return this->get_top_dimensional_coface_of_a_cell(v); } - } + } } BOOST_UNREACHABLE_RETURN(-2); } diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx index 9ebd0b30..ca979eda 100644 --- a/src/python/gudhi/cubical_complex.pyx +++ b/src/python/gudhi/cubical_complex.pyx @@ -172,31 +172,31 @@ cdef class CubicalComplex: return self.pcohptr.get_persistence() def cofaces_of_persistence_pairs(self): - """A persistence interval is described by a pair of cells, one that creates the - feature and one that kills it. The filtration values of those 2 cells give coordinates - for a point in a persistence diagram, or a bar in a barcode. Structurally, in the - cubical complexes provided here, the filtration value of any cell is the minimum of the - filtration values of the maximal cells that contain it. Connecting persistence diagram - coordinates to the corresponding value in the input (i.e. the filtration values of + """A persistence interval is described by a pair of cells, one that creates the + feature and one that kills it. The filtration values of those 2 cells give coordinates + for a point in a persistence diagram, or a bar in a barcode. Structurally, in the + cubical complexes provided here, the filtration value of any cell is the minimum of the + filtration values of the maximal cells that contain it. Connecting persistence diagram + coordinates to the corresponding value in the input (i.e. the filtration values of the top-dimensional cells) is useful for differentiation purposes. - This function returns a list of pairs of top-dimensional cells corresponding to - the persistence birth and death cells of the filtration. The cells are represented by - their indices in the input list of top-dimensional cells (and not their indices in the - internal datastructure that includes non-maximal cells). Note that when two adjacent + This function returns a list of pairs of top-dimensional cells corresponding to + the persistence birth and death cells of the filtration. 
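In concrete terms, a sketch of the call sequence this docstring describes (using the public CubicalComplex API; per the assert fixed above, compute_persistence() must run first)::

    import numpy as np
    import gudhi

    # a 3 x 3 array of top-dimensional cells, e.g. pixel values
    cc = gudhi.CubicalComplex(top_dimensional_cells=np.arange(9.0).reshape(3, 3))
    cc.compute_persistence()
    regular, essential = cc.cofaces_of_persistence_pairs()
    # regular[dim] is an (N, 2) array of (birth cell, death cell) indices;
    # essential[dim] is an (N, 1) array holding the birth cell of immortal features
    print(regular, essential)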
The cells are represented by + their indices in the input list of top-dimensional cells (and not their indices in the + internal datastructure that includes non-maximal cells). Note that when two adjacent top-dimensional cells have the same filtration value, we arbitrarily return one of the two when calling the function on one of their common faces. - :returns: The top-dimensional cells/cofaces of the positive and negative cells, + :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension, in two lists of numpy arrays of integers. - The first list contains the regular persistence pairs, grouped by dimension. + The first list contains the regular persistence pairs, grouped by dimension. It contains numpy arrays of shape [number_of_persistence_points, 2]. - The indices of the arrays in the list correspond to the homological dimensions, and the - integers of each row in each array correspond to: (index of positive top-dimensional cell, - index of negative top-dimensional cell). - The second list contains the essential features, grouped by dimension. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell, + index of negative top-dimensional cell). + The second list contains the essential features, grouped by dimension. It contains numpy arrays of shape [number_of_persistence_points, 1]. - The indices of the arrays in the list correspond to the homological dimensions, and the + The indices of the arrays in the list correspond to the homological dimensions, and the integers of each row in each array correspond to: (index of positive top-dimensional cell). """ diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx index 3cf2ff01..06309772 100644 --- a/src/python/gudhi/periodic_cubical_complex.pyx +++ b/src/python/gudhi/periodic_cubical_complex.pyx @@ -177,31 +177,31 @@ cdef class PeriodicCubicalComplex: return self.pcohptr.get_persistence() def cofaces_of_persistence_pairs(self): - """A persistence interval is described by a pair of cells, one that creates the - feature and one that kills it. The filtration values of those 2 cells give coordinates - for a point in a persistence diagram, or a bar in a barcode. Structurally, in the - cubical complexes provided here, the filtration value of any cell is the minimum of the - filtration values of the maximal cells that contain it. Connecting persistence diagram - coordinates to the corresponding value in the input (i.e. the filtration values of + """A persistence interval is described by a pair of cells, one that creates the + feature and one that kills it. The filtration values of those 2 cells give coordinates + for a point in a persistence diagram, or a bar in a barcode. Structurally, in the + cubical complexes provided here, the filtration value of any cell is the minimum of the + filtration values of the maximal cells that contain it. Connecting persistence diagram + coordinates to the corresponding value in the input (i.e. the filtration values of the top-dimensional cells) is useful for differentiation purposes. - This function returns a list of pairs of top-dimensional cells corresponding to - the persistence birth and death cells of the filtration. 
The cells are represented by - their indices in the input list of top-dimensional cells (and not their indices in the - internal datastructure that includes non-maximal cells). Note that when two adjacent + This function returns a list of pairs of top-dimensional cells corresponding to + the persistence birth and death cells of the filtration. The cells are represented by + their indices in the input list of top-dimensional cells (and not their indices in the + internal datastructure that includes non-maximal cells). Note that when two adjacent top-dimensional cells have the same filtration value, we arbitrarily return one of the two when calling the function on one of their common faces. - :returns: The top-dimensional cells/cofaces of the positive and negative cells, + :returns: The top-dimensional cells/cofaces of the positive and negative cells, together with the corresponding homological dimension, in two lists of numpy arrays of integers. - The first list contains the regular persistence pairs, grouped by dimension. + The first list contains the regular persistence pairs, grouped by dimension. It contains numpy arrays of shape [number_of_persistence_points, 2]. - The indices of the arrays in the list correspond to the homological dimensions, and the - integers of each row in each array correspond to: (index of positive top-dimensional cell, - index of negative top-dimensional cell). - The second list contains the essential features, grouped by dimension. + The indices of the arrays in the list correspond to the homological dimensions, and the + integers of each row in each array correspond to: (index of positive top-dimensional cell, + index of negative top-dimensional cell). + The second list contains the essential features, grouped by dimension. It contains numpy arrays of shape [number_of_persistence_points, 1]. - The indices of the arrays in the list correspond to the homological dimensions, and the + The indices of the arrays in the list correspond to the homological dimensions, and the integers of each row in each array correspond to: (index of positive top-dimensional cell). """ cdef vector[vector[int]] persistence_result -- cgit v1.2.3 From 4d27d32308f94e63d76bbd5564b8837b94b24339 Mon Sep 17 00:00:00 2001 From: yuichi-ike Date: Thu, 14 May 2020 17:56:10 +0900 Subject: document modified --- src/python/doc/rips_complex_ref.rst | 2 ++ src/python/doc/rips_complex_user.rst | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst index 9ae3c49c..5f3e46c1 100644 --- a/src/python/doc/rips_complex_ref.rst +++ b/src/python/doc/rips_complex_ref.rst @@ -13,6 +13,8 @@ Rips complex reference manual .. automethod:: gudhi.RipsComplex.__init__ +.. _weighted-rips-complex-reference-manual: + ====================================== Weighted Rips complex reference manual ====================================== diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst index adb002a8..819568be 100644 --- a/src/python/doc/rips_complex_user.rst +++ b/src/python/doc/rips_complex_user.rst @@ -351,6 +351,9 @@ until dimension 1 - one skeleton graph in other words), the output is: Weighted Rips Complex --------------------- +`WeightedRipsComplex `_ builds a simplicial complex from a distance matrix and weights on vertices. 
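One sanity check worth noting before the examples that follow (a sketch grounded in test_compatibility_with_rips from this series): with all weights set to zero, the weighted Rips filtration reduces to the ordinary Rips filtration of the same distance matrix::

    from scipy.spatial.distance import cdist
    from gudhi.weighted_rips_complex import WeightedRipsComplex

    pts = [[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]]
    dist = cdist(pts, pts)
    w_rips = WeightedRipsComplex(distance_matrix=dist, weights=[0.0] * len(pts))
    st = w_rips.create_simplex_tree(max_dimension=2)
    # with zero weights, edge filtration values coincide with pairwise distances
    print(list(st.get_filtration()))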
+
+
 Example from a distance matrix and weights
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -372,7 +375,7 @@ The output is:
     [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)]

 Example from a point cloud combined with DistanceToMeasure
-----------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Combining with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook `_.
-- cgit v1.2.3

From a74503eca0f30a8183719008cd02b48823ba72d4 Mon Sep 17 00:00:00 2001
From: ROUVREAU Vincent
Date: Sat, 16 May 2020 09:52:47 +0200
Subject: Release note for version 3.2.0

---
 .github/next_release.md | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)

diff --git a/.github/next_release.md b/.github/next_release.md
index 83b98a1c..1112ef70 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,21 +1,50 @@
-We are pleased to announce the release 3.X.X of the GUDHI library.
+We are pleased to announce the release 3.2.0 of the GUDHI library.

 As a major new feature, the GUDHI library now offers a Python interface to [Hera](https://bitbucket.org/grey_narn/hera/src/master/) to compute the Wasserstein distance.
 [PyBind11](https://github.com/pybind/pybind11) is now required to build the Python module.

-We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
+We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.2.0.tar.gz).

 Below is a list of changes made since GUDHI 3.1.1:

+- Point cloud utilities
+  - A new module [Time Delay Embedding](https://gudhi.inria.fr/python/latest/point_cloud.html#time-delay-embedding)
+    to embed time-series data in R^d according to [Takens' Embedding Theorem](https://en.wikipedia.org/wiki/Takens%27s_theorem)
+    and obtain the coordinates of each point.
+  - A new module [K Nearest Neighbors](https://gudhi.inria.fr/python/latest/point_cloud.html#k-nearest-neighbors)
+    that wraps several implementations for computing the k nearest neighbors in a point set.
+  - A new module [Distance To Measure](https://gudhi.inria.fr/python/latest/point_cloud.html#distance-to-measure)
+    to compute the distance to the empirical measure defined by a point set
+
+- [Persistence representations](https://gudhi.inria.fr/python/latest/representations.html)
+  - Interface to Wasserstein distances.
+
+- Rips complex
+  - A new module [Weighted Rips Complex](https://gudhi.inria.fr/python/latest/rips_complex_user.html#weighted-rips-complex)
+    to construct a simplicial complex from a distance matrix and weights on vertices.
+
 - [Wasserstein distance](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html)
-  - An another implementation comes from Hera (BSD-3-Clause) which is based on [Geometry Helps to Compare Persistence Diagrams](http://doi.acm.org/10.1145/3064175) by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov.
+  - [Another implementation](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html#hera)
+    comes from Hera (BSD-3-Clause) which is based on [Geometry Helps to Compare Persistence Diagrams](http://doi.acm.org/10.1145/3064175)
+    by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov.
- `gudhi.wasserstein.wasserstein_distance` has now an option to return the optimal matching that achieves the distance between the two diagrams. + - A new module [Barycenters](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html#barycenters) + to estimate the Frechet mean (aka Wasserstein barycenter) between persistence diagrams. + +- [Simplex tree](https://gudhi.inria.fr/python/latest/simplex_tree_ref.html) + - Extend filtration method to compute extended persistence + - Flag and lower star persistence pairs generators + - A new interface to filtration, simplices and skeleton getters to return an iterator + +- [Alpha complex](https://gudhi.inria.fr/doc/latest/group__alpha__complex.html) + - Improve computations (cache circumcenters computation and point comparison improvement) -- [Module](link) - - ... +- [Persistence graphical tools](https://gudhi.inria.fr/python/latest/persistence_graphical_tools_user.html) + - Use LaTeX style and grey block + - (N x 2) numpy arrays as input - Miscellaneous - - The [list of bugs that were solved since GUDHI-3.1.1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.2.0+is%3Aclosed) is available on GitHub. + - The [list of bugs that were solved since GUDHI-3.2.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.2.0+is%3Aclosed) is available on GitHub. All modules are distributed under the terms of the MIT license. However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. -- cgit v1.2.3 From 8dfc31c57586b07524728c939593f216c5d640f5 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Sat, 16 May 2020 10:33:23 +0200 Subject: Add submodule init in the worflow --- .github/for_maintainers/new_gudhi_version_creation.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md index f176d392..8674222b 100644 --- a/.github/for_maintainers/new_gudhi_version_creation.md +++ b/.github/for_maintainers/new_gudhi_version_creation.md @@ -16,6 +16,7 @@ rm -rf data/points/COIL_database/lucky_cat.off_dist data/points/COIL_database/lu Checkin the modifications, build and test the version: ```bash +git submodule update --init mkdir build cd build cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 .. -- cgit v1.2.3 From beadbbbefa1f8f30233a534b6c9cdf11ffb65f93 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 18 May 2020 07:40:59 +0200 Subject: When Reviewing dependencies, I missed this one --- src/python/doc/wasserstein_distance_sum.inc | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc index f9308e5e..c41de017 100644 --- a/src/python/doc/wasserstein_distance_sum.inc +++ b/src/python/doc/wasserstein_distance_sum.inc @@ -1,14 +1,12 @@ .. table:: :widths: 30 40 30 - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | .. 
figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe | - | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | | - | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Since: GUDHI 3.1.0 | - | | barycenters of a family of persistence diagrams. | | - | | | :License: MIT | - | | | | - | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 | - +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+ - | * :doc:`wasserstein_distance_user` | | - +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+ + | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe, Marc Glisse | + | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | | + | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Since: GUDHI 3.1.0 | + | | barycenters of a family of persistence diagrams. | | + | | | :License: MIT, BSD-3-Clause | + +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+ + | * :doc:`wasserstein_distance_user` | | + +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+ -- cgit v1.2.3 From 2e8a4a71a22350b9301cc6052165d97357f12f83 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 18 May 2020 08:54:20 +0200 Subject: Update gudhi version --- CMakeGUDHIVersion.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt index 0f827b9e..cf9fd4f7 100644 --- a/CMakeGUDHIVersion.txt +++ b/CMakeGUDHIVersion.txt @@ -1,6 +1,6 @@ set (GUDHI_MAJOR_VERSION 3) -set (GUDHI_MINOR_VERSION 1) -set (GUDHI_PATCH_VERSION 1) +set (GUDHI_MINOR_VERSION 2) +set (GUDHI_PATCH_VERSION 0.rc2) set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION}) message(STATUS "GUDHI version : ${GUDHI_VERSION}") -- cgit v1.2.3 From 3f14070864e4556bb137ee16d80496185435b469 Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 18 May 2020 09:27:15 +0200 Subject: Update .github/next_release.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Lacombe --- .github/next_release.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/next_release.md b/.github/next_release.md index 1112ef70..cd9488c6 100644 --- a/.github/next_release.md +++ b/.github/next_release.md @@ -41,7 +41,7 @@ Below is a list of changes made since GUDHI 3.1.1: - [Persistence graphical tools](https://gudhi.inria.fr/python/latest/persistence_graphical_tools_user.html) - Use LaTeX style and grey block - - (N x 2) numpy arrays 
as input + - Can now handle (N x 2) numpy arrays as input - Miscellaneous - The [list of bugs that were solved since GUDHI-3.2.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.2.0+is%3Aclosed) is available on GitHub. @@ -56,4 +56,3 @@ We provide [bibtex entries](https://gudhi.inria.fr/doc/latest/_citation.html) fo Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any questions or remarks. For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/). - -- cgit v1.2.3 From 3e52b65a55b615929556597acc963246f76475ff Mon Sep 17 00:00:00 2001 From: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> Date: Mon, 18 May 2020 09:27:25 +0200 Subject: Update .github/next_release.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Lacombe --- .github/next_release.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/next_release.md b/.github/next_release.md index cd9488c6..d3c9ce68 100644 --- a/.github/next_release.md +++ b/.github/next_release.md @@ -40,7 +40,7 @@ Below is a list of changes made since GUDHI 3.1.1: - Improve computations (cache circumcenters computation and point comparison improvement) - [Persistence graphical tools](https://gudhi.inria.fr/python/latest/persistence_graphical_tools_user.html) - - Use LaTeX style and grey block + - New rendering option proposed (use LaTeX style, add grey block, improved positioning of labels, etc.). - Can now handle (N x 2) numpy arrays as input - Miscellaneous -- cgit v1.2.3 From 97e889f34e929f3c2306803b6c37b57926bd1245 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 20 May 2020 07:32:26 +0200 Subject: 3.2.0 version --- CMakeGUDHIVersion.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt index cf9fd4f7..ac89fa4d 100644 --- a/CMakeGUDHIVersion.txt +++ b/CMakeGUDHIVersion.txt @@ -1,6 +1,6 @@ set (GUDHI_MAJOR_VERSION 3) set (GUDHI_MINOR_VERSION 2) -set (GUDHI_PATCH_VERSION 0.rc2) +set (GUDHI_PATCH_VERSION 0) set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION}) message(STATUS "GUDHI version : ${GUDHI_VERSION}") -- cgit v1.2.3
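The 3.2.0 release notes above single out the new optimal-matching option of `gudhi.wasserstein.wasserstein_distance`; a minimal sketch, assuming the POT backend is installed and using toy diagrams::

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    dgm1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
    dgm2 = np.array([[2.8, 4.45], [9.5, 14.1]])
    cost, matching = wasserstein_distance(dgm1, dgm2, matching=True,
                                          order=1.0, internal_p=2.0)
    print(cost)
    # each row of `matching` pairs a point index of dgm1 with one of dgm2;
    # -1 encodes matching to the diagonal
    print(matching)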