Diffstat (limited to 'include/gudhi')
-rw-r--r--  include/gudhi/Active_witness/Active_witness.h | 67
-rw-r--r--  include/gudhi/Active_witness/Active_witness_iterator.h | 108
-rw-r--r--  include/gudhi/Alpha_complex.h | 233
-rw-r--r--  include/gudhi/Alpha_complex.h~ | 417
-rw-r--r--  include/gudhi/Bottleneck.h | 115
-rw-r--r--  include/gudhi/Clock.h | 48
-rw-r--r--  include/gudhi/Debug_utils.h | 2
-rw-r--r--  include/gudhi/Edge_contraction.h | 2
-rw-r--r--  include/gudhi/Euclidean_strong_witness_complex.h | 104
-rw-r--r--  include/gudhi/Euclidean_witness_complex.h | 106
-rw-r--r--  include/gudhi/Graph_matching.h | 174
-rw-r--r--  include/gudhi/Internal_point.h | 91
-rw-r--r--  include/gudhi/Kd_tree_search.h | 264
-rw-r--r--  include/gudhi/Landmark_choice_by_furthest_point.h | 105
-rw-r--r--  include/gudhi/Landmark_choice_by_random_point.h | 96
-rw-r--r--  include/gudhi/Neighbors_finder.h | 192
-rw-r--r--  include/gudhi/Null_output_iterator.h | 48
-rw-r--r--  include/gudhi/Persistence_graph.h | 188
-rw-r--r--  include/gudhi/Persistent_cohomology.h | 26
-rw-r--r--  include/gudhi/Points_3D_off_io.h | 4
-rw-r--r--  include/gudhi/Points_off_io.h | 13
-rw-r--r--  include/gudhi/Rips_complex.h | 185
-rw-r--r--  include/gudhi/Simplex_tree.h | 2
-rw-r--r--  include/gudhi/Skeleton_blocker.h | 219
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h | 53
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_link_superior.h | 14
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_off_io.h | 11
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_geometric_traits.h | 13
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h | 38
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h | 12
-rw-r--r--  include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h | 49
-rw-r--r--  include/gudhi/Skeleton_blocker/internal/Top_faces.h | 5
-rw-r--r--  include/gudhi/Skeleton_blocker/internal/Trie.h | 9
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_blockers_iterators.h | 3
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_edges_iterators.h | 3
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_iterators.h | 2
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_simplices_iterators.h | 38
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h | 14
-rw-r--r--  include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_vertices_iterators.h | 12
-rw-r--r--  include/gudhi/Skeleton_blocker_complex.h | 33
-rw-r--r--  include/gudhi/Skeleton_blocker_geometric_complex.h | 3
-rw-r--r--  include/gudhi/Skeleton_blocker_link_complex.h | 30
-rw-r--r--  include/gudhi/Skeleton_blocker_simplifiable_complex.h | 13
-rw-r--r--  include/gudhi/Strong_witness_complex.h | 185
-rw-r--r--  include/gudhi/Tangential_complex.h | 2276
-rw-r--r--  include/gudhi/Tangential_complex/Simplicial_complex.h | 539
-rw-r--r--  include/gudhi/Tangential_complex/config.h | 43
-rw-r--r--  include/gudhi/Tangential_complex/utilities.h | 195
-rw-r--r--  include/gudhi/Test.h | 105
-rw-r--r--  include/gudhi/Witness_complex.h | 333
-rw-r--r--  include/gudhi/Witness_complex/all_faces_in.h | 55
-rw-r--r--  include/gudhi/choose_n_farthest_points.h | 133
-rw-r--r--  include/gudhi/console_color.h | 97
-rw-r--r--  include/gudhi/distance_functions.h | 40
-rw-r--r--  include/gudhi/graph_simplicial_complex.h | 59
-rw-r--r--  include/gudhi/pick_n_random_points.h | 86
-rw-r--r--  include/gudhi/random_point_generators.h | 474
-rw-r--r--  include/gudhi/reader_utils.h | 166
-rw-r--r--  include/gudhi/sparsify_point_set.h | 113
59 files changed, 6606 insertions, 1457 deletions
diff --git a/include/gudhi/Active_witness/Active_witness.h b/include/gudhi/Active_witness/Active_witness.h
new file mode 100644
index 00000000..d41a6811
--- /dev/null
+++ b/include/gudhi/Active_witness/Active_witness.h
@@ -0,0 +1,67 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ACTIVE_WITNESS_ACTIVE_WITNESS_H_
+#define ACTIVE_WITNESS_ACTIVE_WITNESS_H_
+
+#include <gudhi/Active_witness/Active_witness_iterator.h>
+#include <list>
+
+namespace Gudhi {
+
+namespace witness_complex {
+
+ /* \class Active_witness
+ * \brief Class representing a list of nearest neighbors to a given witness.
+ * \details Every element is a pair of a landmark identifier and the squared distance to it.
+ */
+template< typename Id_distance_pair,
+ typename INS_range >
+class Active_witness {
+ public:
+ typedef Active_witness<Id_distance_pair, INS_range> ActiveWitness;
+ typedef typename INS_range::iterator INS_iterator;
+ typedef Active_witness_iterator< ActiveWitness, Id_distance_pair, INS_iterator > iterator;
+ typedef typename std::list<Id_distance_pair> Table;
+
+ Table nearest_landmark_table_;
+ INS_range search_range_;
+ INS_iterator iterator_next_;
+ INS_iterator iterator_end_;
+
+ Active_witness(const INS_range& search_range)
+ : search_range_(search_range), iterator_next_(search_range_.begin()), iterator_end_(search_range_.end()) {
+ }
+
+ iterator begin() {
+ return iterator(this, nearest_landmark_table_.begin());
+ }
+
+ iterator end() {
+ return iterator(this);
+ }
+};
+
+} // namespace witness_complex
+} // namespace Gudhi
+
+#endif // ACTIVE_WITNESS_ACTIVE_WITNESS_H_
diff --git a/include/gudhi/Active_witness/Active_witness_iterator.h b/include/gudhi/Active_witness/Active_witness_iterator.h
new file mode 100644
index 00000000..0a05173a
--- /dev/null
+++ b/include/gudhi/Active_witness/Active_witness_iterator.h
@@ -0,0 +1,108 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ACTIVE_WITNESS_ACTIVE_WITNESS_ITERATOR_H_
+#define ACTIVE_WITNESS_ACTIVE_WITNESS_ITERATOR_H_
+
+#include <boost/iterator/iterator_facade.hpp>
+#include <list>
+
+namespace Gudhi {
+
+namespace witness_complex {
+
+/* \brief Iterator in the nearest landmark list.
+ * \details After the iterator reaches the end of the list,
+ * the list is augmented by a (nearest landmark, distance) pair if possible.
+ * If all the landmarks are present in the list, iterator returns the specific end value
+ * of the corresponding 'Active_witness' object.
+ */
+template< typename Active_witness,
+ typename Id_distance_pair,
+ typename INS_iterator >
+class Active_witness_iterator
+ : public boost::iterator_facade< Active_witness_iterator <Active_witness, Id_distance_pair, INS_iterator>,
+ Id_distance_pair const,
+ boost::forward_traversal_tag,
+ Id_distance_pair const> {
+ friend class boost::iterator_core_access;
+
+ typedef typename std::list<Id_distance_pair>::iterator Pair_iterator;
+ typedef typename Gudhi::witness_complex::Active_witness_iterator<Active_witness,
+ Id_distance_pair,
+ INS_iterator> Iterator;
+
+ Active_witness *aw_;
+ Pair_iterator lh_; // landmark handle
+ bool is_end_; // true only if the pointer is end and there are no more neighbors to add
+
+ public:
+ Active_witness_iterator(Active_witness* aw)
+ : aw_(aw), lh_(aw_->nearest_landmark_table_.end()), is_end_(true) {
+ }
+
+ Active_witness_iterator(Active_witness* aw, const Pair_iterator& lh)
+ : aw_(aw), lh_(lh) {
+ is_end_ = false;
+ if (lh_ == aw_->nearest_landmark_table_.end()) {
+ if (aw_->iterator_next_ == aw_->iterator_end_) {
+ is_end_ = true;
+ } else {
+ aw_->nearest_landmark_table_.push_back(*aw_->iterator_next_);
+ lh_ = --aw_->nearest_landmark_table_.end();
+ ++(aw_->iterator_next_);
+ }
+ }
+ }
+
+ private :
+ Id_distance_pair& dereference() const {
+ return *lh_;
+ }
+
+ bool equal(const Iterator& other) const {
+ return (is_end_ == other.is_end_) || (lh_ == other.lh_);
+ }
+
+ void increment() {
+ // the neighbor search can't be at the end iterator of a list
+ GUDHI_CHECK(!is_end_ && lh_ != aw_->nearest_landmark_table_.end(),
+ std::logic_error("Wrong active witness increment."));
+ // if the id of the current landmark is the same as the last one
+
+ lh_++;
+ if (lh_ == aw_->nearest_landmark_table_.end()) {
+ if (aw_->iterator_next_ == aw_->iterator_end_) {
+ is_end_ = true;
+ } else {
+ aw_->nearest_landmark_table_.push_back(*aw_->iterator_next_);
+ lh_ = std::prev(aw_->nearest_landmark_table_.end());
+ ++(aw_->iterator_next_);
+ }
+ }
+ }
+};
+
+} // namespace witness_complex
+} // namespace Gudhi
+
+#endif // ACTIVE_WITNESS_ACTIVE_WITNESS_ITERATOR_H_
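Taken together, the two headers above let a witness's nearest-landmark list grow lazily: incrementing the iterator past the current end of the internal list pulls one more (landmark id, squared distance) pair out of the incremental nearest-neighbor search range. A minimal sketch of that behavior, using a plain std::vector as a stand-in for the search range (illustrative only, not part of this changeset):

#include <gudhi/Debug_utils.h>  // defines GUDHI_CHECK, used inside the iterator's increment()
#include <gudhi/Active_witness/Active_witness.h>

#include <iostream>
#include <utility>
#include <vector>

int main() {
  typedef std::pair<int, double> Id_distance_pair;  // (landmark id, squared distance)
  typedef std::vector<Id_distance_pair> INS_range;  // stand-in for an incremental search range
  typedef Gudhi::witness_complex::Active_witness<Id_distance_pair, INS_range> Active_witness;

  INS_range neighbors = { {0, 0.0}, {3, 0.5}, {1, 1.25} };  // already sorted by squared distance
  Active_witness witness(neighbors);

  // Every increment past the current end of the internal list fetches one more neighbor.
  for (auto it = witness.begin(); it != witness.end(); ++it)
    std::cout << "landmark " << (*it).first << " at squared distance " << (*it).second << "\n";
  return 0;
}

In the library itself the range comes from Kd_tree_search::query_incremental_nearest_neighbors, as the Euclidean witness complex headers below show.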
diff --git a/include/gudhi/Alpha_complex.h b/include/gudhi/Alpha_complex.h
index 2c95ceb4..1ff95c3d 100644
--- a/include/gudhi/Alpha_complex.h
+++ b/include/gudhi/Alpha_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): Vincent Rouvreau
*
- * Copyright (C) 2015 INRIA Saclay (France)
+ * Copyright (C) 2015 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,9 +23,6 @@
#ifndef ALPHA_COMPLEX_H_
#define ALPHA_COMPLEX_H_
-// to construct a simplex_tree from Delaunay_triangulation
-#include <gudhi/graph_simplicial_complex.h>
-#include <gudhi/Simplex_tree.h>
#include <gudhi/Debug_utils.h>
// to construct Alpha_complex from a OFF file of points
#include <gudhi/Points_off_io.h>
@@ -36,6 +33,7 @@
#include <CGAL/Delaunay_triangulation.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Spatial_sort_traits_adapter_d.h>
+#include <CGAL/property_map.h> // for CGAL::Identity_property_map
#include <iostream>
#include <vector>
@@ -57,9 +55,9 @@ namespace alpha_complex {
* \ingroup alpha_complex
*
* \details
- * The data structure can be constructed from a CGAL Delaunay triangulation (for more informations on CGAL Delaunay
- * triangulation, please refer to the corresponding chapter in page http://doc.cgal.org/latest/Triangulation/) or from
- * an OFF file (cf. Points_off_reader).
+ * The data structure constructs a CGAL Delaunay triangulation (for more information on CGAL Delaunay
+ * triangulation, please refer to the corresponding chapter in page http://doc.cgal.org/latest/Triangulation/) from a
+ * range of points or from an OFF file (cf. Points_off_reader).
*
* Please refer to \ref alpha_complex for examples.
*
@@ -74,7 +72,7 @@ namespace alpha_complex {
*
*/
template<class Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>
-class Alpha_complex : public Simplex_tree<> {
+class Alpha_complex {
public:
// Add an int in TDS to save point index in the structure
typedef CGAL::Triangulation_data_structure<typename Kernel::Dimension,
@@ -90,13 +88,6 @@ class Alpha_complex : public Simplex_tree<> {
typedef Kernel Geom_traits;
private:
- // From Simplex_tree
- // Type required to insert into a simplex_tree (with or without subfaces).
- typedef std::vector<Vertex_handle> Vector_vertex;
-
- // Simplex_result is the type returned from simplex_tree insert function.
- typedef typename std::pair<Simplex_handle, bool> Simplex_result;
-
typedef typename Kernel::Compute_squared_radius_d Squared_Radius;
typedef typename Kernel::Side_of_bounded_sphere_d Is_Gabriel;
typedef typename Kernel::Point_dimension_d Point_Dimension;
@@ -111,7 +102,7 @@ class Alpha_complex : public Simplex_tree<> {
typedef typename Delaunay_triangulation::size_type size_type;
// Map type to switch from simplex tree vertex handle to CGAL vertex iterator.
- typedef typename std::map< Vertex_handle, CGAL_vertex_iterator > Vector_vertex_iterator;
+ typedef typename std::map< std::size_t, CGAL_vertex_iterator > Vector_vertex_iterator;
private:
/** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator.
@@ -124,16 +115,15 @@ class Alpha_complex : public Simplex_tree<> {
public:
/** \brief Alpha_complex constructor from an OFF file name.
- * Uses the Delaunay_triangulation_off_reader to construct the Delaunay triangulation required to initialize
+ *
+ * Uses the Points_off_reader to construct the Delaunay triangulation required to initialize
* the Alpha_complex.
*
* Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
*
* @param[in] off_file_name OFF file [path and] name.
- * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$.
*/
- Alpha_complex(const std::string& off_file_name,
- Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity())
+ Alpha_complex(const std::string& off_file_name)
: triangulation_(nullptr) {
Gudhi::Points_off_reader<Point_d> off_reader(off_file_name);
if (!off_reader.is_valid()) {
@@ -141,7 +131,7 @@ class Alpha_complex : public Simplex_tree<> {
exit(-1); // ----- >>
}
- init_from_range(off_reader.get_point_cloud(), max_alpha_square);
+ init_from_range(off_reader.get_point_cloud());
}
/** \brief Alpha_complex constructor from a list of points.
@@ -149,23 +139,17 @@ class Alpha_complex : public Simplex_tree<> {
* Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
*
* @param[in] points Range of points to triangulate. Points must be in Kernel::Point_d
- * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$.
*
* The type InputPointRange must be a range for which std::begin and
* std::end return input iterators on a Kernel::Point_d.
- *
- * @post Compare num_simplices with InputPointRange points number (not the same in case of duplicate points).
*/
template<typename InputPointRange >
- Alpha_complex(const InputPointRange& points,
- Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity())
+ Alpha_complex(const InputPointRange& points)
: triangulation_(nullptr) {
- init_from_range(points, max_alpha_square);
+ init_from_range(points);
}
- /** \brief Alpha_complex destructor.
- *
- * @warning Deletes the Delaunay triangulation.
+ /** \brief Alpha_complex destructor deletes the Delaunay triangulation.
*/
~Alpha_complex() {
delete triangulation_;
@@ -183,15 +167,24 @@ class Alpha_complex : public Simplex_tree<> {
* @return The point found.
* @exception std::out_of_range In case vertex is not found (cf. std::vector::at).
*/
- Point_d get_point(Vertex_handle vertex) const {
+ const Point_d& get_point(std::size_t vertex) const {
return vertex_handle_to_iterator_.at(vertex)->point();
}
+ /** \brief number_of_vertices returns the number of vertices (same as the number of points).
+ *
+ * @return The number of vertices.
+ */
+ const std::size_t number_of_vertices() const {
+ return vertex_handle_to_iterator_.size();
+ }
+
private:
template<typename InputPointRange >
- void init_from_range(const InputPointRange& points, Filtration_value max_alpha_square) {
+ void init_from_range(const InputPointRange& points) {
auto first = std::begin(points);
auto last = std::end(points);
+
if (first != last) {
// point_dimension function initialization
Point_Dimension point_dimension = kernel_.point_dimension_d_object();
@@ -199,90 +192,107 @@ class Alpha_complex : public Simplex_tree<> {
// Delaunay triangulation is point dimension.
triangulation_ = new Delaunay_triangulation(point_dimension(*first));
- std::vector<Point_d> points(first, last);
+ std::vector<Point_d> point_cloud(first, last);
// Creates a vector {0, 1, ..., N-1}
std::vector<std::ptrdiff_t> indices(boost::counting_iterator<std::ptrdiff_t>(0),
- boost::counting_iterator<std::ptrdiff_t>(points.size()));
+ boost::counting_iterator<std::ptrdiff_t>(point_cloud.size()));
+
+ typedef boost::iterator_property_map<typename std::vector<Point_d>::iterator,
+ CGAL::Identity_property_map<std::ptrdiff_t>> Point_property_map;
+ typedef CGAL::Spatial_sort_traits_adapter_d<Kernel, Point_property_map> Search_traits_d;
- // Sort indices considering CGAL spatial sort
- typedef CGAL::Spatial_sort_traits_adapter_d<Kernel, Point_d*> Search_traits_d;
- spatial_sort(indices.begin(), indices.end(), Search_traits_d(&(points[0])));
+ CGAL::spatial_sort(indices.begin(), indices.end(), Search_traits_d(std::begin(point_cloud)));
typename Delaunay_triangulation::Full_cell_handle hint;
for (auto index : indices) {
- typename Delaunay_triangulation::Vertex_handle pos = triangulation_->insert(points[index], hint);
+ typename Delaunay_triangulation::Vertex_handle pos = triangulation_->insert(point_cloud[index], hint);
// Save index value as data to retrieve it after insertion
pos->data() = index;
hint = pos->full_cell();
}
- init(max_alpha_square);
+ // --------------------------------------------------------------------------------------------
+ // double map to retrieve simplex tree vertex handles from CGAL vertex iterator and vice versa
+ // Loop on triangulation vertices list
+ for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) {
+ if (!triangulation_->is_infinite(*vit)) {
+#ifdef DEBUG_TRACES
+ std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
+#endif // DEBUG_TRACES
+ vertex_handle_to_iterator_.emplace(vit->data(), vit);
+ }
+ }
+ // --------------------------------------------------------------------------------------------
}
}
- /** \brief Initialize the Alpha_complex from the Delaunay triangulation.
+ public:
+ template <typename SimplicialComplexForAlpha>
+ bool create_complex(SimplicialComplexForAlpha& complex) {
+ typedef typename SimplicialComplexForAlpha::Filtration_value Filtration_value;
+ return create_complex(complex, std::numeric_limits<Filtration_value>::infinity());
+ }
+
+ /** \brief Inserts all Delaunay triangulation simplices into the simplicial complex.
+ * It also computes the filtration values according to the \ref createcomplexalgorithm
*
- * @param[in] max_alpha_square maximum for alpha square value.
+ * \tparam SimplicialComplexForAlpha must meet `SimplicialComplexForAlpha` concept.
*
- * @warning Delaunay triangulation must be already constructed with at least one vertex and dimension must be more
- * than 0.
+ * @param[in] complex SimplicialComplexForAlpha to be created.
+ * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$.
+ *
+ * @return true if creation succeeds, false otherwise.
+ *
+ * @pre Delaunay triangulation must be already constructed with dimension strictly greater than 0.
+ * @pre The simplicial complex must be empty (no vertices)
*
* Initialization can be launched once.
*/
- void init(Filtration_value max_alpha_square) {
+ template <typename SimplicialComplexForAlpha, typename Filtration_value>
+ bool create_complex(SimplicialComplexForAlpha& complex, Filtration_value max_alpha_square) {
+ // From SimplicialComplexForAlpha type required to insert into a simplicial complex (with or without subfaces).
+ typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
+ typedef typename SimplicialComplexForAlpha::Simplex_handle Simplex_handle;
+ typedef std::vector<Vertex_handle> Vector_vertex;
+
if (triangulation_ == nullptr) {
- std::cerr << "Alpha_complex init - Cannot init from a NULL triangulation\n";
- return; // ----- >>
- }
- if (triangulation_->number_of_vertices() < 1) {
- std::cerr << "Alpha_complex init - Cannot init from a triangulation without vertices\n";
- return; // ----- >>
+ std::cerr << "Alpha_complex cannot create_complex from a NULL triangulation\n";
+ return false; // ----- >>
}
if (triangulation_->maximal_dimension() < 1) {
- std::cerr << "Alpha_complex init - Cannot init from a zero-dimension triangulation\n";
- return; // ----- >>
+ std::cerr << "Alpha_complex cannot create_complex from a zero-dimension triangulation\n";
+ return false; // ----- >>
}
- if (num_vertices() > 0) {
- std::cerr << "Alpha_complex init - Cannot init twice\n";
- return; // ----- >>
+ if (complex.num_vertices() > 0) {
+ std::cerr << "Alpha_complex create_complex - complex is not empty\n";
+ return false; // ----- >>
}
- set_dimension(triangulation_->maximal_dimension());
-
- // --------------------------------------------------------------------------------------------
- // double map to retrieve simplex tree vertex handles from CGAL vertex iterator and vice versa
- // Loop on triangulation vertices list
- for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) {
- if (!triangulation_->is_infinite(*vit)) {
-#ifdef DEBUG_TRACES
- std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
-#endif // DEBUG_TRACES
- vertex_handle_to_iterator_.emplace(vit->data(), vit);
- }
- }
- // --------------------------------------------------------------------------------------------
+ complex.set_dimension(triangulation_->maximal_dimension());
// --------------------------------------------------------------------------------------------
// Simplex_tree construction from loop on triangulation finite full cells list
- for (auto cit = triangulation_->finite_full_cells_begin(); cit != triangulation_->finite_full_cells_end(); ++cit) {
- Vector_vertex vertexVector;
+ if (triangulation_->number_of_vertices() > 0) {
+ for (auto cit = triangulation_->finite_full_cells_begin(); cit != triangulation_->finite_full_cells_end(); ++cit) {
+ Vector_vertex vertexVector;
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree insertion ";
+ std::cout << "Simplex_tree insertion ";
#endif // DEBUG_TRACES
- for (auto vit = cit->vertices_begin(); vit != cit->vertices_end(); ++vit) {
- if (*vit != nullptr) {
+ for (auto vit = cit->vertices_begin(); vit != cit->vertices_end(); ++vit) {
+ if (*vit != nullptr) {
#ifdef DEBUG_TRACES
- std::cout << " " << (*vit)->data();
+ std::cout << " " << (*vit)->data();
#endif // DEBUG_TRACES
- // Vector of vertex construction for simplex_tree structure
- vertexVector.push_back((*vit)->data());
+ // Vector of vertex construction for simplex_tree structure
+ vertexVector.push_back((*vit)->data());
+ }
}
- }
#ifdef DEBUG_TRACES
- std::cout << std::endl;
+ std::cout << std::endl;
#endif // DEBUG_TRACES
- // Insert each simplex and its subfaces in the simplex tree - filtration is NaN
- insert_simplex_and_subfaces(vertexVector, std::numeric_limits<double>::quiet_NaN());
+ // Insert each simplex and its subfaces in the simplex tree - filtration is NaN
+ complex.insert_simplex_and_subfaces(vertexVector, std::numeric_limits<double>::quiet_NaN());
+ }
}
// --------------------------------------------------------------------------------------------
@@ -290,16 +300,16 @@ class Alpha_complex : public Simplex_tree<> {
// Will be re-used many times
Vector_of_CGAL_points pointVector;
// ### For i : d -> 0
- for (int decr_dim = dimension(); decr_dim >= 0; decr_dim--) {
+ for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
// ### Foreach Sigma of dim i
- for (auto f_simplex : skeleton_simplex_range(decr_dim)) {
- int f_simplex_dim = dimension(f_simplex);
+ for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
+ int f_simplex_dim = complex.dimension(f_simplex);
if (decr_dim == f_simplex_dim) {
pointVector.clear();
#ifdef DEBUG_TRACES
std::cout << "Sigma of dim " << decr_dim << " is";
#endif // DEBUG_TRACES
- for (auto vertex : simplex_vertex_range(f_simplex)) {
+ for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
pointVector.push_back(get_point(vertex));
#ifdef DEBUG_TRACES
std::cout << " " << vertex;
@@ -309,7 +319,7 @@ class Alpha_complex : public Simplex_tree<> {
std::cout << std::endl;
#endif // DEBUG_TRACES
// ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
- if (std::isnan(filtration(f_simplex))) {
+ if (std::isnan(complex.filtration(f_simplex))) {
Filtration_value alpha_complex_filtration = 0.0;
// No need to compute squared_radius on a single point - alpha is 0.0
if (f_simplex_dim > 0) {
@@ -318,12 +328,12 @@ class Alpha_complex : public Simplex_tree<> {
alpha_complex_filtration = squared_radius(pointVector.begin(), pointVector.end());
}
- assign_filtration(f_simplex, alpha_complex_filtration);
+ complex.assign_filtration(f_simplex, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << filtration(f_simplex) << std::endl;
+ std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
#endif // DEBUG_TRACES
}
- propagate_alpha_filtration(f_simplex, decr_dim);
+ propagate_alpha_filtration(complex, f_simplex, decr_dim);
}
}
}
@@ -331,36 +341,41 @@ class Alpha_complex : public Simplex_tree<> {
// --------------------------------------------------------------------------------------------
// As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- bool modified_filt = make_filtration_non_decreasing();
+ complex.make_filtration_non_decreasing();
// Remove all simplices that have a filtration value greater than max_alpha_square
- // Remark: prune_above_filtration does not require initialize_filtration to be done before.
- modified_filt |= prune_above_filtration(max_alpha_square);
- if (modified_filt) {
- initialize_filtration();
- }
+ complex.prune_above_filtration(max_alpha_square);
// --------------------------------------------------------------------------------------------
+ return true;
}
- template<typename Simplex_handle>
- void propagate_alpha_filtration(Simplex_handle f_simplex, int decr_dim) {
+ private:
+ template <typename SimplicialComplexForAlpha, typename Simplex_handle>
+ void propagate_alpha_filtration(SimplicialComplexForAlpha& complex, Simplex_handle f_simplex, int decr_dim) {
+ // From SimplicialComplexForAlpha type required to assign filtration values.
+ typedef typename SimplicialComplexForAlpha::Filtration_value Filtration_value;
+#ifdef DEBUG_TRACES
+ typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
+#endif // DEBUG_TRACES
+
// ### Foreach Tau face of Sigma
- for (auto f_boundary : boundary_simplex_range(f_simplex)) {
+ for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) {
#ifdef DEBUG_TRACES
std::cout << " | --------------------------------------------------\n";
std::cout << " | Tau ";
- for (auto vertex : simplex_vertex_range(f_boundary)) {
+ for (auto vertex : complex.simplex_vertex_range(f_boundary)) {
std::cout << vertex << " ";
}
std::cout << "is a face of Sigma\n";
- std::cout << " | isnan(filtration(Tau)=" << std::isnan(filtration(f_boundary)) << std::endl;
+ std::cout << " | isnan(complex.filtration(Tau)=" << std::isnan(complex.filtration(f_boundary)) << std::endl;
#endif // DEBUG_TRACES
// ### If filt(Tau) is not NaN
- if (!std::isnan(filtration(f_boundary))) {
+ if (!std::isnan(complex.filtration(f_boundary))) {
// ### filt(Tau) = fmin(filt(Tau), filt(Sigma))
- Filtration_value alpha_complex_filtration = fmin(filtration(f_boundary), filtration(f_simplex));
- assign_filtration(f_boundary, alpha_complex_filtration);
+ Filtration_value alpha_complex_filtration = fmin(complex.filtration(f_boundary),
+ complex.filtration(f_simplex));
+ complex.assign_filtration(f_boundary, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << filtration(f_boundary) << std::endl;
+ std::cout << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << complex.filtration(f_boundary) << std::endl;
#endif // DEBUG_TRACES
// ### Else
} else {
@@ -372,12 +387,12 @@ class Alpha_complex : public Simplex_tree<> {
#ifdef DEBUG_TRACES
Vertex_handle vertexForGabriel = Vertex_handle();
#endif // DEBUG_TRACES
- for (auto vertex : simplex_vertex_range(f_boundary)) {
+ for (auto vertex : complex.simplex_vertex_range(f_boundary)) {
pointVector.push_back(get_point(vertex));
}
// Retrieve the Sigma point that is not part of Tau - parameter for is_gabriel function
Point_d point_for_gabriel;
- for (auto vertex : simplex_vertex_range(f_simplex)) {
+ for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
point_for_gabriel = get_point(vertex);
if (std::find(pointVector.begin(), pointVector.end(), point_for_gabriel) == pointVector.end()) {
#ifdef DEBUG_TRACES
@@ -398,10 +413,10 @@ class Alpha_complex : public Simplex_tree<> {
// ### If Tau is not Gabriel of Sigma
if (false == is_gab) {
// ### filt(Tau) = filt(Sigma)
- Filtration_value alpha_complex_filtration = filtration(f_simplex);
- assign_filtration(f_boundary, alpha_complex_filtration);
+ Filtration_value alpha_complex_filtration = complex.filtration(f_simplex);
+ complex.assign_filtration(f_boundary, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = filt(Sigma) = " << filtration(f_boundary) << std::endl;
+ std::cout << " | filt(Tau) = filt(Sigma) = " << complex.filtration(f_boundary) << std::endl;
#endif // DEBUG_TRACES
}
}
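The net effect of this diff is that Alpha_complex no longer derives from Simplex_tree<>: building the Delaunay triangulation and creating the filtered complex are now two separate steps. A hedged sketch of the resulting usage (point values are arbitrary; the example shipped with the library may differ):

#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <iostream>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dimension_tag<2>> Kernel;
  typedef Kernel::Point_d Point;

  std::vector<Point> points = { Point(1., 1.), Point(7., 0.), Point(4., 6.), Point(9., 6.) };

  // Step 1: triangulate the points.
  Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex(points);

  // Step 2: create the filtered simplicial complex; the maximum alpha square
  // value is now passed here instead of to the constructor.
  Gudhi::Simplex_tree<> simplex;
  if (alpha_complex.create_complex(simplex, 32.0)) {
    std::cout << simplex.num_simplices() << " simplices on "
              << alpha_complex.number_of_vertices() << " vertices\n";
  }
  return 0;
}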
diff --git a/include/gudhi/Alpha_complex.h~ b/include/gudhi/Alpha_complex.h~
deleted file mode 100644
index a1900cb9..00000000
--- a/include/gudhi/Alpha_complex.h~
+++ /dev/null
@@ -1,417 +0,0 @@
-/* This file is part of the Gudhi Library. The Gudhi library
- * (Geometric Understanding in Higher Dimensions) is a generic C++
- * library for computational topology.
- *
- * Author(s): Vincent Rouvreau
- *
- * Copyright (C) 2015 INRIA Saclay (France)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef ALPHA_COMPLEX_H_
-#define ALPHA_COMPLEX_H_
-
-// to construct a simplex_tree from Delaunay_triangulation
-#include <gudhi/graph_simplicial_complex.h>
-#include <gudhi/Simplex_tree.h>
-#include <gudhi/Debug_utils.h>
-// to construct Alpha_complex from a OFF file of points
-#include <gudhi/Points_off_io.h>
-
-#include <stdlib.h>
-#include <math.h> // isnan, fmax
-
-#include <CGAL/Delaunay_triangulation.h>
-#include <CGAL/Epick_d.h>
-#include <CGAL/Spatial_sort_traits_adapter_d.h>
-
-#include <iostream>
-#include <vector>
-#include <string>
-#include <limits> // NaN
-#include <map>
-#include <utility> // std::pair
-#include <stdexcept>
-#include <numeric> // for std::iota
-
-namespace Gudhi {
-
-namespace alphacomplex {
-
-/**
- * \class Alpha_complex Alpha_complex.h gudhi/Alpha_complex.h
- * \brief Alpha complex data structure.
- *
- * \ingroup alpha_complex
- *
- * \details
- * The data structure can be constructed from a CGAL Delaunay triangulation (for more informations on CGAL Delaunay
- * triangulation, please refer to the corresponding chapter in page http://doc.cgal.org/latest/Triangulation/) or from
- * an OFF file (cf. Points_off_reader).
- *
- * Please refer to \ref alpha_complex for examples.
- *
- * The complex is a template class requiring an Epick_d <a target="_blank"
- * href="http://doc.cgal.org/latest/Kernel_d/index.html#Chapter_dD_Geometry_Kernel">dD Geometry Kernel</a>
- * \cite cgal:s-gkd-15b from CGAL as template, default value is <a target="_blank"
- * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a>
- * < <a target="_blank" href="http://doc.cgal.org/latest/Kernel_23/classCGAL_1_1Dynamic__dimension__tag.html">
- * CGAL::Dynamic_dimension_tag </a> >
- *
- * \remark When Alpha_complex is constructed with an infinite value of alpha, the complex is a Delaunay complex.
- *
- */
-template<class Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>
-class Alpha_complex : public Simplex_tree<> {
- public:
- // Add an int in TDS to save point index in the structure
- typedef CGAL::Triangulation_data_structure<typename Kernel::Dimension,
- CGAL::Triangulation_vertex<Kernel, std::ptrdiff_t>,
- CGAL::Triangulation_full_cell<Kernel> > TDS;
- /** \brief A Delaunay triangulation of a set of points in \f$ \mathbb{R}^D\f$.*/
- typedef CGAL::Delaunay_triangulation<Kernel, TDS> Delaunay_triangulation;
-
- /** \brief A point in Euclidean space.*/
- typedef typename Kernel::Point_d Point_d;
- /** \brief Geometric traits class that provides the geometric types and predicates needed by Delaunay
- * triangulations.*/
- typedef Kernel Geom_traits;
-
- private:
- // From Simplex_tree
- // Type required to insert into a simplex_tree (with or without subfaces).
- typedef std::vector<Vertex_handle> Vector_vertex;
-
- // Simplex_result is the type returned from simplex_tree insert function.
- typedef typename std::pair<Simplex_handle, bool> Simplex_result;
-
- typedef typename Kernel::Compute_squared_radius_d Squared_Radius;
- typedef typename Kernel::Side_of_bounded_sphere_d Is_Gabriel;
- typedef typename Kernel::Point_dimension_d Point_Dimension;
-
- // Type required to compute squared radius, or side of bounded sphere on a vector of points.
- typedef typename std::vector<Point_d> Vector_of_CGAL_points;
-
- // Vertex_iterator type from CGAL.
- typedef typename Delaunay_triangulation::Vertex_iterator CGAL_vertex_iterator;
-
- // size_type type from CGAL.
- typedef typename Delaunay_triangulation::size_type size_type;
-
- // Map type to switch from simplex tree vertex handle to CGAL vertex iterator.
- typedef typename std::map< Vertex_handle, CGAL_vertex_iterator > Vector_vertex_iterator;
-
- private:
- /** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator.
- * Vertex handles are inserted sequentially, starting at 0.*/
- Vector_vertex_iterator vertex_handle_to_iterator_;
- /** \brief Pointer on the CGAL Delaunay triangulation.*/
- Delaunay_triangulation* triangulation_;
- /** \brief Kernel for triangulation_ functions access.*/
- Kernel kernel_;
-
- public:
- /** \brief Alpha_complex constructor from an OFF file name.
- * Uses the Delaunay_triangulation_off_reader to construct the Delaunay triangulation required to initialize
- * the Alpha_complex.
- *
- * Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
- *
- * @param[in] off_file_name OFF file [path and] name.
- * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$.
- */
- Alpha_complex(const std::string& off_file_name,
- Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity())
- : triangulation_(nullptr) {
- Gudhi::Points_off_reader<Point_d> off_reader(off_file_name);
- if (!off_reader.is_valid()) {
- std::cerr << "Alpha_complex - Unable to read file " << off_file_name << "\n";
- exit(-1); // ----- >>
- }
-
- init_from_range(off_reader.get_point_cloud(), max_alpha_square);
- }
-
- /** \brief Alpha_complex constructor from a list of points.
- *
- * Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
- *
- * @param[in] points Range of points to triangulate. Points must be in Kernel::Point_d
- * @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$.
- *
- * The type InputPointRange must be a range for which std::begin and
- * std::end return input iterators on a Kernel::Point_d.
- *
- * @post Compare num_simplices with InputPointRange points number (not the same in case of duplicate points).
- */
- template<typename InputPointRange >
- Alpha_complex(const InputPointRange& points,
- Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity())
- : triangulation_(nullptr) {
- init_from_range(points, max_alpha_square);
- }
-
- /** \brief Alpha_complex destructor.
- *
- * @warning Deletes the Delaunay triangulation.
- */
- ~Alpha_complex() {
- delete triangulation_;
- }
-
- // Forbid copy/move constructor/assignment operator
- Alpha_complex(const Alpha_complex& other) = delete;
- Alpha_complex& operator= (const Alpha_complex& other) = delete;
- Alpha_complex (Alpha_complex&& other) = delete;
- Alpha_complex& operator= (Alpha_complex&& other) = delete;
-
- /** \brief get_point returns the point corresponding to the vertex given as parameter.
- *
- * @param[in] vertex Vertex handle of the point to retrieve.
- * @return The point found.
- * @exception std::out_of_range In case vertex is not found (cf. std::vector::at).
- */
- Point_d get_point(Vertex_handle vertex) const {
- return vertex_handle_to_iterator_.at(vertex)->point();
- }
-
- private:
- template<typename InputPointRange >
- void init_from_range(const InputPointRange& points, Filtration_value max_alpha_square) {
- auto first = std::begin(points);
- auto last = std::end(points);
- if (first != last) {
- // point_dimension function initialization
- Point_Dimension point_dimension = kernel_.point_dimension_d_object();
-
- // Delaunay triangulation is point dimension.
- triangulation_ = new Delaunay_triangulation(point_dimension(*first));
-
- std::vector<Point_d> points(first, last);
-
- // Creates a vector {0, 1, ..., N-1}
- std::vector<std::ptrdiff_t> indices(boost::counting_iterator<std::ptrdiff_t>(0),
- boost::counting_iterator<std::ptrdiff_t>(points.size()));
-
- // Sort indices considering CGAL spatial sort
- typedef CGAL::Spatial_sort_traits_adapter_d<Kernel, Point_d*> Search_traits_d;
- spatial_sort(indices.begin(), indices.end(), Search_traits_d(&(points[0])));
-
- typename Delaunay_triangulation::Full_cell_handle hint;
- for (auto index : indices) {
- typename Delaunay_triangulation::Vertex_handle pos = triangulation_->insert(points[index], hint);
- // Save index value as data to retrieve it after insertion
- pos->data() = index;
- hint = pos->full_cell();
- }
- init(max_alpha_square);
- }
- }
-
- /** \brief Initialize the Alpha_complex from the Delaunay triangulation.
- *
- * @param[in] max_alpha_square maximum for alpha square value.
- *
- * @warning Delaunay triangulation must be already constructed with at least one vertex and dimension must be more
- * than 0.
- *
- * Initialization can be launched once.
- */
- void init(Filtration_value max_alpha_square) {
- if (triangulation_ == nullptr) {
- std::cerr << "Alpha_complex init - Cannot init from a NULL triangulation\n";
- return; // ----- >>
- }
- if (triangulation_->number_of_vertices() < 1) {
- std::cerr << "Alpha_complex init - Cannot init from a triangulation without vertices\n";
- return; // ----- >>
- }
- if (triangulation_->maximal_dimension() < 1) {
- std::cerr << "Alpha_complex init - Cannot init from a zero-dimension triangulation\n";
- return; // ----- >>
- }
- if (num_vertices() > 0) {
- std::cerr << "Alpha_complex init - Cannot init twice\n";
- return; // ----- >>
- }
-
- set_dimension(triangulation_->maximal_dimension());
-
- // --------------------------------------------------------------------------------------------
- // double map to retrieve simplex tree vertex handles from CGAL vertex iterator and vice versa
- // Loop on triangulation vertices list
- for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) {
- if (!triangulation_->is_infinite(*vit)) {
-#ifdef DEBUG_TRACES
- std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
-#endif // DEBUG_TRACES
- vertex_handle_to_iterator_.emplace(vit->data(), vit);
- }
- }
- // --------------------------------------------------------------------------------------------
-
- // --------------------------------------------------------------------------------------------
- // Simplex_tree construction from loop on triangulation finite full cells list
- for (auto cit = triangulation_->finite_full_cells_begin(); cit != triangulation_->finite_full_cells_end(); ++cit) {
- Vector_vertex vertexVector;
-#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree insertion ";
-#endif // DEBUG_TRACES
- for (auto vit = cit->vertices_begin(); vit != cit->vertices_end(); ++vit) {
- if (*vit != nullptr) {
-#ifdef DEBUG_TRACES
- std::cout << " " << (*vit)->data();
-#endif // DEBUG_TRACES
- // Vector of vertex construction for simplex_tree structure
- vertexVector.push_back((*vit)->data());
- }
- }
-#ifdef DEBUG_TRACES
- std::cout << std::endl;
-#endif // DEBUG_TRACES
- // Insert each simplex and its subfaces in the simplex tree - filtration is NaN
- insert_simplex_and_subfaces(vertexVector, std::numeric_limits<double>::quiet_NaN());
- }
- // --------------------------------------------------------------------------------------------
-
- // --------------------------------------------------------------------------------------------
- // Will be re-used many times
- Vector_of_CGAL_points pointVector;
- // ### For i : d -> 0
- for (int decr_dim = dimension(); decr_dim >= 0; decr_dim--) {
- // ### Foreach Sigma of dim i
- for (auto f_simplex : skeleton_simplex_range(decr_dim)) {
- int f_simplex_dim = dimension(f_simplex);
- if (decr_dim == f_simplex_dim) {
- pointVector.clear();
-#ifdef DEBUG_TRACES
- std::cout << "Sigma of dim " << decr_dim << " is";
-#endif // DEBUG_TRACES
- for (auto vertex : simplex_vertex_range(f_simplex)) {
- pointVector.push_back(get_point(vertex));
-#ifdef DEBUG_TRACES
- std::cout << " " << vertex;
-#endif // DEBUG_TRACES
- }
-#ifdef DEBUG_TRACES
- std::cout << std::endl;
-#endif // DEBUG_TRACES
- // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
- if (isnan(filtration(f_simplex))) {
- Filtration_value alpha_complex_filtration = 0.0;
- // No need to compute squared_radius on a single point - alpha is 0.0
- if (f_simplex_dim > 0) {
- // squared_radius function initialization
- Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
-
- alpha_complex_filtration = squared_radius(pointVector.begin(), pointVector.end());
- }
- assign_filtration(f_simplex, alpha_complex_filtration);
-#ifdef DEBUG_TRACES
- std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << filtration(f_simplex) << std::endl;
-#endif // DEBUG_TRACES
- }
- propagate_alpha_filtration(f_simplex, decr_dim);
- }
- }
- }
- // --------------------------------------------------------------------------------------------
-
- // --------------------------------------------------------------------------------------------
- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- bool modified_filt = make_filtration_non_decreasing();
- // Remove all simplices that have a filtration value greater than max_alpha_square
- // Remark: prune_above_filtration does not require initialize_filtration to be done before.
- modified_filt |= prune_above_filtration(max_alpha_square);
- if (modified_filt) {
- initialize_filtration();
- }
- // --------------------------------------------------------------------------------------------
- }
-
- template<typename Simplex_handle>
- void propagate_alpha_filtration(Simplex_handle f_simplex, int decr_dim) {
- // ### Foreach Tau face of Sigma
- for (auto f_boundary : boundary_simplex_range(f_simplex)) {
-#ifdef DEBUG_TRACES
- std::cout << " | --------------------------------------------------\n";
- std::cout << " | Tau ";
- for (auto vertex : simplex_vertex_range(f_boundary)) {
- std::cout << vertex << " ";
- }
- std::cout << "is a face of Sigma\n";
- std::cout << " | isnan(filtration(Tau)=" << isnan(filtration(f_boundary)) << std::endl;
-#endif // DEBUG_TRACES
- // ### If filt(Tau) is not NaN
- if (!isnan(filtration(f_boundary))) {
- // ### filt(Tau) = fmin(filt(Tau), filt(Sigma))
- Filtration_value alpha_complex_filtration = fmin(filtration(f_boundary), filtration(f_simplex));
- assign_filtration(f_boundary, alpha_complex_filtration);
-#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << filtration(f_boundary) << std::endl;
-#endif // DEBUG_TRACES
- // ### Else
- } else {
- // No need to compute is_gabriel for dimension <= 2
- // i.e. : Sigma = (3,1) => Tau = 1
- if (decr_dim > 1) {
- // insert the Tau points in a vector for is_gabriel function
- Vector_of_CGAL_points pointVector;
-#ifdef DEBUG_TRACES
- Vertex_handle vertexForGabriel = Vertex_handle();
-#endif // DEBUG_TRACES
- for (auto vertex : simplex_vertex_range(f_boundary)) {
- pointVector.push_back(get_point(vertex));
- }
- // Retrieve the Sigma point that is not part of Tau - parameter for is_gabriel function
- Point_d point_for_gabriel;
- for (auto vertex : simplex_vertex_range(f_simplex)) {
- point_for_gabriel = get_point(vertex);
- if (std::find(pointVector.begin(), pointVector.end(), point_for_gabriel) == pointVector.end()) {
-#ifdef DEBUG_TRACES
- // vertex is not found in Tau
- vertexForGabriel = vertex;
-#endif // DEBUG_TRACES
- // No need to continue loop
- break;
- }
- }
- // is_gabriel function initialization
- Is_Gabriel is_gabriel = kernel_.side_of_bounded_sphere_d_object();
- bool is_gab = is_gabriel(pointVector.begin(), pointVector.end(), point_for_gabriel)
- != CGAL::ON_BOUNDED_SIDE;
-#ifdef DEBUG_TRACES
- std::cout << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl;
-#endif // DEBUG_TRACES
- // ### If Tau is not Gabriel of Sigma
- if (false == is_gab) {
- // ### filt(Tau) = filt(Sigma)
- Filtration_value alpha_complex_filtration = filtration(f_simplex);
- assign_filtration(f_boundary, alpha_complex_filtration);
-#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = filt(Sigma) = " << filtration(f_boundary) << std::endl;
-#endif // DEBUG_TRACES
- }
- }
- }
- }
- }
-};
-
-} // namespace alphacomplex
-
-} // namespace Gudhi
-
-#endif // ALPHA_COMPLEX_H_
diff --git a/include/gudhi/Bottleneck.h b/include/gudhi/Bottleneck.h
new file mode 100644
index 00000000..b90a0ee0
--- /dev/null
+++ b/include/gudhi/Bottleneck.h
@@ -0,0 +1,115 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Francois Godi
+ *
+ * Copyright (C) 2015 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BOTTLENECK_H_
+#define BOTTLENECK_H_
+
+#include <gudhi/Graph_matching.h>
+
+#include <vector>
+#include <algorithm> // for max
+#include <limits> // for numeric_limits
+
+#include <cmath>
+
+namespace Gudhi {
+
+namespace persistence_diagram {
+
+double bottleneck_distance_approx(Persistence_graph& g, double e) {
+ double b_lower_bound = 0.;
+ double b_upper_bound = g.diameter_bound();
+ const double alpha = std::pow(g.size(), 1. / 5.);
+ Graph_matching m(g);
+ Graph_matching biggest_unperfect(g);
+ while (b_upper_bound - b_lower_bound > 2 * e) {
+ double step = b_lower_bound + (b_upper_bound - b_lower_bound) / alpha;
+ if (step <= b_lower_bound || step >= b_upper_bound) // Avoid precision problem
+ break;
+ m.set_r(step);
+ while (m.multi_augment()) {}; // compute a maximum matching (in the graph corresponding to the current r)
+ if (m.perfect()) {
+ m = biggest_unperfect;
+ b_upper_bound = step;
+ } else {
+ biggest_unperfect = m;
+ b_lower_bound = step;
+ }
+ }
+ return (b_lower_bound + b_upper_bound) / 2.;
+}
+
+double bottleneck_distance_exact(Persistence_graph& g) {
+ std::vector<double> sd = g.sorted_distances();
+ long lower_bound_i = 0;
+ long upper_bound_i = sd.size() - 1;
+ const double alpha = std::pow(g.size(), 1. / 5.);
+ Graph_matching m(g);
+ Graph_matching biggest_unperfect(g);
+ while (lower_bound_i != upper_bound_i) {
+ long step = lower_bound_i + static_cast<long> ((upper_bound_i - lower_bound_i - 1) / alpha);
+ m.set_r(sd.at(step));
+ while (m.multi_augment()) {}; // compute a maximum matching (in the graph corresponding to the current r)
+ if (m.perfect()) {
+ m = biggest_unperfect;
+ upper_bound_i = step;
+ } else {
+ biggest_unperfect = m;
+ lower_bound_i = step + 1;
+ }
+ }
+ return sd.at(lower_bound_i);
+}
+
+/** \brief Function to compute the Bottleneck distance between two persistence diagrams.
+ *
+ * \tparam Persistence_diagram1,Persistence_diagram2
+ * models of the concept `PersistenceDiagram`.
+ * \param[in] e
+ * \parblock
+ * If `e` is 0, this uses an expensive algorithm to compute the exact distance.
+ *
+ * If `e` is not 0, it asks for an additive `e`-approximation, and currently
+ * also allows a small multiplicative error (the last 2 or 3 bits of the
+ * mantissa may be wrong). This version of the algorithm takes advantage of the
+ * limited precision of `double` and is usually a lot faster to compute,
+ * whatever the value of `e`.
+ *
+ * Thus, by default, `e` is the smallest positive double.
+ * \endparblock
+ *
+ * \ingroup bottleneck_distance
+ */
+template<typename Persistence_diagram1, typename Persistence_diagram2>
+double bottleneck_distance(const Persistence_diagram1 &diag1, const Persistence_diagram2 &diag2,
+ double e = std::numeric_limits<double>::min()) {
+ Persistence_graph g(diag1, diag2, e);
+ if (g.bottleneck_alive() == std::numeric_limits<double>::infinity())
+ return std::numeric_limits<double>::infinity();
+ return std::max(g.bottleneck_alive(), e == 0. ? bottleneck_distance_exact(g) : bottleneck_distance_approx(g, e));
+}
+
+} // namespace persistence_diagram
+
+} // namespace Gudhi
+
+#endif // BOTTLENECK_H_
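A short usage sketch for the function above, assuming persistence diagrams given as ranges of (birth, death) pairs (the sample values are made up):

#include <gudhi/Bottleneck.h>

#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<double, double>> diag1 = { {2.7, 3.7}, {9.6, 14.0}, {34.2, 34.974} };
  std::vector<std::pair<double, double>> diag2 = { {2.8, 4.45}, {9.5, 14.1} };

  // e = 0 requests the exact algorithm; a positive e asks for an additive approximation.
  double exact_d  = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, 0.);
  double approx_d = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, 0.1);
  std::cout << "exact: " << exact_d << "  approx: " << approx_d << std::endl;
  return 0;
}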
diff --git a/include/gudhi/Clock.h b/include/gudhi/Clock.h
index 04c6ffb9..77f196ca 100644
--- a/include/gudhi/Clock.h
+++ b/include/gudhi/Clock.h
@@ -27,47 +27,55 @@
#include <string>
+namespace Gudhi {
+
class Clock {
public:
- Clock() : end_called(false) {
- startTime = boost::posix_time::microsec_clock::local_time();
- }
-
- Clock(const std::string& msg_) {
- end_called = false;
- begin();
- msg = msg_;
- }
+ // Construct and start the timer
+ Clock(const std::string& msg_ = std::string())
+ : startTime(boost::posix_time::microsec_clock::local_time()),
+ end_called(false),
+ msg(msg_) { }
+ // Restart the timer
void begin() const {
end_called = false;
startTime = boost::posix_time::microsec_clock::local_time();
}
+ // Stop the timer
void end() const {
end_called = true;
endTime = boost::posix_time::microsec_clock::local_time();
}
+ std::string message() const {
+ return msg;
+ }
+
+ // Print current value to std::cout
void print() const {
std::cout << *this << std::endl;
}
friend std::ostream& operator<<(std::ostream& stream, const Clock& clock) {
- if (!clock.end_called)
- clock.end();
+ if (!clock.msg.empty())
+ stream << clock.msg << ": ";
- if (!clock.end_called) {
- stream << "end not called";
- } else {
- stream << clock.msg << ":" << clock.num_seconds() << "s";
- }
+ stream << clock.num_seconds() << "s";
return stream;
}
+ // Get the number of seconds between the timer start and:
+ // - the last call of end() if it was called
+ // - or now otherwise. In this case, the timer is not stopped.
double num_seconds() const {
- if (!end_called) return -1;
- return (endTime - startTime).total_milliseconds() / 1000.;
+ if (!end_called) {
+ auto end = boost::posix_time::microsec_clock::local_time();
+ return (end - startTime).total_milliseconds() / 1000.;
+ } else {
+ return (endTime - startTime).total_milliseconds() / 1000.;
+ }
}
private:
@@ -76,4 +84,6 @@ class Clock {
std::string msg;
};
-#endif // CLOCK_H_
+} // namespace Gudhi
+
+#endif // CLOCK_H_
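With this rework, a Clock can be streamed while still running, and end() merely freezes the measured value. A minimal sketch of the intended use (assumption based on the code above):

#include <gudhi/Clock.h>
#include <iostream>

int main() {
  Gudhi::Clock clock("my computation");

  // ... work to be timed ...

  std::cout << clock << std::endl;  // timer still running: prints the elapsed time so far
  clock.end();
  std::cout << clock << std::endl;  // prints "my computation: <seconds>s", frozen at end()
  return 0;
}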
diff --git a/include/gudhi/Debug_utils.h b/include/gudhi/Debug_utils.h
index 7573a9db..8ed3b7b3 100644
--- a/include/gudhi/Debug_utils.h
+++ b/include/gudhi/Debug_utils.h
@@ -33,8 +33,10 @@
// Could assert in release mode, but cmake sets NDEBUG (for "NO DEBUG") in this mode, means assert does nothing.
#ifdef GUDHI_DEBUG
#define GUDHI_CHECK(expression, excpt) if ((expression) == 0) throw excpt
+ #define GUDHI_CHECK_code(CODE) CODE
#else
#define GUDHI_CHECK(expression, excpt) (void) 0
+ #define GUDHI_CHECK_code(CODE)
#endif
#define PRINT(a) std::cerr << #a << ": " << (a) << " (DISP)" << std::endl
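The new GUDHI_CHECK_code macro compiles its argument only when GUDHI_DEBUG is defined, so a check can keep its supporting statements out of release builds. A small sketch (the function and variable names are illustrative):

#include <gudhi/Debug_utils.h>

#include <stdexcept>
#include <vector>

// Both GUDHI_CHECK_code and GUDHI_CHECK expand to nothing in release mode,
// so initial_size never leaks into non-debug builds.
void checked_push_back(std::vector<int>& values, int value) {
  GUDHI_CHECK_code(std::size_t initial_size = values.size();)
  values.push_back(value);
  GUDHI_CHECK(values.size() == initial_size + 1, std::logic_error("push_back failed"));
}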
diff --git a/include/gudhi/Edge_contraction.h b/include/gudhi/Edge_contraction.h
index 5af13c3e..61f2d945 100644
--- a/include/gudhi/Edge_contraction.h
+++ b/include/gudhi/Edge_contraction.h
@@ -41,7 +41,7 @@ namespace contraction {
\author David Salinas
-\section Introduction
+\section edgecontractionintroduction Introduction
The purpose of this package is to offer a user-friendly interface for edge contraction simplification of huge simplicial complexes.
It uses the \ref skbl data-structure whose size remains small during simplification
diff --git a/include/gudhi/Euclidean_strong_witness_complex.h b/include/gudhi/Euclidean_strong_witness_complex.h
new file mode 100644
index 00000000..fb669ef8
--- /dev/null
+++ b/include/gudhi/Euclidean_strong_witness_complex.h
@@ -0,0 +1,104 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2015 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EUCLIDEAN_STRONG_WITNESS_COMPLEX_H_
+#define EUCLIDEAN_STRONG_WITNESS_COMPLEX_H_
+
+#include <gudhi/Strong_witness_complex.h>
+#include <gudhi/Active_witness/Active_witness.h>
+#include <gudhi/Kd_tree_search.h>
+
+#include <utility>
+#include <vector>
+
+namespace Gudhi {
+
+namespace witness_complex {
+
+/**
+ * \private
+ * \class Euclidean_strong_witness_complex
+ * \brief Constructs strong witness complex for given sets of witnesses and landmarks in Euclidean space.
+ * \ingroup witness_complex
+ *
+ * \tparam Kernel_ requires a <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a> class.
+ */
+template< class Kernel_ >
+class Euclidean_strong_witness_complex
+ : public Strong_witness_complex<std::vector<typename Gudhi::spatial_searching::Kd_tree_search<Kernel_,
+ std::vector<typename Kernel_::Point_d>>::INS_range>> {
+ private:
+ typedef Kernel_ K;
+ typedef typename K::Point_d Point_d;
+ typedef std::vector<Point_d> Point_range;
+ typedef Gudhi::spatial_searching::Kd_tree_search<Kernel_, Point_range> Kd_tree;
+ typedef typename Kd_tree::INS_range Nearest_landmark_range;
+ typedef typename std::vector<Nearest_landmark_range> Nearest_landmark_table;
+
+ typedef typename Nearest_landmark_range::Point_with_transformed_distance Id_distance_pair;
+ typedef typename Id_distance_pair::first_type Landmark_id;
+ typedef Landmark_id Vertex_handle;
+
+ private:
+ Point_range landmarks_;
+ Kd_tree landmark_tree_;
+ using Strong_witness_complex<Nearest_landmark_table>::nearest_landmark_table_;
+
+ public:
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /* @name Constructor
+ */
+
+ //@{
+
+ /**
+   * \brief Initializes member variables before constructing the simplicial complex.
+ * \details Records landmarks from the range 'landmarks' into a
+ * table internally, as well as witnesses from the range 'witnesses'.
+ * Both ranges should have value_type Kernel_::Point_d.
+ */
+ template< typename LandmarkRange,
+ typename WitnessRange >
+ Euclidean_strong_witness_complex(const LandmarkRange & landmarks,
+ const WitnessRange & witnesses)
+ : landmarks_(std::begin(landmarks), std::end(landmarks)), landmark_tree_(landmarks_) {
+ nearest_landmark_table_.reserve(boost::size(witnesses));
+ for (auto w : witnesses)
+ nearest_landmark_table_.push_back(landmark_tree_.query_incremental_nearest_neighbors(w));
+ }
+
+ /** \brief Returns the point corresponding to the given vertex.
+ */
+ template <typename Vertex_handle>
+ Point_d get_point(Vertex_handle vertex) const {
+ return landmarks_[vertex];
+ }
+
+ //@}
+};
+
+} // namespace witness_complex
+
+} // namespace Gudhi
+
+#endif // EUCLIDEAN_STRONG_WITNESS_COMPLEX_H_
diff --git a/include/gudhi/Euclidean_witness_complex.h b/include/gudhi/Euclidean_witness_complex.h
new file mode 100644
index 00000000..6afe9a5d
--- /dev/null
+++ b/include/gudhi/Euclidean_witness_complex.h
@@ -0,0 +1,106 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2015 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EUCLIDEAN_WITNESS_COMPLEX_H_
+#define EUCLIDEAN_WITNESS_COMPLEX_H_
+
+#include <gudhi/Witness_complex.h>
+#include <gudhi/Active_witness/Active_witness.h>
+#include <gudhi/Kd_tree_search.h>
+
+#include <utility>
+#include <vector>
+#include <list>
+#include <limits>
+
+namespace Gudhi {
+
+namespace witness_complex {
+
+/**
+ * \private
+ * \class Euclidean_witness_complex
+ * \brief Constructs (weak) witness complex for given sets of witnesses and landmarks in Euclidean space.
+ * \ingroup witness_complex
+ *
+ * \tparam Kernel_ requires a <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a> class.
+ */
+template< class Kernel_ >
+class Euclidean_witness_complex
+ : public Witness_complex<std::vector<typename Gudhi::spatial_searching::Kd_tree_search<Kernel_,
+ std::vector<typename Kernel_::Point_d>>::INS_range>> {
+ private:
+ typedef Kernel_ K;
+ typedef typename K::Point_d Point_d;
+ typedef std::vector<Point_d> Point_range;
+ typedef Gudhi::spatial_searching::Kd_tree_search<Kernel_, Point_range> Kd_tree;
+ typedef typename Kd_tree::INS_range Nearest_landmark_range;
+ typedef typename std::vector<Nearest_landmark_range> Nearest_landmark_table;
+
+ typedef typename Nearest_landmark_range::Point_with_transformed_distance Id_distance_pair;
+ typedef typename Id_distance_pair::first_type Landmark_id;
+ typedef Landmark_id Vertex_handle;
+
+ private:
+ Point_range landmarks_;
+ Kd_tree landmark_tree_;
+ using Witness_complex<Nearest_landmark_table>::nearest_landmark_table_;
+
+ public:
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /* @name Constructor
+ */
+
+ //@{
+
+ /**
+   * \brief Initializes member variables before constructing the simplicial complex.
+ * \details Records landmarks from the range 'landmarks' into a
+ * table internally, as well as witnesses from the range 'witnesses'.
+ * Both ranges should have value_type Kernel_::Point_d.
+ */
+ template< typename LandmarkRange,
+ typename WitnessRange >
+ Euclidean_witness_complex(const LandmarkRange & landmarks,
+ const WitnessRange & witnesses)
+    : landmarks_(std::begin(landmarks), std::end(landmarks)), landmark_tree_(landmarks_) {
+ nearest_landmark_table_.reserve(boost::size(witnesses));
+ for (auto w : witnesses)
+ nearest_landmark_table_.push_back(landmark_tree_.query_incremental_nearest_neighbors(w));
+ }
+
+ /** \brief Returns the point corresponding to the given vertex.
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ */
+ Point_d get_point(Vertex_handle vertex) const {
+ return landmarks_[vertex];
+ }
+
+ //@}
+};
+
+} // namespace witness_complex
+
+} // namespace Gudhi
+
+#endif // EUCLIDEAN_WITNESS_COMPLEX_H_
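
A rough usage sketch (not part of this patch). It assumes the create_complex() member inherited from Witness_complex and the Simplex_tree class, both defined in other headers, and uses the CGAL::Epick_d kernel suggested by the \tparam documentation above; the points and parameters are made up. The same pattern applies to Euclidean_strong_witness_complex.

#include <gudhi/Euclidean_witness_complex.h>
#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dimension_tag<2>> Kernel;
  typedef Kernel::Point_d Point;
  std::vector<Point> witnesses = {Point(0., 0.), Point(1., 0.), Point(0., 1.), Point(1., 1.)};
  std::vector<Point> landmarks = {Point(0., 0.), Point(1., 1.)};

  Gudhi::witness_complex::Euclidean_witness_complex<Kernel> witness_complex(landmarks, witnesses);

  Gudhi::Simplex_tree<> simplex_tree;
  // Assumed signature, inherited from Witness_complex: create_complex(complex, max_alpha_square, limit_dimension).
  witness_complex.create_complex(simplex_tree, 0.01, 2);
  return 0;
}
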
diff --git a/include/gudhi/Graph_matching.h b/include/gudhi/Graph_matching.h
new file mode 100644
index 00000000..f51e22e9
--- /dev/null
+++ b/include/gudhi/Graph_matching.h
@@ -0,0 +1,174 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Francois Godi
+ *
+ * Copyright (C) 2015 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRAPH_MATCHING_H_
+#define GRAPH_MATCHING_H_
+
+#include <gudhi/Neighbors_finder.h>
+
+#include <vector>
+#include <unordered_set>
+#include <algorithm>
+
+namespace Gudhi {
+
+namespace persistence_diagram {
+
+/** \internal \brief Structure representing a graph matching. The graph is a Persistence_diagrams_graph.
+ *
+ * \ingroup bottleneck_distance
+ */
+class Graph_matching {
+ public:
+ /** \internal \brief Constructor constructing an empty matching. */
+ explicit Graph_matching(Persistence_graph &g);
+ /** \internal \brief Is the matching perfect ? */
+ bool perfect() const;
+ /** \internal \brief Augments the matching with a maximal set of edge-disjoint shortest augmenting paths. */
+ bool multi_augment();
+ /** \internal \brief Sets the maximum length of the edges allowed to be added in the matching, 0 initially. */
+ void set_r(double r);
+
+ private:
+ Persistence_graph* gp;
+ double r;
+ /** \internal \brief Given a point from V, provides its matched point in U, null_point_index() if there isn't. */
+ std::vector<int> v_to_u;
+ /** \internal \brief All the unmatched points in U. */
+ std::unordered_set<int> unmatched_in_u;
+
+ /** \internal \brief Provides a Layered_neighbors_finder dividing the graph in layers. Basically a BFS. */
+ Layered_neighbors_finder layering() const;
+ /** \internal \brief Augments the matching with a simple path no longer than max_depth. Basically a DFS. */
+ bool augment(Layered_neighbors_finder & layered_nf, int u_start_index, int max_depth);
+ /** \internal \brief Update the matching with the simple augmenting path given as parameter. */
+ void update(std::vector<int> & path);
+};
+
+inline Graph_matching::Graph_matching(Persistence_graph& g)
+ : gp(&g), r(0.), v_to_u(g.size(), null_point_index()), unmatched_in_u(g.size()) {
+ for (int u_point_index = 0; u_point_index < g.size(); ++u_point_index)
+ unmatched_in_u.insert(u_point_index);
+}
+
+inline bool Graph_matching::perfect() const {
+ return unmatched_in_u.empty();
+}
+
+inline bool Graph_matching::multi_augment() {
+ if (perfect())
+ return false;
+ Layered_neighbors_finder layered_nf(layering());
+ int max_depth = layered_nf.vlayers_number()*2 - 1;
+ double rn = sqrt(gp->size());
+  // check a necessary criterion so we can shortcut if possible
+ if (max_depth < 0 || (unmatched_in_u.size() > rn && max_depth >= rn))
+ return false;
+ bool successful = false;
+ std::vector<int> tries(unmatched_in_u.cbegin(), unmatched_in_u.cend());
+ for (auto it = tries.cbegin(); it != tries.cend(); it++)
+    // 'augment' has side effects which must always be executed; don't change the order
+ successful = augment(layered_nf, *it, max_depth) || successful;
+ return successful;
+}
+
+inline void Graph_matching::set_r(double r) {
+ this->r = r;
+}
+
+inline bool Graph_matching::augment(Layered_neighbors_finder & layered_nf, int u_start_index, int max_depth) {
+ // V vertices have at most one successor, thus when we backtrack from U we can directly pop_back 2 vertices.
+ std::vector<int> path;
+ path.emplace_back(u_start_index);
+ do {
+ if (static_cast<int> (path.size()) > max_depth) {
+ path.pop_back();
+ path.pop_back();
+ }
+ if (path.empty())
+ return false;
+ path.emplace_back(layered_nf.pull_near(path.back(), static_cast<int> (path.size()) / 2));
+ while (path.back() == null_point_index()) {
+ path.pop_back();
+ path.pop_back();
+ if (path.empty())
+ return false;
+ path.pop_back();
+ path.emplace_back(layered_nf.pull_near(path.back(), path.size() / 2));
+ }
+ path.emplace_back(v_to_u.at(path.back()));
+ } while (path.back() != null_point_index());
+ // if v_to_u.at(path.back()) has no successor, path.back() is an exposed vertex
+ path.pop_back();
+ update(path);
+ return true;
+}
+
+inline Layered_neighbors_finder Graph_matching::layering() const {
+ std::vector<int> u_vertices(unmatched_in_u.cbegin(), unmatched_in_u.cend());
+ std::vector<int> v_vertices;
+ Neighbors_finder nf(*gp, r);
+ for (int v_point_index = 0; v_point_index < gp->size(); ++v_point_index)
+ nf.add(v_point_index);
+ Layered_neighbors_finder layered_nf(*gp, r);
+ for (int layer = 0; !u_vertices.empty(); layer++) {
+ // one layer is one step in the BFS
+ for (auto it1 = u_vertices.cbegin(); it1 != u_vertices.cend(); ++it1) {
+ std::vector<int> u_succ(nf.pull_all_near(*it1));
+ for (auto it2 = u_succ.begin(); it2 != u_succ.end(); ++it2) {
+ layered_nf.add(*it2, layer);
+ v_vertices.emplace_back(*it2);
+ }
+ }
+    // When the above for finishes, we have progressed one half-step (from U to V) in the BFS
+ u_vertices.clear();
+ bool end = false;
+ for (auto it = v_vertices.cbegin(); it != v_vertices.cend(); it++)
+ if (v_to_u.at(*it) == null_point_index())
+ // we stop when a nearest exposed V vertex (from U exposed vertices) has been found
+ end = true;
+ else
+ u_vertices.emplace_back(v_to_u.at(*it));
+    // When the above for finishes, we have progressed one half-step (from V to U) in the BFS
+ if (end)
+ return layered_nf;
+ v_vertices.clear();
+ }
+ return layered_nf;
+}
+
+inline void Graph_matching::update(std::vector<int>& path) {
+  // path.front() is an unmatched U vertex, so this erase removes exactly one element.
+ unmatched_in_u.erase(path.front());
+ for (auto it = path.cbegin(); it != path.cend(); ++it) {
+ // Be careful, the iterator is incremented twice each time
+ int tmp = *it;
+ v_to_u[*(++it)] = tmp;
+ }
+}
+
+
+} // namespace persistence_diagram
+
+} // namespace Gudhi
+
+#endif // GRAPH_MATCHING_H_
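
An illustrative driver (not part of this patch) showing how the matching is typically grown for one candidate edge-length bound r, as in the bottleneck-distance computation; the two diagrams and the value 0.5 are made up.

#include <gudhi/Graph_matching.h>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<double, double>> diag1 = {{0., 2.}, {1., 4.}};
  std::vector<std::pair<double, double>> diag2 = {{0., 2.4}, {1.2, 3.8}};
  Gudhi::persistence_diagram::Persistence_graph graph(diag1, diag2, 0.);

  Gudhi::persistence_diagram::Graph_matching matching(graph);
  matching.set_r(0.5);                 // only pairs at distance <= 0.5 may be matched
  while (matching.multi_augment()) {}  // grow the matching until no augmenting path remains
  std::cout << (matching.perfect() ? "0.5 is feasible" : "0.5 is too small") << std::endl;
  return 0;
}
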
diff --git a/include/gudhi/Internal_point.h b/include/gudhi/Internal_point.h
new file mode 100644
index 00000000..0b2d26fe
--- /dev/null
+++ b/include/gudhi/Internal_point.h
@@ -0,0 +1,91 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Francois Godi
+ *
+ * Copyright (C) 2015 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef INTERNAL_POINT_H_
+#define INTERNAL_POINT_H_
+
+namespace Gudhi {
+
+namespace persistence_diagram {
+
+/** \internal \brief Returns the index used to encode the absence of a point */
+int null_point_index();
+
+/** \internal \brief Internal_point is the internal representation of points; indices are used outside. */
+struct Internal_point {
+ double vec[2];
+ int point_index;
+
+ Internal_point() { }
+
+ Internal_point(double x, double y, int p_i) {
+ vec[0] = x;
+ vec[1] = y;
+ point_index = p_i;
+ }
+
+ double x() const {
+ return vec[ 0 ];
+ }
+
+ double y() const {
+ return vec[ 1 ];
+ }
+
+ double& x() {
+ return vec[ 0 ];
+ }
+
+ double& y() {
+ return vec[ 1 ];
+ }
+
+ bool operator==(const Internal_point& p) const {
+ return point_index == p.point_index;
+ }
+
+ bool operator!=(const Internal_point& p) const {
+ return !(*this == p);
+ }
+};
+
+inline int null_point_index() {
+ return -1;
+}
+
+struct Construct_coord_iterator {
+ typedef const double* result_type;
+
+ const double* operator()(const Internal_point& p) const {
+ return p.vec;
+ }
+
+ const double* operator()(const Internal_point& p, int) const {
+ return p.vec + 2;
+ }
+};
+
+} // namespace persistence_diagram
+
+} // namespace Gudhi
+
+#endif // INTERNAL_POINT_H_
diff --git a/include/gudhi/Kd_tree_search.h b/include/gudhi/Kd_tree_search.h
new file mode 100644
index 00000000..6728d56e
--- /dev/null
+++ b/include/gudhi/Kd_tree_search.h
@@ -0,0 +1,264 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef KD_TREE_SEARCH_H_
+#define KD_TREE_SEARCH_H_
+
+#include <CGAL/Orthogonal_k_neighbor_search.h>
+#include <CGAL/Orthogonal_incremental_neighbor_search.h>
+#include <CGAL/Search_traits.h>
+#include <CGAL/Search_traits_adapter.h>
+#include <CGAL/property_map.h>
+
+#include <boost/property_map/property_map.hpp>
+#include <boost/iterator/counting_iterator.hpp>
+
+#include <cstddef>
+#include <vector>
+
+namespace Gudhi {
+namespace spatial_searching {
+
+
+ /**
+ * \class Kd_tree_search Kd_tree_search.h gudhi/Kd_tree_search.h
+ * \brief Spatial tree data structure to perform (approximate) nearest and farthest neighbor search.
+ *
+ * \ingroup spatial_searching
+ *
+ * \details
+ * The class Kd_tree_search is a tree-based data structure, based on
+ * <a target="_blank" href="http://doc.cgal.org/latest/Spatial_searching/index.html">CGAL dD spatial searching data structures</a>.
+ * It provides a simplified API to perform (approximate) nearest and farthest neighbor searches. Contrary to CGAL's default behavior, the tree
+ * does not store the points themselves, but stores indices.
+ *
+ * There are two types of queries: the <i>k-nearest or k-farthest neighbor query</i>, where <i>k</i> is fixed and the <i>k</i> nearest
+ * or farthest points are computed right away,
+ * and the <i>incremental nearest or farthest neighbor query</i>, where no number of neighbors is provided during the call, as the
+ * neighbors will be computed incrementally when the iterator on the range is incremented.
+ *
+ * \tparam Search_traits must be a model of the <a target="_blank"
+ * href="http://doc.cgal.org/latest/Spatial_searching/classSearchTraits.html">SearchTraits</a>
+ * concept, such as the <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a> class, which
+ * can be static if you know the ambient dimension at compile-time, or dynamic if you don't.
+ * \tparam Point_range is the type of the range that provides the points.
+ * It must be a range whose iterator type is a `RandomAccessIterator`.
+ */
+template <typename Search_traits, typename Point_range>
+class Kd_tree_search {
+ typedef boost::iterator_property_map<
+ typename Point_range::const_iterator,
+ CGAL::Identity_property_map<std::ptrdiff_t> > Point_property_map;
+
+ public:
+ /// The Traits.
+ typedef Search_traits Traits;
+ /// Number type used for distances.
+ typedef typename Traits::FT FT;
+ /// The point type.
+ typedef typename Point_range::value_type Point;
+
+ typedef CGAL::Search_traits<
+ FT, Point,
+ typename Traits::Cartesian_const_iterator_d,
+ typename Traits::Construct_cartesian_const_iterator_d> Traits_base;
+
+ typedef CGAL::Search_traits_adapter<
+ std::ptrdiff_t,
+ Point_property_map,
+ Traits_base> STraits;
+
+ typedef CGAL::Orthogonal_k_neighbor_search<STraits> K_neighbor_search;
+ typedef typename K_neighbor_search::Tree Tree;
+ typedef typename K_neighbor_search::Distance Distance;
+ /// \brief The range returned by a k-nearest or k-farthest neighbor search.
+ /// Its value type is `std::pair<std::size_t, FT>` where `first` is the index
+ /// of a point P and `second` is the squared distance between P and the query point.
+ typedef K_neighbor_search KNS_range;
+
+ typedef CGAL::Orthogonal_incremental_neighbor_search<
+ STraits, Distance, CGAL::Sliding_midpoint<STraits>, Tree>
+ Incremental_neighbor_search;
+ /// \brief The range returned by an incremental nearest or farthest neighbor search.
+ /// Its value type is `std::pair<std::size_t, FT>` where `first` is the index
+ /// of a point P and `second` is the squared distance between P and the query point.
+ typedef Incremental_neighbor_search INS_range;
+
+ /// \brief Constructor
+ /// @param[in] points Const reference to the point range. This range
+ /// is not copied, so it should not be destroyed or modified afterwards.
+ Kd_tree_search(Point_range const& points)
+ : m_points(points),
+ m_tree(boost::counting_iterator<std::ptrdiff_t>(0),
+ boost::counting_iterator<std::ptrdiff_t>(points.size()),
+ typename Tree::Splitter(),
+ STraits(std::begin(points))) {
+ // Build the tree now (we don't want to wait for the first query)
+ m_tree.build();
+ }
+
+ /// \brief Constructor
+ /// @param[in] points Const reference to the point range. This range
+ /// is not copied, so it should not be destroyed or modified afterwards.
+ /// @param[in] only_these_points Specifies the indices of the points that
+ /// should be actually inserted into the tree. The other points are ignored.
+ template <typename Point_indices_range>
+ Kd_tree_search(
+ Point_range const& points,
+ Point_indices_range const& only_these_points)
+ : m_points(points),
+ m_tree(
+ only_these_points.begin(), only_these_points.end(),
+ typename Tree::Splitter(),
+ STraits(std::begin(points))) {
+ // Build the tree now (we don't want to wait for the first query)
+ m_tree.build();
+ }
+
+ /// \brief Constructor
+ /// @param[in] points Const reference to the point range. This range
+ /// is not copied, so it should not be destroyed or modified afterwards.
+ /// @param[in] begin_idx, past_the_end_idx Define the subset of the points that
+ /// should be actually inserted into the tree. The other points are ignored.
+ Kd_tree_search(
+ Point_range const& points,
+ std::size_t begin_idx, std::size_t past_the_end_idx)
+ : m_points(points),
+ m_tree(
+ boost::counting_iterator<std::ptrdiff_t>(begin_idx),
+ boost::counting_iterator<std::ptrdiff_t>(past_the_end_idx),
+ typename Tree::Splitter(),
+ STraits(std::begin(points))) {
+ // Build the tree now (we don't want to wait for the first query)
+ m_tree.build();
+ }
+
+ // Be careful, this function invalidates the tree,
+ // which will be recomputed at the next query
+ void insert(std::ptrdiff_t point_idx) {
+ m_tree.insert(point_idx);
+ }
+
+ /// \brief Search for the k-nearest neighbors from a query point.
+ /// @param[in] p The query point.
+ /// @param[in] k Number of nearest points to search.
+ /// @param[in] sorted Indicates if the computed sequence of k-nearest neighbors needs to be sorted.
+ /// @param[in] eps Approximation factor.
+ /// @return A range containing the k-nearest neighbors.
+  KNS_range query_k_nearest_neighbors(
+    const Point &p,
+ unsigned int k,
+ bool sorted = true,
+ FT eps = FT(0)) const {
+ // Initialize the search structure, and search all N points
+ // Note that we need to pass the Distance explicitly since it needs to
+ // know the property map
+ K_neighbor_search search(
+ m_tree,
+ p,
+ k,
+ eps,
+ true,
+ CGAL::Distance_adapter<std::ptrdiff_t, Point_property_map, CGAL::Euclidean_distance<Traits_base> >(
+ std::begin(m_points)), sorted);
+
+ return search;
+ }
+
+ /// \brief Search incrementally for the nearest neighbors from a query point.
+ /// @param[in] p The query point.
+ /// @param[in] eps Approximation factor.
+ /// @return A range containing the neighbors sorted by their distance to p.
+  /// Not all the neighbors are computed by this function; they are
+  /// computed incrementally as the iterator on the range is incremented.
+ INS_range query_incremental_nearest_neighbors(const Point &p, FT eps = FT(0)) const {
+ // Initialize the search structure, and search all N points
+ // Note that we need to pass the Distance explicitly since it needs to
+ // know the property map
+ Incremental_neighbor_search search(
+ m_tree,
+ p,
+ eps,
+ true,
+ CGAL::Distance_adapter<std::ptrdiff_t, Point_property_map, CGAL::Euclidean_distance<Traits_base> >(
+ std::begin(m_points)) );
+
+ return search;
+ }
+
+ /// \brief Search for the k-farthest points from a query point.
+ /// @param[in] p The query point.
+ /// @param[in] k Number of farthest points to search.
+ /// @param[in] sorted Indicates if the computed sequence of k-farthest neighbors needs to be sorted.
+ /// @param[in] eps Approximation factor.
+ /// @return A range containing the k-farthest neighbors.
+  KNS_range query_k_farthest_neighbors(
+    const Point &p,
+ unsigned int k,
+ bool sorted = true,
+ FT eps = FT(0)) const {
+ // Initialize the search structure, and search all N points
+ // Note that we need to pass the Distance explicitly since it needs to
+ // know the property map
+ K_neighbor_search search(
+ m_tree,
+ p,
+ k,
+ eps,
+ false,
+ CGAL::Distance_adapter<std::ptrdiff_t, Point_property_map, CGAL::Euclidean_distance<Traits_base> >(
+ std::begin(m_points)), sorted);
+
+ return search;
+ }
+
+ /// \brief Search incrementally for the farthest neighbors from a query point.
+ /// @param[in] p The query point.
+ /// @param[in] eps Approximation factor.
+ /// @return A range containing the neighbors sorted by their distance to p.
+  /// Not all the neighbors are computed by this function; they are
+  /// computed incrementally as the iterator on the range is incremented.
+ INS_range query_incremental_farthest_neighbors(const Point &p, FT eps = FT(0)) const {
+ // Initialize the search structure, and search all N points
+ // Note that we need to pass the Distance explicitly since it needs to
+ // know the property map
+ Incremental_neighbor_search search(
+ m_tree,
+ p,
+ eps,
+ false,
+ CGAL::Distance_adapter<std::ptrdiff_t, Point_property_map, CGAL::Euclidean_distance<Traits_base> >(
+ std::begin(m_points)) );
+
+ return search;
+ }
+
+ private:
+ Point_range const& m_points;
+ Tree m_tree;
+};
+
+} // namespace spatial_searching
+} // namespace Gudhi
+
+#endif // KD_TREE_SEARCH_H_
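
A minimal, self-contained sketch (not part of this patch) of the two query flavors documented above, assuming the CGAL::Epick_d kernel in fixed dimension 2; the points and the query are arbitrary.

#include <gudhi/Kd_tree_search.h>
#include <CGAL/Epick_d.h>
#include <iostream>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dimension_tag<2>> Kernel;
  typedef Kernel::Point_d Point;
  std::vector<Point> points = {Point(0., 0.), Point(1., 0.), Point(0., 1.), Point(2., 2.)};

  Gudhi::spatial_searching::Kd_tree_search<Kernel, std::vector<Point>> tree(points);

  // k-nearest query: each result is a pair (point index, squared distance to the query).
  auto knn = tree.query_k_nearest_neighbors(Point(0.1, 0.1), 2);
  for (auto const& neighbor : knn)
    std::cout << neighbor.first << " at squared distance " << neighbor.second << std::endl;

  // Incremental query: neighbors are produced lazily as the iterator advances.
  auto incremental = tree.query_incremental_nearest_neighbors(Point(0.1, 0.1));
  std::cout << "closest index: " << incremental.begin()->first << std::endl;
  return 0;
}
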
diff --git a/include/gudhi/Landmark_choice_by_furthest_point.h b/include/gudhi/Landmark_choice_by_furthest_point.h
deleted file mode 100644
index df93155b..00000000
--- a/include/gudhi/Landmark_choice_by_furthest_point.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* This file is part of the Gudhi Library. The Gudhi library
- * (Geometric Understanding in Higher Dimensions) is a generic C++
- * library for computational topology.
- *
- * Author(s): Siargey Kachanovich
- *
- * Copyright (C) 2015 INRIA Sophia Antipolis-Méditerranée (France)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef LANDMARK_CHOICE_BY_FURTHEST_POINT_H_
-#define LANDMARK_CHOICE_BY_FURTHEST_POINT_H_
-
-#include <boost/range/size.hpp>
-
-#include <limits> // for numeric_limits<>
-#include <iterator>
-#include <algorithm> // for sort
-#include <vector>
-
-namespace Gudhi {
-
-namespace witness_complex {
-
- typedef std::vector<int> typeVectorVertex;
-
- /**
- * \ingroup witness_complex
- * \brief Landmark choice strategy by iteratively adding the furthest witness from the
- * current landmark set as the new landmark.
- * \details It chooses nbL landmarks from a random access range `points` and
- * writes {witness}*{closest landmarks} matrix in `knn`.
- *
- * The type KNearestNeighbors can be seen as
- * Witness_range<Closest_landmark_range<Vertex_handle>>, where
- * Witness_range and Closest_landmark_range are random access ranges
- *
- */
-
- template <typename KNearestNeighbours,
- typename Point_random_access_range>
- void landmark_choice_by_furthest_point(Point_random_access_range const &points,
- int nbL,
- KNearestNeighbours &knn) {
- int nb_points = boost::size(points);
- assert(nb_points >= nbL);
- // distance matrix witness x landmarks
- std::vector<std::vector<double>> wit_land_dist(nb_points, std::vector<double>());
- // landmark list
- typeVectorVertex chosen_landmarks;
-
- knn = KNearestNeighbours(nb_points, std::vector<int>());
- int current_number_of_landmarks = 0; // counter for landmarks
- double curr_max_dist = 0; // used for defining the furhest point from L
- const double infty = std::numeric_limits<double>::infinity(); // infinity (see next entry)
- std::vector< double > dist_to_L(nb_points, infty); // vector of current distances to L from points
-
- // TODO(SK) Consider using rand_r(...) instead of rand(...) for improved thread safety
- // or better yet std::uniform_int_distribution
- int rand_int = rand() % nb_points;
- int curr_max_w = rand_int; // For testing purposes a pseudo-random number is used here
-
- for (current_number_of_landmarks = 0; current_number_of_landmarks != nbL; current_number_of_landmarks++) {
- // curr_max_w at this point is the next landmark
- chosen_landmarks.push_back(curr_max_w);
- unsigned i = 0;
- for (auto& p : points) {
- double curr_dist = euclidean_distance(p, *(std::begin(points) + chosen_landmarks[current_number_of_landmarks]));
- wit_land_dist[i].push_back(curr_dist);
- knn[i].push_back(current_number_of_landmarks);
- if (curr_dist < dist_to_L[i])
- dist_to_L[i] = curr_dist;
- ++i;
- }
- curr_max_dist = 0;
- for (i = 0; i < dist_to_L.size(); i++)
- if (dist_to_L[i] > curr_max_dist) {
- curr_max_dist = dist_to_L[i];
- curr_max_w = i;
- }
- }
- for (int i = 0; i < nb_points; ++i)
- std::sort(std::begin(knn[i]),
- std::end(knn[i]),
- [&wit_land_dist, i](int a, int b) {
- return wit_land_dist[i][a] < wit_land_dist[i][b]; });
- }
-
-} // namespace witness_complex
-
-} // namespace Gudhi
-
-#endif // LANDMARK_CHOICE_BY_FURTHEST_POINT_H_
diff --git a/include/gudhi/Landmark_choice_by_random_point.h b/include/gudhi/Landmark_choice_by_random_point.h
deleted file mode 100644
index ebf6aad1..00000000
--- a/include/gudhi/Landmark_choice_by_random_point.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* This file is part of the Gudhi Library. The Gudhi library
- * (Geometric Understanding in Higher Dimensions) is a generic C++
- * library for computational topology.
- *
- * Author(s): Siargey Kachanovich
- *
- * Copyright (C) 2015 INRIA Sophia Antipolis-Méditerranée (France)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef LANDMARK_CHOICE_BY_RANDOM_POINT_H_
-#define LANDMARK_CHOICE_BY_RANDOM_POINT_H_
-
-#include <boost/range/size.hpp>
-
-#include <queue> // for priority_queue<>
-#include <utility> // for pair<>
-#include <iterator>
-#include <vector>
-#include <set>
-
-namespace Gudhi {
-
-namespace witness_complex {
-
- /**
- * \ingroup witness_complex
- * \brief Landmark choice strategy by taking random vertices for landmarks.
- * \details It chooses nbL distinct landmarks from a random access range `points`
- * and outputs a matrix {witness}*{closest landmarks} in knn.
- *
- * The type KNearestNeighbors can be seen as
- * Witness_range<Closest_landmark_range<Vertex_handle>>, where
- * Witness_range and Closest_landmark_range are random access ranges and
- * Vertex_handle is the label type of a vertex in a simplicial complex.
- * Closest_landmark_range needs to have push_back operation.
- */
-
- template <typename KNearestNeighbours,
- typename Point_random_access_range>
- void landmark_choice_by_random_point(Point_random_access_range const &points,
- int nbL,
- KNearestNeighbours &knn) {
- int nbP = boost::size(points);
- assert(nbP >= nbL);
- std::set<int> landmarks;
- int current_number_of_landmarks = 0; // counter for landmarks
-
- // TODO(SK) Consider using rand_r(...) instead of rand(...) for improved thread safety
- int chosen_landmark = rand() % nbP;
- for (current_number_of_landmarks = 0; current_number_of_landmarks != nbL; current_number_of_landmarks++) {
- while (landmarks.find(chosen_landmark) != landmarks.end())
- chosen_landmark = rand() % nbP;
- landmarks.insert(chosen_landmark);
- }
-
- int dim = boost::size(*std::begin(points));
- typedef std::pair<double, int> dist_i;
- typedef bool (*comp)(dist_i, dist_i);
- knn = KNearestNeighbours(nbP);
- for (int points_i = 0; points_i < nbP; points_i++) {
- std::priority_queue<dist_i, std::vector<dist_i>, comp> l_heap([](dist_i j1, dist_i j2) {
- return j1.first > j2.first;
- });
- std::set<int>::iterator landmarks_it;
- int landmarks_i = 0;
- for (landmarks_it = landmarks.begin(), landmarks_i = 0; landmarks_it != landmarks.end();
- ++landmarks_it, landmarks_i++) {
- dist_i dist = std::make_pair(euclidean_distance(points[points_i], points[*landmarks_it]), landmarks_i);
- l_heap.push(dist);
- }
- for (int i = 0; i < dim + 1; i++) {
- dist_i dist = l_heap.top();
- knn[points_i].push_back(dist.second);
- l_heap.pop();
- }
- }
- }
-
-} // namespace witness_complex
-
-} // namespace Gudhi
-
-#endif // LANDMARK_CHOICE_BY_RANDOM_POINT_H_
diff --git a/include/gudhi/Neighbors_finder.h b/include/gudhi/Neighbors_finder.h
new file mode 100644
index 00000000..bdc47578
--- /dev/null
+++ b/include/gudhi/Neighbors_finder.h
@@ -0,0 +1,192 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Francois Godi
+ *
+ * Copyright (C) 2015 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NEIGHBORS_FINDER_H_
+#define NEIGHBORS_FINDER_H_
+
+// Inclusion order is important for CGAL patch
+#include <CGAL/Kd_tree.h>
+#include <CGAL/Search_traits.h>
+
+#include <gudhi/Persistence_graph.h>
+#include <gudhi/Internal_point.h>
+
+#include <unordered_set>
+#include <vector>
+
+namespace Gudhi {
+
+namespace persistence_diagram {
+
+/** \internal \brief Variant of CGAL::Fuzzy_iso_box to ensure that the box is closed. It isn't clear how necessary that is.
+ */
+struct Square_query {
+ typedef CGAL::Dimension_tag<2> D;
+ typedef Internal_point Point_d;
+ typedef double FT;
+ bool contains(Point_d p) const {
+ return std::abs(p.x()-c.x())<=size && std::abs(p.y()-c.y())<=size;
+ }
+ bool inner_range_intersects(CGAL::Kd_tree_rectangle<FT,D> const&r) const {
+ return
+ r.max_coord(0) >= c.x() - size &&
+ r.min_coord(0) <= c.x() + size &&
+ r.max_coord(1) >= c.y() - size &&
+ r.min_coord(1) <= c.y() + size;
+ }
+ bool outer_range_contains(CGAL::Kd_tree_rectangle<FT,D> const&r) const {
+ return
+ r.min_coord(0) >= c.x() - size &&
+ r.max_coord(0) <= c.x() + size &&
+ r.min_coord(1) >= c.y() - size &&
+ r.max_coord(1) <= c.y() + size;
+ }
+ Point_d c;
+ FT size;
+};
+
+/** \internal \brief data structure used to find any point (including projections) in V near to a query point from U
+ * (which can be a projection).
+ *
+ * V points have to be added manually using their index and before the first pull. A neighbor pulled is automatically
+ * removed.
+ *
+ * \ingroup bottleneck_distance
+ */
+class Neighbors_finder {
+ typedef CGAL::Dimension_tag<2> D;
+ typedef CGAL::Search_traits<double, Internal_point, const double*, Construct_coord_iterator, D> Traits;
+ typedef CGAL::Kd_tree<Traits> Kd_tree;
+
+ public:
+ /** \internal \brief Constructor taking the near distance definition as parameter. */
+ Neighbors_finder(const Persistence_graph& g, double r);
+ /** \internal \brief A point added will be possibly pulled. */
+ void add(int v_point_index);
+  /** \internal \brief Returns and removes a V point near to the U point given as parameter, null_point_index() if
+ * there isn't such a point. */
+ int pull_near(int u_point_index);
+  /** \internal \brief Returns and removes all the V points near to the U point given as parameter. */
+ std::vector<int> pull_all_near(int u_point_index);
+
+ private:
+ const Persistence_graph& g;
+ const double r;
+ Kd_tree kd_t;
+ std::unordered_set<int> projections_f;
+};
+
+/** \internal \brief data structure used to find any point (including projections) in V near to a query point from U
+ * (which can be a projection) in a layered graph layer given as parameter.
+ *
+ * V points have to be added manually using their index and before the first pull. A neighbor pulled is automatically
+ * removed.
+ *
+ * \ingroup bottleneck_distance
+ */
+class Layered_neighbors_finder {
+ public:
+ /** \internal \brief Constructor taking the near distance definition as parameter. */
+ Layered_neighbors_finder(const Persistence_graph& g, double r);
+ /** \internal \brief A point added will be possibly pulled. */
+ void add(int v_point_index, int vlayer);
+  /** \internal \brief Returns and removes a V point near to the U point given as parameter, null_point_index() if
+ * there isn't such a point. */
+ int pull_near(int u_point_index, int vlayer);
+ /** \internal \brief Returns the number of layers. */
+ int vlayers_number() const;
+
+ private:
+ const Persistence_graph& g;
+ const double r;
+ std::vector<std::unique_ptr<Neighbors_finder>> neighbors_finder;
+};
+
+inline Neighbors_finder::Neighbors_finder(const Persistence_graph& g, double r) :
+ g(g), r(r), kd_t(), projections_f() { }
+
+inline void Neighbors_finder::add(int v_point_index) {
+ if (g.on_the_v_diagonal(v_point_index))
+ projections_f.emplace(v_point_index);
+ else
+ kd_t.insert(g.get_v_point(v_point_index));
+}
+
+inline int Neighbors_finder::pull_near(int u_point_index) {
+ int tmp;
+ int c = g.corresponding_point_in_v(u_point_index);
+ if (g.on_the_u_diagonal(u_point_index) && !projections_f.empty()) {
+ // Any pair of projection is at distance 0
+ tmp = *projections_f.cbegin();
+ projections_f.erase(tmp);
+ } else if (projections_f.count(c) && (g.distance(u_point_index, c) <= r)) {
+ // Is the query point near to its projection ?
+ tmp = c;
+ projections_f.erase(tmp);
+ } else {
+ // Is the query point near to a V point in the plane ?
+ Internal_point u_point = g.get_u_point(u_point_index);
+ auto neighbor = kd_t.search_any_point(Square_query{u_point, r});
+ if(!neighbor)
+ return null_point_index();
+ tmp = neighbor->point_index;
+ auto point = g.get_v_point(tmp);
+ int idx = point.point_index;
+ kd_t.remove(point, [idx](Internal_point const&p){return p.point_index == idx;});
+ }
+ return tmp;
+}
+
+inline std::vector<int> Neighbors_finder::pull_all_near(int u_point_index) {
+ std::vector<int> all_pull;
+ int last_pull = pull_near(u_point_index);
+ while (last_pull != null_point_index()) {
+ all_pull.emplace_back(last_pull);
+ last_pull = pull_near(u_point_index);
+ }
+ return all_pull;
+}
+
+inline Layered_neighbors_finder::Layered_neighbors_finder(const Persistence_graph& g, double r) :
+ g(g), r(r), neighbors_finder() { }
+
+inline void Layered_neighbors_finder::add(int v_point_index, int vlayer) {
+ for (int l = neighbors_finder.size(); l <= vlayer; l++)
+ neighbors_finder.emplace_back(std::unique_ptr<Neighbors_finder>(new Neighbors_finder(g, r)));
+ neighbors_finder.at(vlayer)->add(v_point_index);
+}
+
+inline int Layered_neighbors_finder::pull_near(int u_point_index, int vlayer) {
+ if (static_cast<int> (neighbors_finder.size()) <= vlayer)
+ return null_point_index();
+ return neighbors_finder.at(vlayer)->pull_near(u_point_index);
+}
+
+inline int Layered_neighbors_finder::vlayers_number() const {
+ return static_cast<int> (neighbors_finder.size());
+}
+
+} // namespace persistence_diagram
+
+} // namespace Gudhi
+
+#endif // NEIGHBORS_FINDER_H_
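
A small sketch (not part of this patch) of the add/pull protocol described above, driven by a Persistence_graph built from two toy diagrams; it relies on the patched CGAL Kd_tree mentioned in the header.

#include <gudhi/Neighbors_finder.h>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<double, double>> diag1 = {{0., 2.}, {1., 4.}};
  std::vector<std::pair<double, double>> diag2 = {{0., 2.5}, {1., 3.5}};
  Gudhi::persistence_diagram::Persistence_graph graph(diag1, diag2, 0.);

  Gudhi::persistence_diagram::Neighbors_finder nf(graph, 0.6);  // r = 0.6
  for (int v = 0; v < graph.size(); ++v)
    nf.add(v);  // register every V point, projections included
  // Extract one V point within distance 0.6 of U point 0, or null_point_index() if none is left.
  int near = nf.pull_near(0);
  std::cout << near << std::endl;
  return 0;
}
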
diff --git a/include/gudhi/Null_output_iterator.h b/include/gudhi/Null_output_iterator.h
new file mode 100644
index 00000000..42e6e449
--- /dev/null
+++ b/include/gudhi/Null_output_iterator.h
@@ -0,0 +1,48 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2017 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NULL_OUTPUT_ITERATOR_H_
+#define NULL_OUTPUT_ITERATOR_H_
+
+#include <iterator>
+
+namespace Gudhi {
+
+/** An output iterator that ignores whatever it is given. */
+struct Null_output_iterator {
+ typedef std::output_iterator_tag iterator_category;
+ typedef void value_type;
+ typedef void difference_type;
+ typedef void pointer;
+ typedef void reference;
+
+ Null_output_iterator& operator++() {return *this;}
+ Null_output_iterator operator++(int) {return *this;}
+ struct proxy {
+ template<class T>
+ proxy& operator=(T&&){return *this;}
+ };
+ proxy operator*()const{return {};}
+};
+} // namespace Gudhi
+
+#endif // NULL_OUTPUT_ITERATOR_H_
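
A tiny sketch (not part of this patch): the iterator is handy when an algorithm insists on writing output the caller does not need.

#include <gudhi/Null_output_iterator.h>
#include <algorithm>
#include <vector>

int main() {
  std::vector<int> values = {1, 2, 3, 4};
  // Everything written through the iterator is silently discarded.
  std::copy(values.begin(), values.end(), Gudhi::Null_output_iterator());
  return 0;
}
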
diff --git a/include/gudhi/Persistence_graph.h b/include/gudhi/Persistence_graph.h
new file mode 100644
index 00000000..44f4b827
--- /dev/null
+++ b/include/gudhi/Persistence_graph.h
@@ -0,0 +1,188 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Francois Godi
+ *
+ * Copyright (C) 2015 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_GRAPH_H_
+#define PERSISTENCE_GRAPH_H_
+
+#include <gudhi/Internal_point.h>
+
+#ifdef GUDHI_USE_TBB
+#include <tbb/parallel_sort.h>
+#endif
+
+#include <vector>
+#include <algorithm>
+#include <limits> // for numeric_limits
+
+namespace Gudhi {
+
+namespace persistence_diagram {
+
+/** \internal \brief Structure representing a Euclidean bipartite graph containing
+ * the points from the two persistence diagrams (including the projections).
+ *
+ * \ingroup bottleneck_distance
+ */
+class Persistence_graph {
+ public:
+ /** \internal \brief Constructor taking 2 PersistenceDiagrams (concept) as parameters. */
+ template<typename Persistence_diagram1, typename Persistence_diagram2>
+ Persistence_graph(const Persistence_diagram1& diag1, const Persistence_diagram2& diag2, double e);
+ /** \internal \brief Is the given point from U the projection of a point in V ? */
+ bool on_the_u_diagonal(int u_point_index) const;
+ /** \internal \brief Is the given point from V the projection of a point in U ? */
+ bool on_the_v_diagonal(int v_point_index) const;
+ /** \internal \brief Given a point from V, returns the corresponding (projection or projector) point in U. */
+ int corresponding_point_in_u(int v_point_index) const;
+ /** \internal \brief Given a point from U, returns the corresponding (projection or projector) point in V. */
+ int corresponding_point_in_v(int u_point_index) const;
+ /** \internal \brief Given a point from U and a point from V, returns the distance between those points. */
+ double distance(int u_point_index, int v_point_index) const;
+ /** \internal \brief Returns size = |U| = |V|. */
+ int size() const;
+  /** \internal \brief Are there as many infinite points (alive components) in both diagrams? */
+ double bottleneck_alive() const;
+ /** \internal \brief Returns the O(n^2) sorted distances between the points. */
+ std::vector<double> sorted_distances() const;
+  /** \internal \brief Returns an upper bound for the diameter of the convex hull of all non-infinite points */
+ double diameter_bound() const;
+ /** \internal \brief Returns the corresponding internal point */
+ Internal_point get_u_point(int u_point_index) const;
+ /** \internal \brief Returns the corresponding internal point */
+ Internal_point get_v_point(int v_point_index) const;
+
+ private:
+ std::vector<Internal_point> u;
+ std::vector<Internal_point> v;
+ double b_alive;
+};
+
+template<typename Persistence_diagram1, typename Persistence_diagram2>
+Persistence_graph::Persistence_graph(const Persistence_diagram1 &diag1,
+ const Persistence_diagram2 &diag2, double e)
+ : u(), v(), b_alive(0.) {
+ std::vector<double> u_alive;
+ std::vector<double> v_alive;
+ for (auto it = std::begin(diag1); it != std::end(diag1); ++it) {
+ if (std::get<1>(*it) == std::numeric_limits<double>::infinity())
+ u_alive.push_back(std::get<0>(*it));
+ else if (std::get<1>(*it) - std::get<0>(*it) > e)
+ u.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), u.size()));
+ }
+ for (auto it = std::begin(diag2); it != std::end(diag2); ++it) {
+ if (std::get<1>(*it) == std::numeric_limits<double>::infinity())
+ v_alive.push_back(std::get<0>(*it));
+ else if (std::get<1>(*it) - std::get<0>(*it) > e)
+ v.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), v.size()));
+ }
+ if (u.size() < v.size())
+ swap(u, v);
+ std::sort(u_alive.begin(), u_alive.end());
+ std::sort(v_alive.begin(), v_alive.end());
+ if (u_alive.size() != v_alive.size()) {
+ b_alive = std::numeric_limits<double>::infinity();
+ } else {
+ for (auto it_u = u_alive.cbegin(), it_v = v_alive.cbegin(); it_u != u_alive.cend(); ++it_u, ++it_v)
+ b_alive = std::max(b_alive, std::fabs(*it_u - *it_v));
+ }
+}
+
+inline bool Persistence_graph::on_the_u_diagonal(int u_point_index) const {
+ return u_point_index >= static_cast<int> (u.size());
+}
+
+inline bool Persistence_graph::on_the_v_diagonal(int v_point_index) const {
+ return v_point_index >= static_cast<int> (v.size());
+}
+
+inline int Persistence_graph::corresponding_point_in_u(int v_point_index) const {
+ return on_the_v_diagonal(v_point_index) ?
+ v_point_index - static_cast<int> (v.size()) : v_point_index + static_cast<int> (u.size());
+}
+
+inline int Persistence_graph::corresponding_point_in_v(int u_point_index) const {
+ return on_the_u_diagonal(u_point_index) ?
+ u_point_index - static_cast<int> (u.size()) : u_point_index + static_cast<int> (v.size());
+}
+
+inline double Persistence_graph::distance(int u_point_index, int v_point_index) const {
+ if (on_the_u_diagonal(u_point_index) && on_the_v_diagonal(v_point_index))
+ return 0.;
+ Internal_point p_u = get_u_point(u_point_index);
+ Internal_point p_v = get_v_point(v_point_index);
+ return std::max(std::fabs(p_u.x() - p_v.x()), std::fabs(p_u.y() - p_v.y()));
+}
+
+inline int Persistence_graph::size() const {
+ return static_cast<int> (u.size() + v.size());
+}
+
+inline double Persistence_graph::bottleneck_alive() const {
+ return b_alive;
+}
+
+inline std::vector<double> Persistence_graph::sorted_distances() const {
+ std::vector<double> distances;
+ distances.push_back(0.); // for empty diagrams
+ for (int u_point_index = 0; u_point_index < size(); ++u_point_index) {
+ distances.push_back(distance(u_point_index, corresponding_point_in_v(u_point_index)));
+ for (int v_point_index = 0; v_point_index < size(); ++v_point_index)
+ distances.push_back(distance(u_point_index, v_point_index));
+ }
+#ifdef GUDHI_USE_TBB
+ tbb::parallel_sort(distances.begin(), distances.end());
+#else
+ std::sort(distances.begin(), distances.end());
+#endif
+ return distances;
+}
+
+inline Internal_point Persistence_graph::get_u_point(int u_point_index) const {
+ if (!on_the_u_diagonal(u_point_index))
+ return u.at(u_point_index);
+ Internal_point projector = v.at(corresponding_point_in_v(u_point_index));
+ double m = (projector.x() + projector.y()) / 2.;
+ return Internal_point(m, m, u_point_index);
+}
+
+inline Internal_point Persistence_graph::get_v_point(int v_point_index) const {
+ if (!on_the_v_diagonal(v_point_index))
+ return v.at(v_point_index);
+ Internal_point projector = u.at(corresponding_point_in_u(v_point_index));
+ double m = (projector.x() + projector.y()) / 2.;
+ return Internal_point(m, m, v_point_index);
+}
+
+inline double Persistence_graph::diameter_bound() const {
+ double max = 0.;
+ for (auto it = u.cbegin(); it != u.cend(); it++)
+ max = std::max(max, it->y());
+ for (auto it = v.cbegin(); it != v.cend(); it++)
+ max = std::max(max, it->y());
+ return max;
+}
+
+} // namespace persistence_diagram
+
+} // namespace Gudhi
+
+#endif // PERSISTENCE_GRAPH_H_
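
An illustrative sketch (not part of this patch): the constructor accepts any ranges of birth/death pairs readable through std::get, so plain std::pair diagrams work; the numbers are arbitrary.

#include <gudhi/Persistence_graph.h>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<double, double>> diag1 = {{2.7, 3.7}, {9.6, 14.}, {34.2, 34.974}};
  std::vector<std::pair<double, double>> diag2 = {{2.8, 4.45}, {9.5, 14.1}};
  // e = 0: keep every finite point, match infinite points separately.
  Gudhi::persistence_diagram::Persistence_graph graph(diag1, diag2, 0.);

  std::cout << "|U| = |V| = " << graph.size() << std::endl;
  std::cout << "largest candidate distance: " << graph.sorted_distances().back() << std::endl;
  return 0;
}
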
diff --git a/include/gudhi/Persistent_cohomology.h b/include/gudhi/Persistent_cohomology.h
index b31df6a4..672fda48 100644
--- a/include/gudhi/Persistent_cohomology.h
+++ b/include/gudhi/Persistent_cohomology.h
@@ -110,7 +110,7 @@ class Persistent_cohomology {
cell_pool_() {
if (cpx_->num_simplices() > std::numeric_limits<Simplex_key>::max()) {
// num_simplices must be strictly lower than the limit, because a value is reserved for null_key.
- throw std::out_of_range ("The number of simplices is more than Simplex_key type numeric limit.");
+ throw std::out_of_range("The number of simplices is more than Simplex_key type numeric limit.");
}
Simplex_key idx_fil = 0;
for (auto sh : cpx_->filtration_simplex_range()) {
@@ -300,8 +300,7 @@ class Persistent_cohomology {
// with multiplicity. We used to sum the coefficients directly in
// annotations_in_boundary by using a map, we now do it later.
typedef std::pair<Column *, int> annotation_t;
- // Danger: not thread-safe!
- static std::vector<annotation_t> annotations_in_boundary;
+ thread_local std::vector<annotation_t> annotations_in_boundary;
annotations_in_boundary.clear();
int sign = 1 - 2 * (dim_sigma % 2); // \in {-1,1} provides the sign in the
// alternate sum in the boundary.
@@ -604,7 +603,7 @@ class Persistent_cohomology {
*/
std::vector<int> betti_numbers() const {
// Init Betti numbers vector with zeros until Simplicial complex dimension
- std::vector<int> betti_numbers(cpx_->dimension(), 0);
+ std::vector<int> betti_numbers(dim_max_, 0);
for (auto pair : persistent_pairs_) {
// Count never ended persistence intervals
@@ -643,8 +642,7 @@ class Persistent_cohomology {
*/
std::vector<int> persistent_betti_numbers(Filtration_value from, Filtration_value to) const {
// Init Betti numbers vector with zeros until Simplicial complex dimension
- std::vector<int> betti_numbers(cpx_->dimension(), 0);
-
+ std::vector<int> betti_numbers(dim_max_, 0);
for (auto pair : persistent_pairs_) {
// Count persistence intervals that covers the given interval
// null_simplex test : if the function is called with to=+infinity, we still get something useful. And it will
@@ -690,6 +688,22 @@ class Persistent_cohomology {
return persistent_pairs_;
}
+ /** @brief Returns persistence intervals for a given dimension.
+ * @param[in] dimension Dimension to get the birth and death pairs from.
+ * @return A vector of persistence intervals (birth and death) on a fixed dimension.
+ */
+ std::vector< std::pair< Filtration_value , Filtration_value > >
+ intervals_in_dimension(int dimension) {
+ std::vector< std::pair< Filtration_value , Filtration_value > > result;
+ // auto && pair, to avoid unnecessary copying
+ for (auto && pair : persistent_pairs_) {
+ if (cpx_->dimension(get<0>(pair)) == dimension) {
+ result.emplace_back(cpx_->filtration(get<0>(pair)), cpx_->filtration(get<1>(pair)));
+ }
+ }
+ return result;
+ }
+
private:
/*
* Structure representing a cocycle.
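
An illustrative sketch (not part of this patch) of the new intervals_in_dimension() accessor. It assumes the usual Simplex_tree / Field_Zp workflow from the rest of the library, which is not shown in this hunk; the filtration values are made up.

#include <gudhi/Simplex_tree.h>
#include <gudhi/Persistent_cohomology.h>
#include <gudhi/Persistent_cohomology/Field_Zp.h>
#include <iostream>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  // A filtered triangle boundary: three vertices and three edges, so one 1-cycle is never killed.
  std::vector<int> e01 = {0, 1}, e12 = {1, 2}, e02 = {0, 2};
  st.insert_simplex_and_subfaces(e01, 0.1);
  st.insert_simplex_and_subfaces(e12, 0.2);
  st.insert_simplex_and_subfaces(e02, 0.3);
  st.initialize_filtration();

  Gudhi::persistent_cohomology::Persistent_cohomology<Gudhi::Simplex_tree<>,
      Gudhi::persistent_cohomology::Field_Zp> pcoh(st, true);  // true: also pair top-dimensional simplices
  pcoh.init_coefficients(2);               // Z/2Z coefficients
  pcoh.compute_persistent_cohomology(0.);  // keep all intervals
  for (auto interval : pcoh.intervals_in_dimension(1))
    std::cout << "H1 interval: [" << interval.first << ", " << interval.second << ")" << std::endl;
  return 0;
}
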
diff --git a/include/gudhi/Points_3D_off_io.h b/include/gudhi/Points_3D_off_io.h
index 2647f11e..b0d24998 100644
--- a/include/gudhi/Points_3D_off_io.h
+++ b/include/gudhi/Points_3D_off_io.h
@@ -132,12 +132,12 @@ class Points_3D_off_visitor_reader {
*
* @code template<class InputIterator > Point_3::Point_3(double x, double y, double z) @endcode
*
- * @section Example
+ * @section point3doffioexample Example
*
* This example loads points from an OFF file and builds a vector of CGAL points in dimension 3.
* Then, it is asked to display the points.
*
- * @include common/CGAL_3D_points_off_reader.cpp
+ * @include common/example_CGAL_3D_points_off_reader.cpp
*
* When launching:
*
diff --git a/include/gudhi/Points_off_io.h b/include/gudhi/Points_off_io.h
index 74b49386..29af8a8a 100644
--- a/include/gudhi/Points_off_io.h
+++ b/include/gudhi/Points_off_io.h
@@ -73,9 +73,8 @@ class Points_off_visitor_reader {
* @details
* Point_d must have a constructor with the following form:
*
- * @code template<class InputIterator > Point_d::Point_d(int d, InputIterator first, InputIterator last) @endcode
+ * @code template<class InputIterator > Point_d::Point_d(InputIterator first, InputIterator last) @endcode
*
- * where d is the point dimension.
*/
void point(const std::vector<double>& point) {
#ifdef DEBUG_TRACES
@@ -86,7 +85,7 @@ class Points_off_visitor_reader {
std::cout << std::endl;
#endif // DEBUG_TRACES
// Fill the point cloud
- point_cloud.push_back(Point_d(point.size(), point.begin(), point.end()));
+ point_cloud.push_back(Point_d(point.begin(), point.end()));
}
// Off_reader visitor maximal_face implementation - Only points are read
@@ -115,16 +114,16 @@ class Points_off_visitor_reader {
*
* where d is the point dimension.
*
- * \section Example
+ * \section pointoffioexample Example
*
- * This example loads points from an OFF file and builds a vector of CGAL points in dimension d.
+ * This example loads points from an OFF file and builds a vector of points (vector of double).
* Then, it is asked to display the points.
*
- * \include common/CGAL_points_off_reader.cpp
+ * \include common/example_vector_double_points_off_reader.cpp
*
* When launching:
*
- * \code $> ./cgaloffreader ../../data/points/alphacomplexdoc.off
+ * \code $> ./vector_double_off_reader ../../data/points/alphacomplexdoc.off
* \endcode
*
* the program output is:
diff --git a/include/gudhi/Rips_complex.h b/include/gudhi/Rips_complex.h
new file mode 100644
index 00000000..1e4b76a7
--- /dev/null
+++ b/include/gudhi/Rips_complex.h
@@ -0,0 +1,185 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clément Maria, Pawel Dlotko, Vincent Rouvreau
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RIPS_COMPLEX_H_
+#define RIPS_COMPLEX_H_
+
+#include <gudhi/Debug_utils.h>
+#include <gudhi/graph_simplicial_complex.h>
+
+#include <boost/graph/adjacency_list.hpp>
+
+#include <iostream>
+#include <vector>
+#include <map>
+#include <string>
+#include <limits> // for numeric_limits
+#include <utility> // for pair<>
+
+
+namespace Gudhi {
+
+namespace rips_complex {
+
+/**
+ * \class Rips_complex
+ * \brief Rips complex data structure.
+ *
+ * \ingroup rips_complex
+ *
+ * \details
+ * The data structure is a one-skeleton graph, or Rips graph, containing edges when the edge length is less than or
+ * equal to a given threshold. Edge lengths are computed from a user-given point cloud with a given distance
+ * function, or from a distance matrix.
+ *
+ * \tparam Filtration_value is the type used to store the filtration values of the simplicial complex.
+ */
+template<typename Filtration_value>
+class Rips_complex {
+ public:
+ /**
+ * \brief Type of the one skeleton graph stored inside the Rips complex structure.
+ */
+ typedef typename boost::adjacency_list < boost::vecS, boost::vecS, boost::undirectedS
+ , boost::property < vertex_filtration_t, Filtration_value >
+ , boost::property < edge_filtration_t, Filtration_value >> OneSkeletonGraph;
+
+ private:
+ typedef int Vertex_handle;
+
+ public:
+ /** \brief Rips_complex constructor from a list of points.
+ *
+ * @param[in] points Range of points.
+ * @param[in] threshold Rips value.
+ * @param[in] distance distance function that returns a `Filtration_value` from two given points.
+ *
+ * \tparam ForwardPointRange must be a range for which `std::begin` and `std::end` return input iterators on a
+ * point.
+ *
+ * \tparam Distance furnishes `operator()(const Point& p1, const Point& p2)` returning a `Filtration_value`, where
+ * `Point` is a point from the `ForwardPointRange`.
+ */
+ template<typename ForwardPointRange, typename Distance >
+ Rips_complex(const ForwardPointRange& points, Filtration_value threshold, Distance distance) {
+ compute_proximity_graph(points, threshold, distance);
+ }
+
+ /** \brief Rips_complex constructor from a distance matrix.
+ *
+ * @param[in] distance_matrix Range of distances.
+ * @param[in] threshold Rips value.
+ *
+ * \tparam DistanceMatrix must provide a `size()` method and an access `distance_matrix[i][j]` returning
+ * the distance between points \f$i\f$ and \f$j\f$ as long as \f$ 0 \leqslant j < i <
+ * distance\_matrix.size().\f$
+ */
+ template<typename DistanceMatrix>
+ Rips_complex(const DistanceMatrix& distance_matrix, Filtration_value threshold) {
+ compute_proximity_graph(boost::irange((size_t)0, distance_matrix.size()), threshold,
+ [&](size_t i, size_t j){return distance_matrix[j][i];});
+ }
+
+ /** \brief Initializes the simplicial complex from the Rips graph and expands it until a given maximal
+ * dimension.
+ *
+ * \tparam SimplicialComplexForRips must meet `SimplicialComplexForRips` concept.
+ *
+ * @param[in] complex SimplicialComplexForRips to be created.
+ * @param[in] dim_max maximal dimension up to which the Rips graph is expanded.
+ * @exception std::invalid_argument In debug mode, if `complex.num_vertices()` does not return 0.
+ *
+ */
+ template <typename SimplicialComplexForRips>
+ void create_complex(SimplicialComplexForRips& complex, int dim_max) {
+ GUDHI_CHECK(complex.num_vertices() == 0,
+ std::invalid_argument("Rips_complex::create_complex - simplicial complex is not empty"));
+
+ // insert the proximity graph in the simplicial complex
+ complex.insert_graph(rips_skeleton_graph_);
+ // expand the graph until dimension dim_max
+ complex.expansion(dim_max);
+ }
+
+ private:
+ /** \brief Computes the proximity graph of the points.
+ *
+ * If points contains n elements, the proximity graph is the graph with n vertices and an edge [u,v] iff the
+ * distance between points u and v is at most threshold.
+ *
+ * \tparam ForwardPointRange furnishes `.begin()` and `.end()`
+ * methods.
+ *
+ * \tparam Distance furnishes `operator()(const Point& p1, const Point& p2)`, where
+ * `Point` is a point from the `ForwardPointRange`, and that returns a `Filtration_value`.
+ */
+ template< typename ForwardPointRange, typename Distance >
+ void compute_proximity_graph(const ForwardPointRange& points, Filtration_value threshold,
+ Distance distance) {
+ std::vector< std::pair< Vertex_handle, Vertex_handle > > edges;
+ std::vector< Filtration_value > edges_fil;
+
+ // Compute the proximity graph of the points.
+ // If points contains n elements, the proximity graph is the graph with n vertices and an edge [u,v] iff the
+ // distance between points u and v is at most threshold.
+ // --------------------------------------------------------------------------------------------
+ // Creates the vector of edges and its filtration values (returned by distance function)
+ Vertex_handle idx_u = 0;
+ for (auto it_u = std::begin(points); it_u != std::end(points); ++it_u, ++idx_u) {
+ Vertex_handle idx_v = idx_u + 1;
+ for (auto it_v = it_u + 1; it_v != std::end(points); ++it_v, ++idx_v) {
+ Filtration_value fil = distance(*it_u, *it_v);
+ if (fil <= threshold) {
+ edges.emplace_back(idx_u, idx_v);
+ edges_fil.push_back(fil);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Creates the proximity graph from edges and sets the property with the filtration value.
+ // Points are labeled from 0 to idx_u-1, so idx_u is the number of vertices
+ // --------------------------------------------------------------------------------------------
+ // Do not use rips_skeleton_graph_ = OneSkeletonGraph(...): it would deep copy the graph (boost graph is not
+ // move-enabled)
+ rips_skeleton_graph_.~OneSkeletonGraph();
+ new(&rips_skeleton_graph_)OneSkeletonGraph(edges.begin(), edges.end(), edges_fil.begin(), idx_u);
+
+ auto vertex_prop = boost::get(vertex_filtration_t(), rips_skeleton_graph_);
+
+ using vertex_iterator = typename boost::graph_traits<OneSkeletonGraph>::vertex_iterator;
+ vertex_iterator vi, vi_end;
+ for (std::tie(vi, vi_end) = boost::vertices(rips_skeleton_graph_);
+ vi != vi_end; ++vi) {
+ boost::put(vertex_prop, *vi, 0.);
+ }
+ }
+
+ private:
+ OneSkeletonGraph rips_skeleton_graph_;
+};
+
+} // namespace rips_complex
+
+} // namespace Gudhi
+
+#endif // RIPS_COMPLEX_H_
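A minimal, illustrative usage sketch for the class above (assumptions: `Gudhi::Simplex_tree<>` as the target complex, points stored as `std::vector<double>`, and a hand-rolled Euclidean distance lambda; none of these choices is imposed by Rips_complex itself):

    #include <gudhi/Rips_complex.h>
    #include <gudhi/Simplex_tree.h>

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main() {
      using Simplex_tree = Gudhi::Simplex_tree<>;
      using Filtration_value = Simplex_tree::Filtration_value;
      using Point = std::vector<double>;
      using Rips_complex = Gudhi::rips_complex::Rips_complex<Filtration_value>;

      std::vector<Point> points = {{0., 0.}, {1., 0.}, {0., 1.}, {1., 1.}};

      // Plain Euclidean distance between two points (illustrative lambda, not part of this header).
      auto euclidean = [](const Point& p, const Point& q) {
        double sq = 0.;
        for (std::size_t i = 0; i < p.size(); ++i) sq += (p[i] - q[i]) * (p[i] - q[i]);
        return std::sqrt(sq);
      };

      // Build the Rips graph with threshold 1.5, then expand the complex up to dimension 2.
      Rips_complex rips(points, 1.5, euclidean);
      Simplex_tree stree;
      rips.create_complex(stree, 2);

      std::cout << stree.num_simplices() << " simplices" << std::endl;
      return 0;
    }

The second constructor takes a distance matrix instead: any container where `distance_matrix[i][j]` is defined for j < i (for instance a lower-triangular `std::vector<std::vector<Filtration_value>>`) is used the same way.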
diff --git a/include/gudhi/Simplex_tree.h b/include/gudhi/Simplex_tree.h
index 63e3f0e5..317bce23 100644
--- a/include/gudhi/Simplex_tree.h
+++ b/include/gudhi/Simplex_tree.h
@@ -1029,7 +1029,7 @@ class Simplex_tree {
Dictionary_it next = siblings->members().begin();
++next;
- static std::vector<std::pair<Vertex_handle, Node> > inter; // static, not thread-safe.
+ thread_local std::vector<std::pair<Vertex_handle, Node> > inter;
for (Dictionary_it s_h = siblings->members().begin();
s_h != siblings->members().end(); ++s_h, ++next) {
Simplex_handle root_sh = find_vertex(s_h->first);
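The hunk above replaces a function-local `static` scratch vector, shared by all threads (hence the "not thread-safe" note on the removed line), with a `thread_local` one. A generic sketch of the pattern, independent of Simplex_tree (illustrative names only):

    #include <vector>

    // Each thread gets its own scratch buffer, so concurrent calls no longer race
    // on a single shared static vector, while the buffer still keeps its capacity
    // between calls (the original reason for making it static).
    int count_even(const std::vector<int>& input) {
      thread_local std::vector<int> scratch;  // one instance per thread
      scratch.clear();
      for (int v : input)
        if (v % 2 == 0) scratch.push_back(v);
      return static_cast<int>(scratch.size());
    }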
diff --git a/include/gudhi/Skeleton_blocker.h b/include/gudhi/Skeleton_blocker.h
index 822282fd..32fe411c 100644
--- a/include/gudhi/Skeleton_blocker.h
+++ b/include/gudhi/Skeleton_blocker.h
@@ -1,24 +1,24 @@
- /* This file is part of the Gudhi Library. The Gudhi library
- * (Geometric Understanding in Higher Dimensions) is a generic C++
- * library for computational topology.
- *
- * Author(s): David Salinas
- *
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): David Salinas
+ *
+ * Copyright (C) 2014 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
#ifndef SKELETON_BLOCKER_H_
#define SKELETON_BLOCKER_H_
@@ -37,11 +37,12 @@ namespace Gudhi {
namespace skeleton_blocker {
-/** \defgroup skbl Skeleton-Blocker
+/** \defgroup skbl Skeleton-Blocker
+@{
\author David Salinas
-\section Introduction
+\section skblintroduction Introduction
The Skeleton-Blocker data-structure proposes a light encoding for simplicial complexes by storing only an *implicit* representation of its
simplices
\cite socg_blockers_2011,\cite blockers2012.
@@ -52,7 +53,7 @@ This data-structure handles all simplicial complexes operations such as
are operations that do not require simplex enumeration such as edge iteration, link computation or simplex contraction.
-\section Definitions
+\section skbldefinitions Definitions
We recall briefly classical definitions of simplicial complexes
\cite Munkres-elementsalgtop1984.
@@ -63,7 +64,7 @@ when \f$ \tau \neq \sigma\f$ we say that \f$ \tau\f$ is a proper-face of \f$ \si
An abstract simplicial complex is a set of simplices that contains all the faces of its simplices.
The 1-skeleton of a simplicial complex (or its graph) consists of its elements of dimension lower than 2.
-*\image html "ds_representation.png" "Skeleton-blocker representation" width=20cm
+ *\image html "ds_representation.png" "Skeleton-blocker representation" width=20cm
To encode a simplicial complex, one can encode all its simplices.
@@ -85,7 +86,7 @@ in next figure. Storing the graph and blockers of such simplicial complexes is m
their simplices.
-*\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres" width=10cm
+ *\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres" width=10cm
@@ -107,7 +108,7 @@ and point access in addition.
-\subsection Visitor
+\subsection skblvisitor Visitor
The class Skeleton_blocker_complex has a visitor that is called when usual operations such as adding an edge or removing a vertex are performed.
You may want to use this visitor to compute statistics or to update another data-structure (for instance this visitor is heavily used in the \ref contr package).
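A rough sketch of such a visitor, derived from the Dummy_complex_visitor defined later in this patch (how the visitor is registered with a complex is not shown here; only the class and callback names declared in Skeleton_blocker_complex_visitor.h are assumed):

    #include <gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h>

    // Counts edge insertions by redefining only the callback of interest;
    // every other event keeps the no-op behaviour of Dummy_complex_visitor.
    template <typename Vertex_handle>
    class Edge_counting_visitor
        : public Gudhi::skeleton_blocker::Dummy_complex_visitor<Vertex_handle> {
     public:
      void on_add_edge_without_blockers(Vertex_handle, Vertex_handle) {
        ++num_edges_added_;
      }
      unsigned num_edges_added() const { return num_edges_added_; }

     private:
      unsigned num_edges_added_ = 0;
    };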
@@ -115,7 +116,7 @@ You may want to use this visitor to compute statistics or to update another data
-\section Example
+\section skblexample Example
\subsection Iterating Iterating through vertices, edges, blockers and simplices
@@ -127,46 +128,46 @@ such as the Simplex Tree. The following example computes the Euler Characteristi
of a simplicial complex.
\code{.cpp}
- typedef Skeleton_blocker_complex<Skeleton_blocker_simple_traits> Complex;
- typedef Complex::Vertex_handle Vertex_handle;
- typedef Complex::Simplex Simplex;
-
- const int n = 15;
-
- // build a full complex with 10 vertices and 2^n-1 simplices
- Complex complex;
- for(int i=0;i<n;i++)
- complex.add_vertex();
- for(int i=0;i<n;i++)
- for(int j=0;j<i;j++)
- complex.add_edge_without_blockers(Vertex_handle(i),Vertex_handle(j));
-
- // this is just to illustrate iterators, to count number of vertices
- // or edges, complex.num_vertices() and complex.num_edges() are
- // more appropriated!
- unsigned num_vertices = 0;
- for(auto v : complex.vertex_range()){
- ++num_vertices;
- }
-
- unsigned num_edges = 0;
- for(auto e : complex.edge_range())
- ++num_edges;
-
- unsigned euler = 0;
- unsigned num_simplices = 0;
- // we use a reference to a simplex instead of a copy
- // value here because a simplex is a set of integers
- // and copying it cost time
- for(const Simplex & s : complex.star_simplex_range()){
- ++num_simplices;
- if(s.dimension()%2 == 0)
- euler += 1;
- else
- euler -= 1;
- }
- std::cout << "Saw "<<num_vertices<<" vertices, "<<num_edges<<" edges and "<<num_simplices<<" simplices"<<std::endl;
- std::cout << "The Euler Characteristic is "<<euler<<std::endl;
+ typedef Skeleton_blocker_complex<Skeleton_blocker_simple_traits> Complex;
+ typedef Complex::Vertex_handle Vertex_handle;
+ typedef Complex::Simplex Simplex;
+
+ const int n = 15;
+
+ // build a full complex with n vertices and 2^n-1 simplices
+ Complex complex;
+ for(int i=0;i<n;i++)
+ complex.add_vertex();
+ for(int i=0;i<n;i++)
+ for(int j=0;j<i;j++)
+ complex.add_edge_without_blockers(Vertex_handle(i),Vertex_handle(j));
+
+ // this is just to illustrate iterators; to count the number of vertices
+ // or edges, complex.num_vertices() and complex.num_edges() are
+ // more appropriate!
+ unsigned num_vertices = 0;
+ for(auto v : complex.vertex_range()){
+ ++num_vertices;
+ }
+
+ unsigned num_edges = 0;
+ for(auto e : complex.edge_range())
+ ++num_edges;
+
+ unsigned euler = 0;
+ unsigned num_simplices = 0;
+ // we use a reference to a simplex instead of a copy
+ // here because a simplex is a set of integers
+ // and copying it costs time
+ for(const Simplex & s : complex.star_simplex_range()){
+ ++num_simplices;
+ if(s.dimension()%2 == 0)
+ euler += 1;
+ else
+ euler -= 1;
+ }
+ std::cout << "Saw "<<num_vertices<<" vertices, "<<num_edges<<" edges and "<<num_simplices<<" simplices"<<std::endl;
+ std::cout << "The Euler Characteristic is "<<euler<<std::endl;
\endcode
@@ -181,43 +182,43 @@ The Euler Characteristic is 1
\subsection s Constructing a skeleton-blocker from a list of maximal faces or from a list of faces
\code{.cpp}
- std::vector<Simplex> simplices;
-
- //add 4 triangles of a tetrahedron 0123
- simplices.push_back(Simplex(Vertex_handle(0),Vertex_handle(1),Vertex_handle(2)));
- simplices.push_back(Simplex(Vertex_handle(1),Vertex_handle(2),Vertex_handle(3)));
- simplices.push_back(Simplex(Vertex_handle(3),Vertex_handle(0),Vertex_handle(2)));
- simplices.push_back(Simplex(Vertex_handle(3),Vertex_handle(0),Vertex_handle(1)));
-
- Complex complex;
- //get complex from top faces
- make_complex_from_top_faces(complex,simplices.begin(),simplices.end());
-
- std::cout << "Simplices:"<<std::endl;
- for(const Simplex & s : complex.star_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
-
- //One blocker as simplex 0123 is not in the complex but all its proper faces are.
- std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
-
- //now build a complex from its full list of simplices
- simplices.clear();
- simplices.push_back(Simplex(Vertex_handle(0)));
- simplices.push_back(Simplex(Vertex_handle(1)));
- simplices.push_back(Simplex(Vertex_handle(2)));
- simplices.push_back(Simplex(Vertex_handle(0),Vertex_handle(1)));
- simplices.push_back(Simplex(Vertex_handle(1),Vertex_handle(2)));
- simplices.push_back(Simplex(Vertex_handle(2),Vertex_handle(0)));
- complex = Complex(simplices.begin(),simplices.end());
-
- std::cout << "Simplices:"<<std::endl;
- for(const Simplex & s : complex.star_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
-
- //One blocker as simplex 012 is not in the complex but all its proper faces are.
- std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
+ std::vector<Simplex> simplices;
+
+ //add 4 triangles of a tetrahedron 0123
+ simplices.push_back(Simplex(Vertex_handle(0),Vertex_handle(1),Vertex_handle(2)));
+ simplices.push_back(Simplex(Vertex_handle(1),Vertex_handle(2),Vertex_handle(3)));
+ simplices.push_back(Simplex(Vertex_handle(3),Vertex_handle(0),Vertex_handle(2)));
+ simplices.push_back(Simplex(Vertex_handle(3),Vertex_handle(0),Vertex_handle(1)));
+
+ Complex complex;
+ //get complex from top faces
+ make_complex_from_top_faces(complex,simplices.begin(),simplices.end());
+
+ std::cout << "Simplices:"<<std::endl;
+ for(const Simplex & s : complex.star_simplex_range())
+ std::cout << s << " ";
+ std::cout << std::endl;
+
+ // One blocker, as simplex 0123 is not in the complex but all its proper faces are.
+ std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
+
+ //now build a complex from its full list of simplices
+ simplices.clear();
+ simplices.push_back(Simplex(Vertex_handle(0)));
+ simplices.push_back(Simplex(Vertex_handle(1)));
+ simplices.push_back(Simplex(Vertex_handle(2)));
+ simplices.push_back(Simplex(Vertex_handle(0),Vertex_handle(1)));
+ simplices.push_back(Simplex(Vertex_handle(1),Vertex_handle(2)));
+ simplices.push_back(Simplex(Vertex_handle(2),Vertex_handle(0)));
+ complex = Complex(simplices.begin(),simplices.end());
+
+ std::cout << "Simplices:"<<std::endl;
+ for(const Simplex & s : complex.star_simplex_range())
+ std::cout << s << " ";
+ std::cout << std::endl;
+
+ // One blocker, as simplex 012 is not in the complex but all its proper faces are.
+ std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
\endcode
\verbatim
./SkeletonBlockerFromSimplices
@@ -236,12 +237,12 @@ The author wishes to thank Dominique Attali and André Lieutier for
their collaboration to write the two initial papers
\cite socg_blockers_2011,\cite blockers2012
about this data-structure
- and also Dominique for leaving him use a prototype.
+ and also Dominique for letting him use a prototype.
-\copyright GNU General Public License v3.
-*/
-/** @} */ // end defgroup
+\copyright GNU General Public License v3.
+
+@} */
} // namespace skeleton_blocker
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h
index 32f40a4b..ba3636bc 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_complex_visitor.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SKELETON_BLOCKER_COMPLEX_VISITOR_H_
#define SKELETON_BLOCKER_SKELETON_BLOCKER_COMPLEX_VISITOR_H_
@@ -27,7 +28,7 @@
namespace Gudhi {
namespace skeleton_blocker {
-// todo rajouter les const
+// TODO(DS): to be constified
/**
*@class Skeleton_blocker_complex_visitor
@@ -36,7 +37,7 @@ namespace skeleton_blocker {
template<typename Vertex_handle>
class Skeleton_blocker_complex_visitor {
public:
- virtual ~Skeleton_blocker_complex_visitor() {}
+ virtual ~Skeleton_blocker_complex_visitor() { }
virtual void on_add_vertex(Vertex_handle) = 0;
virtual void on_remove_vertex(Vertex_handle) = 0;
@@ -61,9 +62,9 @@ class Skeleton_blocker_complex_visitor {
virtual void on_swaped_edge(Vertex_handle a, Vertex_handle b,
Vertex_handle x) = 0;
virtual void on_add_blocker(
- const Skeleton_blocker_simplex<Vertex_handle>&) = 0;
+ const Skeleton_blocker_simplex<Vertex_handle>&) = 0;
virtual void on_delete_blocker(
- const Skeleton_blocker_simplex<Vertex_handle>*) = 0;
+ const Skeleton_blocker_simplex<Vertex_handle>*) = 0;
};
/**
@@ -73,24 +74,23 @@ class Skeleton_blocker_complex_visitor {
*/
template<typename Vertex_handle>
class Dummy_complex_visitor : public Skeleton_blocker_complex_visitor<
- Vertex_handle> {
+Vertex_handle> {
public:
- void on_add_vertex(Vertex_handle) {
- }
- void on_remove_vertex(Vertex_handle) {
- }
- void on_add_edge_without_blockers(Vertex_handle a, Vertex_handle b) {
- }
- void on_remove_edge(Vertex_handle a, Vertex_handle b) {
- }
- void on_changed_edge(Vertex_handle a, Vertex_handle b) {
- }
- void on_swaped_edge(Vertex_handle a, Vertex_handle b, Vertex_handle x) {
- }
- void on_add_blocker(const Skeleton_blocker_simplex<Vertex_handle>&) {
- }
- void on_delete_blocker(const Skeleton_blocker_simplex<Vertex_handle>*) {
- }
+ void on_add_vertex(Vertex_handle) { }
+
+ void on_remove_vertex(Vertex_handle) { }
+
+ void on_add_edge_without_blockers(Vertex_handle a, Vertex_handle b) { }
+
+ void on_remove_edge(Vertex_handle a, Vertex_handle b) { }
+
+ void on_changed_edge(Vertex_handle a, Vertex_handle b) { }
+
+ void on_swaped_edge(Vertex_handle a, Vertex_handle b, Vertex_handle x) { }
+
+ void on_add_blocker(const Skeleton_blocker_simplex<Vertex_handle>&) { }
+
+ void on_delete_blocker(const Skeleton_blocker_simplex<Vertex_handle>*) { }
};
/**
@@ -100,29 +100,36 @@ class Dummy_complex_visitor : public Skeleton_blocker_complex_visitor<
*/
template<typename Vertex_handle>
class Print_complex_visitor : public Skeleton_blocker_complex_visitor<
- Vertex_handle> {
+Vertex_handle> {
public:
void on_add_vertex(Vertex_handle v) {
std::cerr << "on_add_vertex:" << v << std::endl;
}
+
void on_remove_vertex(Vertex_handle v) {
std::cerr << "on_remove_vertex:" << v << std::endl;
}
+
void on_add_edge_without_blockers(Vertex_handle a, Vertex_handle b) {
std::cerr << "on_add_edge_without_blockers:" << a << "," << b << std::endl;
}
+
void on_remove_edge(Vertex_handle a, Vertex_handle b) {
std::cerr << "on_remove_edge:" << a << "," << b << std::endl;
}
+
void on_changed_edge(Vertex_handle a, Vertex_handle b) {
std::cerr << "on_changed_edge:" << a << "," << b << std::endl;
}
+
void on_swaped_edge(Vertex_handle a, Vertex_handle b, Vertex_handle x) {
std::cerr << "on_swaped_edge:" << a << "," << b << "," << x << std::endl;
}
+
void on_add_blocker(const Skeleton_blocker_simplex<Vertex_handle>& b) {
std::cerr << "on_add_blocker:" << b << std::endl;
}
+
void on_delete_blocker(const Skeleton_blocker_simplex<Vertex_handle>* b) {
std::cerr << "on_delete_blocker:" << b << std::endl;
}
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_link_superior.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_link_superior.h
index 3bfb5d11..d4b60613 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_link_superior.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_link_superior.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SKELETON_BLOCKER_LINK_SUPERIOR_H_
#define SKELETON_BLOCKER_SKELETON_BLOCKER_LINK_SUPERIOR_H_
@@ -36,7 +37,7 @@ template<class ComplexType> class Skeleton_blocker_sub_complex;
*/
template<typename ComplexType>
class Skeleton_blocker_link_superior : public Skeleton_blocker_link_complex<
- ComplexType> {
+ComplexType> {
typedef typename ComplexType::Edge_handle Edge_handle;
typedef typename ComplexType::boost_vertex_handle boost_vertex_handle;
@@ -54,20 +55,17 @@ class Skeleton_blocker_link_superior : public Skeleton_blocker_link_complex<
typedef typename ComplexType::Root_simplex_handle::Simplex_vertex_const_iterator IdSimplexConstIterator;
Skeleton_blocker_link_superior()
- : Skeleton_blocker_link_complex<ComplexType>(true) {
- }
+ : Skeleton_blocker_link_complex<ComplexType>(true) { }
Skeleton_blocker_link_superior(const ComplexType & parent_complex,
Simplex& alpha_parent_adress)
: Skeleton_blocker_link_complex<ComplexType>(parent_complex,
- alpha_parent_adress, true) {
- }
+ alpha_parent_adress, true) { }
Skeleton_blocker_link_superior(const ComplexType & parent_complex,
Vertex_handle a_parent_adress)
: Skeleton_blocker_link_complex<ComplexType>(parent_complex,
- a_parent_adress, true) {
- }
+ a_parent_adress, true) { }
};
} // namespace skeleton_blocker
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_off_io.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_off_io.h
index ba46c49e..747e60f1 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_off_io.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_off_io.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SKELETON_BLOCKER_OFF_IO_H_
#define SKELETON_BLOCKER_SKELETON_BLOCKER_OFF_IO_H_
@@ -49,8 +50,8 @@ class Skeleton_blocker_off_flag_visitor_reader {
load_only_points_(load_only_points) { }
void init(int dim, int num_vertices, int num_faces, int num_edges) {
- // todo do an assert to check that this number are correctly read
- // todo reserve size for vector points
+ // TODO(DS): do an assert to check that these numbers are correctly read
+ // TODO(DS): reserve size for the points vector
}
void point(const std::vector<double>& point) {
@@ -108,7 +109,7 @@ class Skeleton_blocker_off_visitor_reader {
void done() {
complex_ = make_complex_from_top_faces<Complex>(maximal_faces_.begin(), maximal_faces_.end(),
- points_.begin(), points_.end() );
+ points_.begin(), points_.end());
}
};
@@ -140,7 +141,7 @@ class Skeleton_blocker_off_reader {
}
/**
- * return true iff reading did not meet problems.
+ * return true if reading did not encounter problems.
*/
bool is_valid() const {
return valid_;
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_geometric_traits.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_geometric_traits.h
index fb4a1106..275376e6 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_geometric_traits.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_geometric_traits.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SKELETON_BLOCKER_SIMPLE_GEOMETRIC_TRAITS_H_
#define SKELETON_BLOCKER_SKELETON_BLOCKER_SIMPLE_GEOMETRIC_TRAITS_H_
@@ -39,7 +40,7 @@ namespace skeleton_blocker {
*/
template<typename GeometryTrait>
struct Skeleton_blocker_simple_geometric_traits :
- public Skeleton_blocker_simple_traits {
+public Skeleton_blocker_simple_traits {
public:
typedef GeometryTrait GT;
typedef typename GT::Point Point;
@@ -57,19 +58,20 @@ struct Skeleton_blocker_simple_geometric_traits :
Point& point() {
return point_;
}
+
const Point& point() const {
return point_;
}
};
class Simple_geometric_edge :
- public Skeleton_blocker_simple_traits::Graph_edge {
+ public Skeleton_blocker_simple_traits::Graph_edge {
int index_;
public:
Simple_geometric_edge()
: Skeleton_blocker_simple_traits::Graph_edge(),
- index_(-1) {
- }
+ index_(-1) { }
+
/**
* @brief Allows to modify the index of the edge.
* The indices of the edge are used to store heap information
@@ -78,6 +80,7 @@ struct Skeleton_blocker_simple_geometric_traits :
int& index() {
return index_;
}
+
int index() const {
return index_;
}
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
index 31bec3b6..3835cf77 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SKELETON_BLOCKER_SIMPLE_TRAITS_H_
#define SKELETON_BLOCKER_SKELETON_BLOCKER_SIMPLE_TRAITS_H_
@@ -48,9 +49,9 @@ struct Skeleton_blocker_simple_traits {
*/
struct Root_vertex_handle {
typedef int boost_vertex_handle;
+
explicit Root_vertex_handle(boost_vertex_handle val = -1)
- : vertex(val) {
- }
+ : vertex(val) { }
boost_vertex_handle vertex;
bool operator!=(const Root_vertex_handle& other) const {
@@ -65,8 +66,8 @@ struct Skeleton_blocker_simple_traits {
return this->vertex < other.vertex;
}
- friend std::ostream& operator <<(std::ostream& o,
- const Root_vertex_handle & v) {
+ friend std::ostream& operator<<(std::ostream& o,
+ const Root_vertex_handle & v) {
o << v.vertex;
return o;
}
@@ -74,11 +75,13 @@ struct Skeleton_blocker_simple_traits {
struct Vertex_handle {
typedef int boost_vertex_handle;
+
explicit Vertex_handle(boost_vertex_handle val = -1)
- : vertex(val) {
- }
+ : vertex(val) { }
- operator int() const { return static_cast<int>(vertex); }
+ operator int() const {
+ return static_cast<int> (vertex);
+ }
boost_vertex_handle vertex;
@@ -94,7 +97,7 @@ struct Skeleton_blocker_simple_traits {
return this->vertex < other.vertex;
}
- friend std::ostream& operator <<(std::ostream& o, const Vertex_handle & v) {
+ friend std::ostream& operator<<(std::ostream& o, const Vertex_handle & v) {
o << v.vertex;
return o;
}
@@ -105,21 +108,24 @@ struct Skeleton_blocker_simple_traits {
Root_vertex_handle id_;
public:
- virtual ~Graph_vertex() {
- }
+ virtual ~Graph_vertex() { }
void activate() {
is_active_ = true;
}
+
void deactivate() {
is_active_ = false;
}
+
bool is_active() const {
return is_active_;
}
+
void set_id(Root_vertex_handle i) {
id_ = i;
}
+
Root_vertex_handle get_id() const {
return id_;
}
@@ -130,7 +136,7 @@ struct Skeleton_blocker_simple_traits {
return res.str();
}
- friend std::ostream& operator <<(std::ostream& o, const Graph_vertex & v) {
+ friend std::ostream& operator<<(std::ostream& o, const Graph_vertex & v) {
o << v.to_string();
return o;
}
@@ -144,13 +150,13 @@ struct Skeleton_blocker_simple_traits {
public:
Graph_edge()
: a_(-1),
- b_(-1),
- index_(-1) {
- }
+ b_(-1),
+ index_(-1) { }
int& index() {
return index_;
}
+
int index() const {
return index_;
}
@@ -168,7 +174,7 @@ struct Skeleton_blocker_simple_traits {
return b_;
}
- friend std::ostream& operator <<(std::ostream& o, const Graph_edge & v) {
+ friend std::ostream& operator<<(std::ostream& o, const Graph_edge & v) {
o << "(" << v.a_ << "," << v.b_ << " - id = " << v.index();
return o;
}
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
index 3c7f1dd5..aa6f2215 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -63,7 +63,6 @@ class Skeleton_blocker_simplex {
*/
//@{
- // Skeleton_blocker_simplex():simplex_set() {}
void clear() {
simplex_set.clear();
}
@@ -89,8 +88,7 @@ class Skeleton_blocker_simplex {
add_vertex(v);
}
- void add_vertices() {
- }
+ void add_vertices() { }
/**
* Initialize a simplex with a string such as {0,1,2}
@@ -192,7 +190,6 @@ class Skeleton_blocker_simplex {
return simplex_set.crend();
}
-
typename std::set<T>::iterator begin() {
return simplex_set.begin();
}
@@ -236,6 +233,7 @@ class Skeleton_blocker_simplex {
assert(!empty());
return *(simplex_set.rbegin());
}
+
/**
* @return true iff the simplex contains the simplex a.
*/
@@ -351,8 +349,8 @@ class Skeleton_blocker_simplex {
//@}
- friend std::ostream& operator <<(std::ostream& o,
- const Skeleton_blocker_simplex & sigma) {
+ friend std::ostream& operator<<(std::ostream& o,
+ const Skeleton_blocker_simplex & sigma) {
bool first = true;
o << "{";
for (auto i : sigma) {
diff --git a/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h b/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
index 196fe8c0..fadf6619 100644
--- a/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
+++ b/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -159,7 +159,7 @@ class Skeleton_blocker_sub_complex : public ComplexType {
if (simplex.contains(*blocker)) {
Root_simplex_handle blocker_root(parent_complex.get_id(*(blocker)));
Simplex blocker_restr(
- *(this->get_simplex_address(blocker_root)));
+ *(this->get_simplex_address(blocker_root)));
this->add_blocker(new Simplex(blocker_restr));
}
}
@@ -190,14 +190,15 @@ class Skeleton_blocker_sub_complex : public ComplexType {
// */
// boost::optional<Simplex> get_address(const Root_simplex_handle & s) const;
-// private:
+ // private:
/**
* same as get_address except that it will return a simplex in any case.
* The vertices that were not found are not added.
*/
// @remark should be private but problem with VS
+
std::vector<boost::optional<Vertex_handle> > get_addresses(
- const Root_simplex_handle & s) const {
+ const Root_simplex_handle & s) const {
std::vector < boost::optional<Vertex_handle> > res;
for (auto i : s) {
res.push_back(get_address(i));
@@ -214,14 +215,14 @@ class Skeleton_blocker_sub_complex : public ComplexType {
*/
template<typename ComplexType>
bool proper_face_in_union(
- Skeleton_blocker_sub_complex<ComplexType> & link,
- std::vector<boost::optional<typename ComplexType::Vertex_handle> > & addresses_sigma_in_link,
- int vertex_to_be_ignored) {
+ Skeleton_blocker_sub_complex<ComplexType> & link,
+ std::vector<boost::optional<typename ComplexType::Vertex_handle> > & addresses_sigma_in_link,
+ std::size_t vertex_to_be_ignored) {
// we test that all vertices of 'addresses_sigma_in_link' but 'vertex_to_be_ignored'
// are in link1; if it is the case, we construct the corresponding simplex
bool vertices_sigma_are_in_link = true;
typename ComplexType::Simplex sigma_in_link;
- for (int i = 0; i < addresses_sigma_in_link.size(); ++i) {
+ for (std::size_t i = 0; i < addresses_sigma_in_link.size(); ++i) {
if (i != vertex_to_be_ignored) {
if (!addresses_sigma_in_link[i]) {
vertices_sigma_are_in_link = false;
@@ -236,43 +237,24 @@ bool proper_face_in_union(
return vertices_sigma_are_in_link && link.contains(sigma_in_link);
}
-/*
- template<typename ComplexType>
- bool
- proper_faces_in_union(Skeleton_blocker_simplex<typename ComplexType::Root_vertex_handle> & sigma, Skeleton_blocker_sub_complex<ComplexType> & link1, Skeleton_blocker_sub_complex<ComplexType> & link2)
- {
- typedef typename ComplexType::Vertex_handle Vertex_handle;
- std::vector<boost::optional<Vertex_handle> > addresses_sigma_in_link1 = link1.get_addresses(sigma);
- std::vector<boost::optional<Vertex_handle> > addresses_sigma_in_link2 = link2.get_addresses(sigma);
-
- for (int current_index = 0; current_index < addresses_sigma_in_link1.size(); ++current_index)
- {
-
- if (!proper_face_in_union(link1, addresses_sigma_in_link1, current_index)
- && !proper_face_in_union(link2, addresses_sigma_in_link2, current_index)){
- return false;
- }
- }
- return true;
- }*/
-
// Remark: this function should be friend in order to leave get_adresses private
// however doing so seems currently not possible due to a Visual Studio bug C2668
// "the compiler does not support partial ordering of template functions as specified in the C++ Standard"
// http://www.serkey.com/error-c2668-ambiguous-call-to-overloaded-function-bb45ft.html
+
template<typename ComplexType>
bool proper_faces_in_union(
- Skeleton_blocker_simplex<typename ComplexType::Root_vertex_handle> & sigma,
- Skeleton_blocker_sub_complex<ComplexType> & link1,
- Skeleton_blocker_sub_complex<ComplexType> & link2) {
+ Skeleton_blocker_simplex<typename ComplexType::Root_vertex_handle> & sigma,
+ Skeleton_blocker_sub_complex<ComplexType> & link1,
+ Skeleton_blocker_sub_complex<ComplexType> & link2) {
typedef typename ComplexType::Vertex_handle Vertex_handle;
std::vector < boost::optional<Vertex_handle> > addresses_sigma_in_link1 =
link1.get_addresses(sigma);
std::vector < boost::optional<Vertex_handle> > addresses_sigma_in_link2 =
link2.get_addresses(sigma);
- for (int current_index = 0; current_index < addresses_sigma_in_link1.size();
- ++current_index) {
+ for (std::size_t current_index = 0; current_index < addresses_sigma_in_link1.size();
+ ++current_index) {
if (!proper_face_in_union(link1, addresses_sigma_in_link1, current_index)
&& !proper_face_in_union(link2, addresses_sigma_in_link2,
current_index)) {
@@ -289,4 +271,3 @@ namespace skbl = skeleton_blocker;
} // namespace Gudhi
#endif // SKELETON_BLOCKER_SKELETON_BLOCKER_SUB_COMPLEX_H_
-
diff --git a/include/gudhi/Skeleton_blocker/internal/Top_faces.h b/include/gudhi/Skeleton_blocker/internal/Top_faces.h
index 39d95661..2b681752 100644
--- a/include/gudhi/Skeleton_blocker/internal/Top_faces.h
+++ b/include/gudhi/Skeleton_blocker/internal/Top_faces.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_INTERNAL_TOP_FACES_H_
#define SKELETON_BLOCKER_INTERNAL_TOP_FACES_H_
@@ -69,4 +70,4 @@ namespace skbl = skeleton_blocker;
} // namespace Gudhi
-#endif // SKELETON_BLOCKER_INTERNAL_TOP_FACES_H_
+#endif // SKELETON_BLOCKER_INTERNAL_TOP_FACES_H_
diff --git a/include/gudhi/Skeleton_blocker/internal/Trie.h b/include/gudhi/Skeleton_blocker/internal/Trie.h
index cdc47b8a..2c9602fa 100644
--- a/include/gudhi/Skeleton_blocker/internal/Trie.h
+++ b/include/gudhi/Skeleton_blocker/internal/Trie.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Méditerranée (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,7 +21,6 @@
*
*/
-
#ifndef SKELETON_BLOCKER_INTERNAL_TRIE_H_
#define SKELETON_BLOCKER_INTERNAL_TRIE_H_
@@ -148,7 +147,7 @@ struct Trie {
}
void remove_leaf() {
- assert(is_leaf);
+ assert(is_leaf());
if (!is_root())
parent_->childs.erase(this);
}
@@ -240,7 +239,7 @@ struct Tries {
std::vector<Simplex> next_dimension_simplices() const {
std::vector<Simplex> res;
- while (!to_see_.empty() && to_see_.front()->simplex().dimension() == current_dimension_) {
+ while (!(to_see_.empty()) && (to_see_.front()->simplex().dimension() == current_dimension_)) {
res.emplace_back(to_see_.front()->simplex());
for (auto child : to_see_.front()->childs)
to_see_.push_back(child.get());
@@ -257,7 +256,7 @@ struct Tries {
private:
mutable std::deque<STrie*> to_see_;
- mutable unsigned current_dimension_ = 0;
+ mutable int current_dimension_ = 0;
std::vector<STrie*> cofaces_;
};
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_blockers_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_blockers_iterators.h
index 4dbc9ed3..d2fff960 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_blockers_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_blockers_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_BLOCKERS_ITERATORS_H_
#define SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_BLOCKERS_ITERATORS_H_
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_edges_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_edges_iterators.h
index 15618932..b90dcf34 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_edges_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_edges_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_EDGES_ITERATORS_H_
#define SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_EDGES_ITERATORS_H_
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_iterators.h
index cc3ed276..1351614f 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_simplices_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_simplices_iterators.h
index 3b941be5..2acdb555 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_simplices_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_simplices_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_SIMPLICES_ITERATORS_H_
#define SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_SIMPLICES_ITERATORS_H_
@@ -68,10 +69,11 @@ public boost::iterator_facade < Simplex_around_vertex_iterator<SkeletonBlockerCo
Vertex_handle v;
std::shared_ptr<Link> link_v;
std::shared_ptr<Trie> trie;
- std::list<Trie*> nodes_to_be_seen; // todo deque
+ // TODO(DS): use a deque instead
+ std::list<Trie*> nodes_to_be_seen;
public:
- Simplex_around_vertex_iterator() : complex(0) {}
+ Simplex_around_vertex_iterator() : complex(0) { }
Simplex_around_vertex_iterator(const Complex* complex_, Vertex_handle v_) :
complex(complex_),
@@ -81,15 +83,16 @@ public boost::iterator_facade < Simplex_around_vertex_iterator<SkeletonBlockerCo
compute_trie_and_nodes_to_be_seen();
}
- // todo avoid useless copy
- // todo currently just work if copy begin iterator
+ // TODO(DS): avoid useless copy
+ // TODO(DS): currently only works if the begin iterator is copied
Simplex_around_vertex_iterator(const Simplex_around_vertex_iterator& other) :
complex(other.complex),
v(other.v),
link_v(other.link_v),
trie(other.trie),
nodes_to_be_seen(other.nodes_to_be_seen) {
- if (!other.is_end()) {}
+ if (!other.is_end()) {
+ }
}
/**
@@ -159,7 +162,8 @@ public boost::iterator_facade < Simplex_around_vertex_iterator<SkeletonBlockerCo
bool both_non_empty = !nodes_to_be_seen.empty() && !other.nodes_to_be_seen.empty();
- if (!both_non_empty) return false; // one is empty the other is not
+ // one is empty the other is not
+ if (!both_non_empty) return false;
bool same_node = (**(nodes_to_be_seen.begin()) == **(other.nodes_to_be_seen.begin()));
return same_node;
@@ -238,7 +242,6 @@ public boost::iterator_facade < Simplex_iterator<SkeletonBlockerComplex>
}
private:
- // todo return to private
Simplex_iterator(const Complex* complex, bool end) :
complex_(complex) {
set_end();
@@ -306,7 +309,7 @@ public boost::iterator_facade < Simplex_iterator<SkeletonBlockerComplex>
/**
* Iterator through the maximal faces of the coboundary of a simplex.
- */
+ */
template<typename SkeletonBlockerComplex, typename Link>
class Simplex_coboundary_iterator :
public boost::iterator_facade < Simplex_coboundary_iterator<SkeletonBlockerComplex, Link>
@@ -329,12 +332,12 @@ public boost::iterator_facade < Simplex_coboundary_iterator<SkeletonBlockerCompl
Complex_vertex_iterator link_vertex_end;
public:
- Simplex_coboundary_iterator() : complex(0) {}
+ Simplex_coboundary_iterator() : complex(0) { }
Simplex_coboundary_iterator(const Complex* complex_, const Simplex& sigma_) :
complex(complex_),
sigma(sigma_),
- //need only vertices of the link hence the true flag
+ // need only vertices of the link hence the true flag
link(new Link(*complex_, sigma_, false, true)) {
auto link_vertex_range = link->vertex_range();
current_vertex = link_vertex_range.begin();
@@ -347,9 +350,9 @@ public boost::iterator_facade < Simplex_coboundary_iterator<SkeletonBlockerCompl
link(other.link),
current_vertex(other.current_vertex),
link_vertex_end(other.link_vertex_end) { }
-
+
// returns an iterator to the end
- Simplex_coboundary_iterator(const Complex* complex_,const Simplex& sigma_, bool end) :
+ Simplex_coboundary_iterator(const Complex* complex_, const Simplex& sigma_, bool end) :
complex(complex_),
sigma(sigma_) {
// to represent an end iterator without having to build a useless link, we use
@@ -361,7 +364,7 @@ public boost::iterator_facade < Simplex_coboundary_iterator<SkeletonBlockerCompl
return complex->convert_handle_from_another_complex(*link, link_vh);
}
-public:
+ public:
friend std::ostream& operator<<(std::ostream& stream, const Simplex_coboundary_iterator& sci) {
return stream;
}
@@ -369,8 +372,8 @@ public:
// assume that iterator points to the same complex and comes from the same simplex
bool equal(const Simplex_coboundary_iterator& other) const {
assert(complex == other.complex && sigma == other.sigma);
- if(is_end()) return other.is_end();
- if(other.is_end()) return is_end();
+ if (is_end()) return other.is_end();
+ if (other.is_end()) return is_end();
return *current_vertex == *(other.current_vertex);
}
@@ -384,13 +387,12 @@ public:
return res;
}
-private:
+ private:
bool is_end() const {
return !link || current_vertex == link_vertex_end;
}
};
-
} // namespace skeleton_blocker
namespace skbl = skeleton_blocker;
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
index b2dd9a21..736941dd 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_TRIANGLES_ITERATORS_H_
#define SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_TRIANGLES_ITERATORS_H_
@@ -135,7 +136,7 @@ typename SkeletonBlockerComplex::Simplex const
Triangle_iterator(const SkeletonBlockerComplex* complex) :
complex_(complex),
current_vertex_(complex->vertex_range().begin()),
- current_triangle_(complex, *current_vertex_), // xxx this line is problematic is the complex is empty
+ current_triangle_(complex, *current_vertex_), // this line is problematic if the complex is empty
is_end_(false) {
assert(!complex->empty());
gotoFirstTriangle();
@@ -172,7 +173,8 @@ typename SkeletonBlockerComplex::Simplex const
bool both_arent_finished = !is_finished() && !other.is_finished();
// if the two iterators are not finished, they must have the same state
return (complex_ == other.complex_) && (both_are_finished || ((both_arent_finished) &&
- current_vertex_ == other.current_vertex_ && current_triangle_ == other.current_triangle_));
+ current_vertex_ == other.current_vertex_ &&
+ current_triangle_ == other.current_triangle_));
}
Simplex dereference() const {
@@ -183,8 +185,10 @@ typename SkeletonBlockerComplex::Simplex const
// goto the next vertex that has a triangle pending or the
// end vertex iterator if none exists
void goto_next_vertex() {
- assert(current_triangle_.finished()); // we mush have consume all triangles passing through the vertex
- assert(!is_finished()); // we must not be done
+ // we must have consumed all triangles passing through the vertex
+ assert(current_triangle_.finished());
+ // we must not be done
+ assert(!is_finished());
++current_vertex_;
diff --git a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_vertices_iterators.h b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_vertices_iterators.h
index f06cab71..9e9ae961 100644
--- a/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_vertices_iterators.h
+++ b/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_vertices_iterators.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_VERTICES_ITERATORS_H_
#define SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_VERTICES_ITERATORS_H_
@@ -103,7 +104,7 @@ class Vertex_iterator : public boost::iterator_facade< Vertex_iterator <Skeleton
};
template<typename SkeletonBlockerComplex>
-class Neighbors_vertices_iterator: public boost::iterator_facade < Neighbors_vertices_iterator<SkeletonBlockerComplex>
+class Neighbors_vertices_iterator : public boost::iterator_facade < Neighbors_vertices_iterator<SkeletonBlockerComplex>
, typename SkeletonBlockerComplex::Vertex_handle const
, boost::forward_traversal_tag
, typename SkeletonBlockerComplex::Vertex_handle const> {
@@ -122,9 +123,6 @@ class Neighbors_vertices_iterator: public boost::iterator_facade < Neighbors_ver
boost_adjacency_iterator end_;
public:
- // boost_adjacency_iterator ai, ai_end;
- // for (tie(ai, ai_end) = adjacent_vertices(v.vertex, skeleton); ai != ai_end; ++ai) {
-
Neighbors_vertices_iterator() : complex(NULL) { }
Neighbors_vertices_iterator(const Complex* complex_, Vertex_handle v_) :
@@ -157,7 +155,7 @@ class Neighbors_vertices_iterator: public boost::iterator_facade < Neighbors_ver
}
private:
- // todo remove this ugly hack
+ // TODO(DS): remove this ugly hack
void set_end() {
current_ = end_;
}
@@ -170,5 +168,3 @@ namespace skbl = skeleton_blocker;
} // namespace Gudhi
#endif // SKELETON_BLOCKER_ITERATORS_SKELETON_BLOCKERS_VERTICES_ITERATORS_H_
-
-
diff --git a/include/gudhi/Skeleton_blocker_complex.h b/include/gudhi/Skeleton_blocker_complex.h
index 7a6d1d50..4f052ba5 100644
--- a/include/gudhi/Skeleton_blocker_complex.h
+++ b/include/gudhi/Skeleton_blocker_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -211,7 +211,6 @@ class Skeleton_blocker_complex {
add_edge_without_blockers(e.first, e.second);
}
-
template<typename SimpleHandleOutputIterator>
void add_blockers(SimpleHandleOutputIterator simplices_begin, SimpleHandleOutputIterator simplices_end) {
Tries<Simplex> tries(num_vertices(), simplices_begin, simplices_end);
@@ -414,7 +413,8 @@ class Skeleton_blocker_complex {
/**
*/
bool contains_vertex(Vertex_handle u) const {
- if (u.vertex < 0 || u.vertex >= boost::num_vertices(skeleton))
+ Vertex_handle num_vertices(boost::num_vertices(skeleton));
+ if (u.vertex < 0 || u.vertex >= num_vertices)
return false;
return (*this)[u].is_active();
}
@@ -441,11 +441,11 @@ class Skeleton_blocker_complex {
* @brief Given an Id return the address of the vertex having this Id in the complex.
* @remark For a simplicial complex, the address is the id but it may not be the case for a SubComplex.
*/
- virtual boost::optional<Vertex_handle> get_address(
- Root_vertex_handle id) const {
+ virtual boost::optional<Vertex_handle> get_address(Root_vertex_handle id) const {
boost::optional<Vertex_handle> res;
- if (id.vertex < boost::num_vertices(skeleton))
- res = Vertex_handle(id.vertex); // xxx
+ int num_vertices = boost::num_vertices(skeleton);
+ if (id.vertex < num_vertices)
+ res = Vertex_handle(id.vertex);
return res;
}
@@ -560,7 +560,7 @@ class Skeleton_blocker_complex {
return res;
}
- /**
+ /**
* @brief Adds all edges of s in the complex.
*/
void add_edge(const Simplex& s) {
@@ -591,7 +591,6 @@ class Skeleton_blocker_complex {
return *edge_handle;
}
-
/**
* @brief Adds all edges of s in the complex without adding blockers.
*/
@@ -844,12 +843,13 @@ class Skeleton_blocker_complex {
boost_adjacency_iterator ai, ai_end;
for (tie(ai, ai_end) = adjacent_vertices(v.vertex, skeleton); ai != ai_end;
++ai) {
+ Vertex_handle value(*ai);
if (keep_only_superior) {
- if (*ai > v.vertex) {
- n.add_vertex(Vertex_handle(*ai));
+ if (value > v.vertex) {
+ n.add_vertex(value);
}
} else {
- n.add_vertex(Vertex_handle(*ai));
+ n.add_vertex(value);
}
}
}
@@ -929,7 +929,7 @@ class Skeleton_blocker_complex {
// xxx rename get_address and add a using declaration in sub_complex
boost::optional<Simplex> get_simplex_address(
- const Root_simplex_handle& s) const {
+ const Root_simplex_handle& s) const {
boost::optional<Simplex> res;
Simplex s_address;
@@ -1001,10 +1001,9 @@ class Skeleton_blocker_complex {
return std::distance(triangles.begin(), triangles.end());
}
-
/*
* @brief returns the number of simplices of a given dimension in the complex.
- */
+ */
size_t num_simplices() const {
auto simplices = complex_simplex_range();
return std::distance(simplices.begin(), simplices.end());
@@ -1012,8 +1011,8 @@ class Skeleton_blocker_complex {
/*
* @brief returns the number of simplices of a given dimension in the complex.
- */
- size_t num_simplices(unsigned dimension) const {
+ */
+ size_t num_simplices(int dimension) const {
// TODO(DS): iterator on k-simplices
size_t res = 0;
for (const auto& s : complex_simplex_range())
diff --git a/include/gudhi/Skeleton_blocker_geometric_complex.h b/include/gudhi/Skeleton_blocker_geometric_complex.h
index 1130ca9f..95331b7a 100644
--- a/include/gudhi/Skeleton_blocker_geometric_complex.h
+++ b/include/gudhi/Skeleton_blocker_geometric_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_GEOMETRIC_COMPLEX_H_
#define SKELETON_BLOCKER_GEOMETRIC_COMPLEX_H_
diff --git a/include/gudhi/Skeleton_blocker_link_complex.h b/include/gudhi/Skeleton_blocker_link_complex.h
index 1bd66289..4db075b0 100644
--- a/include/gudhi/Skeleton_blocker_link_complex.h
+++ b/include/gudhi/Skeleton_blocker_link_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_LINK_COMPLEX_H_
#define SKELETON_BLOCKER_LINK_COMPLEX_H_
@@ -39,7 +40,7 @@ template<class ComplexType> class Skeleton_blocker_sub_complex;
*/
template<typename ComplexType>
class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
- ComplexType> {
+ComplexType> {
template<typename T> friend class Skeleton_blocker_link_superior;
typedef typename ComplexType::Edge_handle Edge_handle;
@@ -60,8 +61,7 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
typedef typename ComplexType::Root_simplex_handle::Simplex_vertex_const_iterator Root_simplex_handle_iterator;
explicit Skeleton_blocker_link_complex(bool only_superior_vertices = false)
- : only_superior_vertices_(only_superior_vertices) {
- }
+ : only_superior_vertices_(only_superior_vertices) { }
/**
* If the parameter only_superior_vertices is true,
@@ -95,10 +95,10 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
*/
Skeleton_blocker_link_complex(const ComplexType & parent_complex,
Edge_handle edge, bool only_superior_vertices =
- false)
+ false)
: only_superior_vertices_(only_superior_vertices) {
Simplex alpha_simplex(parent_complex.first_vertex(edge),
- parent_complex.second_vertex(edge));
+ parent_complex.second_vertex(edge));
build_link(parent_complex, alpha_simplex);
}
@@ -151,7 +151,7 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
bool only_superior_vertices) {
// for a vertex we know exactly the number of vertices of the link (and the size of the corresponding vector
this->skeleton.m_vertices.reserve(
- parent_complex.degree(alpha_parent_adress));
+ parent_complex.degree(alpha_parent_adress));
// For all vertex 'v' in this intersection, we go through all its adjacent blockers.
// If one blocker minus 'v' is included in alpha then the vertex is not in the link complex.
@@ -169,21 +169,21 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
return;
for (auto x_link = this->vertex_range().begin();
- x_link != this->vertex_range().end(); ++x_link) {
+ x_link != this->vertex_range().end(); ++x_link) {
for (auto y_link = x_link; ++y_link != this->vertex_range().end();) {
Vertex_handle x_parent = *parent_complex.get_address(
- this->get_id(*x_link));
+ this->get_id(*x_link));
Vertex_handle y_parent = *parent_complex.get_address(
- this->get_id(*y_link));
+ this->get_id(*y_link));
if (parent_complex.contains_edge(x_parent, y_parent)) {
// we check that there is no blocker subset of alpha passing trough x and y
bool new_edge = true;
for (auto blocker_parent : parent_complex.const_blocker_range(
- x_parent)) {
+ x_parent)) {
if (!is_alpha_blocker || *blocker_parent != alpha_parent_adress) {
if (blocker_parent->contains(y_parent)) {
new_edge = !(alpha_parent_adress.contains_difference(
- *blocker_parent, x_parent, y_parent));
+ *blocker_parent, x_parent, y_parent));
if (!new_edge)
break;
}
@@ -201,8 +201,8 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
* corresponding address in 'other_complex'.
* It assumes that other_complex have a vertex 'this.get_id(address)'
*/
- boost::optional<Vertex_handle> give_equivalent_vertex(
- const ComplexType & other_complex, Vertex_handle address) const {
+ boost::optional<Vertex_handle> give_equivalent_vertex(const ComplexType & other_complex,
+ Vertex_handle address) const {
Root_vertex_handle id((*this)[address].get_id());
return other_complex.get_address(id);
}
@@ -269,7 +269,7 @@ class Skeleton_blocker_link_complex : public Skeleton_blocker_sub_complex<
bool only_vertices = false) {
assert(is_alpha_blocker || parent_complex.contains(alpha_parent_adress));
compute_link_vertices(parent_complex, alpha_parent_adress, only_superior_vertices_);
- if(!only_vertices) {
+ if (!only_vertices) {
compute_link_edges(parent_complex, alpha_parent_adress, is_alpha_blocker);
compute_link_blockers(parent_complex, alpha_parent_adress, is_alpha_blocker);
}
diff --git a/include/gudhi/Skeleton_blocker_simplifiable_complex.h b/include/gudhi/Skeleton_blocker_simplifiable_complex.h
index 171efd4b..544e02e8 100644
--- a/include/gudhi/Skeleton_blocker_simplifiable_complex.h
+++ b/include/gudhi/Skeleton_blocker_simplifiable_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#ifndef SKELETON_BLOCKER_SIMPLIFIABLE_COMPLEX_H_
#define SKELETON_BLOCKER_SIMPLIFIABLE_COMPLEX_H_
@@ -33,7 +34,7 @@ namespace Gudhi {
namespace skeleton_blocker {
/**
- * Returns true iff the blocker 'sigma' is popable.
+ * Returns true if the blocker 'sigma' is popable.
* To define popable, let us call 'L' the complex that
* consists in the current complex without the blocker 'sigma'.
* A blocker 'sigma' is then "popable" if the link of 'sigma'
@@ -145,8 +146,7 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_star(Vertex_handle v) {
* whenever the dimension of tau is at least 2.
*/
template<typename SkeletonBlockerDS>
-void Skeleton_blocker_complex<SkeletonBlockerDS>::update_blockers_after_remove_star_of_vertex_or_edge(
- const Simplex& simplex_to_be_removed) {
+void Skeleton_blocker_complex<SkeletonBlockerDS>::update_blockers_after_remove_star_of_vertex_or_edge(const Simplex& simplex_to_be_removed) {
std::list <Blocker_handle> blockers_to_update;
if (simplex_to_be_removed.empty()) return;
@@ -224,8 +224,6 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::add_simplex(const Simplex& sig
add_blockers_after_simplex_insertion(sigma);
}
-
-
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::add_blockers_after_simplex_insertion(Simplex sigma) {
if (sigma.dimension() < 1) return;
@@ -385,7 +383,8 @@ Skeleton_blocker_complex<SkeletonBlockerDS>::contract_edge(Vertex_handle a, Vert
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::get_blockers_to_be_added_after_contraction(Vertex_handle a,
- Vertex_handle b, std::set<Simplex>& blockers_to_add) {
+ Vertex_handle b,
+ std::set<Simplex>& blockers_to_add) {
blockers_to_add.clear();
typedef Skeleton_blocker_link_complex<Skeleton_blocker_complex<SkeletonBlockerDS> > LinkComplexType;
diff --git a/include/gudhi/Strong_witness_complex.h b/include/gudhi/Strong_witness_complex.h
new file mode 100644
index 00000000..a973ddb7
--- /dev/null
+++ b/include/gudhi/Strong_witness_complex.h
@@ -0,0 +1,185 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2015 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef STRONG_WITNESS_COMPLEX_H_
+#define STRONG_WITNESS_COMPLEX_H_
+
+#include <gudhi/Active_witness/Active_witness.h>
+
+#include <utility>
+#include <vector>
+#include <list>
+#include <limits>
+
+namespace Gudhi {
+
+namespace witness_complex {
+
+/* \private
+ * \class Strong_witness_complex
+ * \brief Constructs strong witness complex for a given table of nearest landmarks with respect to witnesses.
+ * \ingroup witness_complex
+ *
+ * \tparam Nearest_landmark_table_ needs to be a range of a range of pairs of nearest landmarks and distances.
+ * The class Nearest_landmark_table_::value_type must be a copiable range.
+ * The range of pairs must admit a member type 'iterator'. The dereference type
+ * of the pair range iterator needs to be 'std::pair<std::size_t, double>'.
+ */
+template< class Nearest_landmark_table_ >
+class Strong_witness_complex {
+ private:
+ typedef typename Nearest_landmark_table_::value_type Nearest_landmark_range;
+ typedef std::size_t Witness_id;
+ typedef std::size_t Landmark_id;
+ typedef std::pair<Landmark_id, double> Id_distance_pair;
+ typedef Active_witness<Id_distance_pair, Nearest_landmark_range> ActiveWitness;
+ typedef std::list< ActiveWitness > ActiveWitnessList;
+ typedef std::vector< Landmark_id > typeVectorVertex;
+ typedef std::vector<Nearest_landmark_range> Nearest_landmark_table_internal;
+ typedef Landmark_id Vertex_handle;
+
+ protected:
+ Nearest_landmark_table_internal nearest_landmark_table_;
+
+ public:
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /* @name Constructor
+ */
+
+ //@{
+
+ Strong_witness_complex() {
+ }
+
+ /**
+ * \brief Initializes member variables before constructing simplicial complex.
+ * \details Records nearest landmark table.
+ * @param[in] nearest_landmark_table needs to be a range of a range of pairs of nearest landmarks and distances.
+ * The class Nearest_landmark_table_::value_type must be a copiable range.
+ * The range of pairs must admit a member type 'iterator'. The dereference type
+ * of the pair range iterator needs to be 'std::pair<std::size_t, double>'.
+ */
+ Strong_witness_complex(Nearest_landmark_table_ const & nearest_landmark_table)
+ : nearest_landmark_table_(std::begin(nearest_landmark_table), std::end(nearest_landmark_table)) {
+ }
+
+ /** \brief Outputs the strong witness complex of relaxation 'max_alpha_square'
+ * in a simplicial complex data structure.
+ * \details The function returns true if the construction is successful and false otherwise.
+ * @param[out] complex Simplicial complex data structure, which is a model of
+ * SimplicialComplexForWitness concept.
+ * @param[in] max_alpha_square Maximal squared relaxation parameter.
+ * @param[in] limit_dimension Represents the maximal dimension of the simplicial complex
+ * (default value = no limit).
+ */
+ template < typename SimplicialComplexForWitness >
+ bool create_complex(SimplicialComplexForWitness& complex,
+ double max_alpha_square,
+ Landmark_id limit_dimension = std::numeric_limits<Landmark_id>::max()) const {
+ Landmark_id complex_dim = 0;
+ if (complex.num_vertices() > 0) {
+ std::cerr << "Strong witness complex cannot create complex - complex is not empty.\n";
+ return false;
+ }
+ if (max_alpha_square < 0) {
+ std::cerr << "Strong witness complex cannot create complex - squared relaxation parameter must be "
+ << "non-negative.\n";
+ return false;
+ }
+ if (limit_dimension < 0) {
+ std::cerr << "Strong witness complex cannot create complex - limit dimension must be non-negative.\n";
+ return false;
+ }
+ for (auto w : nearest_landmark_table_) {
+ ActiveWitness aw(w);
+ typeVectorVertex simplex;
+ typename ActiveWitness::iterator aw_it = aw.begin();
+ double lim_dist2 = aw.begin()->second + max_alpha_square;  // distances and the relaxation parameter are doubles
+ while ((Landmark_id)simplex.size() <= limit_dimension && aw_it != aw.end() && aw_it->second < lim_dist2) {
+ simplex.push_back(aw_it->first);
+ complex.insert_simplex_and_subfaces(simplex, aw_it->second - aw.begin()->second);
+ aw_it++;
+ }
+ // continue inserting limD-faces of the following simplices
+ typeVectorVertex& vertices = simplex; // 'simplex' now will be called vertices
+ while (aw_it != aw.end() && aw_it->second < lim_dist2) {
+ typeVectorVertex facet = {};
+ add_all_faces_of_dimension(limit_dimension, vertices, vertices.begin(), aw_it,
+ aw_it->second - aw.begin()->second, facet, complex);
+ vertices.push_back(aw_it->first);
+ aw_it++;
+ }
+ if ((Landmark_id)simplex.size() - 1 > complex_dim)
+ complex_dim = simplex.size() - 1;
+ }
+ complex.set_dimension(complex_dim);
+ return true;
+ }
+
+ private:
+ /* \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
+ * The iterator indicates how far we can take landmarks to form simplices.
+ * 'simplex' is the prefix of the simplices to insert.
+ * The landmark pointed to by aw_it is added to all formed simplices.
+ */
+ template < typename SimplicialComplexForWitness >
+ void add_all_faces_of_dimension(Landmark_id dim,
+ typeVectorVertex& vertices,
+ typename typeVectorVertex::iterator curr_it,
+ typename ActiveWitness::iterator aw_it,
+ double filtration_value,
+ typeVectorVertex& simplex,
+ SimplicialComplexForWitness& sc) const {
+ if (dim > 0) {
+ while (curr_it != vertices.end()) {
+ simplex.push_back(*curr_it);
+ ++curr_it;
+ add_all_faces_of_dimension(dim-1,
+ vertices,
+ curr_it,
+ aw_it,
+ filtration_value,
+ simplex,
+ sc);
+ simplex.pop_back();
+ add_all_faces_of_dimension(dim,
+ vertices,
+ curr_it,
+ aw_it,
+ filtration_value,
+ simplex,
+ sc);
+ }
+ } else if (dim == 0) {
+ simplex.push_back(aw_it->first);
+ sc.insert_simplex_and_subfaces(simplex, filtration_value);
+ simplex.pop_back();
+ }
+ }
+ //@}
+};
+
+} // namespace witness_complex
+
+} // namespace Gudhi
+
+#endif // STRONG_WITNESS_COMPLEX_H_
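A minimal usage sketch of the interface declared above (illustrative only, not part of the patch): the concrete table type, the sample distances and the parameter values are assumptions, while the class and member names are taken from the header.

#include <gudhi/Strong_witness_complex.h>
#include <gudhi/Simplex_tree.h>

#include <cstddef>
#include <utility>
#include <vector>

int main() {
  // One row per witness: its nearest landmarks as (landmark id, squared distance),
  // sorted by increasing distance, as required by the documentation above.
  typedef std::vector<std::pair<std::size_t, double> > Nearest_landmark_range;
  typedef std::vector<Nearest_landmark_range> Nearest_landmark_table;
  Nearest_landmark_table nlt = {
      {{0, 0.0}, {1, 0.1}, {2, 0.7}},   // witness 0
      {{1, 0.0}, {2, 0.2}, {0, 0.5}},   // witness 1
      {{2, 0.0}, {0, 0.3}, {1, 0.4}}};  // witness 2

  Gudhi::witness_complex::Strong_witness_complex<Nearest_landmark_table> swc(nlt);
  Gudhi::Simplex_tree<> complex;
  swc.create_complex(complex, 0.5 /* max_alpha_square */, 2 /* limit_dimension */);
  return 0;
}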
diff --git a/include/gudhi/Tangential_complex.h b/include/gudhi/Tangential_complex.h
new file mode 100644
index 00000000..cfc82eb1
--- /dev/null
+++ b/include/gudhi/Tangential_complex.h
@@ -0,0 +1,2276 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TANGENTIAL_COMPLEX_H_
+#define TANGENTIAL_COMPLEX_H_
+
+#include <gudhi/Tangential_complex/config.h>
+#include <gudhi/Tangential_complex/Simplicial_complex.h>
+#include <gudhi/Tangential_complex/utilities.h>
+#include <gudhi/Kd_tree_search.h>
+#include <gudhi/console_color.h>
+#include <gudhi/Clock.h>
+#include <gudhi/Simplex_tree.h>
+
+#include <CGAL/Default.h>
+#include <CGAL/Dimension.h>
+#include <CGAL/function_objects.h> // for CGAL::Identity
+#include <CGAL/Epick_d.h>
+#include <CGAL/Regular_triangulation_traits_adapter.h>
+#include <CGAL/Regular_triangulation.h>
+#include <CGAL/Delaunay_triangulation.h>
+#include <CGAL/Combination_enumerator.h>
+#include <CGAL/point_generators_d.h>
+
+#include <Eigen/Core>
+#include <Eigen/Eigen>
+
+#include <boost/optional.hpp>
+#include <boost/iterator/transform_iterator.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+#include <boost/range/counting_range.hpp>
+#include <boost/math/special_functions/factorials.hpp>
+#include <boost/container/flat_set.hpp>
+
+#include <tuple>
+#include <vector>
+#include <set>
+#include <utility>
+#include <sstream>
+#include <iostream>
+#include <limits>
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <cmath> // for std::sqrt
+#include <string>
+#include <cstddef> // for std::size_t
+
+#ifdef GUDHI_USE_TBB
+#include <tbb/parallel_for.h>
+#include <tbb/combinable.h>
+#include <tbb/mutex.h>
+#endif
+
+// #define GUDHI_TC_EXPORT_NORMALS // Only for 3D surfaces (k=2, d=3)
+
+namespace sps = Gudhi::spatial_searching;
+
+namespace Gudhi {
+
+namespace tangential_complex {
+
+using namespace internal;
+
+class Vertex_data {
+ public:
+ Vertex_data(std::size_t data = (std::numeric_limits<std::size_t>::max)())
+ : m_data(data) { }
+
+ operator std::size_t() {
+ return m_data;
+ }
+
+ operator std::size_t() const {
+ return m_data;
+ }
+
+ private:
+ std::size_t m_data;
+};
+
+/**
+ * \class Tangential_complex Tangential_complex.h gudhi/Tangential_complex.h
+ * \brief Tangential complex data structure.
+ *
+ * \ingroup tangential_complex
+ *
+ * \details
+ * The class Tangential_complex represents a tangential complex.
+ * After the computation of the complex, an optional post-processing called perturbation can
+ * be run to attempt to remove inconsistencies.
+ *
+ * \tparam Kernel_ requires a <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a> class, which
+ * can be static if you know the ambient dimension at compile-time, or dynamic if you don't.
+ * \tparam DimensionTag can be either <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_23/classCGAL_1_1Dimension__tag.html">Dimension_tag<d></a>
+ * if you know the intrinsic dimension at compile-time,
+ * or <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_23/classCGAL_1_1Dynamic__dimension__tag.html">CGAL::Dynamic_dimension_tag</a>
+ * if you don't.
+ * \tparam Concurrency_tag enables sequential versus parallel computation. Possible values are `CGAL::Parallel_tag` (the default) and `CGAL::Sequential_tag`.
+ * \tparam Triangulation_ is the type used for storing the local regular triangulations. We highly recommend using the default value (`CGAL::Regular_triangulation`).
+ *
+ */
+template
+<
+ typename Kernel_, // ambient kernel
+ typename DimensionTag, // intrinsic dimension
+ typename Concurrency_tag = CGAL::Parallel_tag,
+ typename Triangulation_ = CGAL::Default
+>
+class Tangential_complex {
+ typedef Kernel_ K;
+ typedef typename K::FT FT;
+ typedef typename K::Point_d Point;
+ typedef typename K::Weighted_point_d Weighted_point;
+ typedef typename K::Vector_d Vector;
+
+ typedef typename CGAL::Default::Get
+ <
+ Triangulation_,
+ CGAL::Regular_triangulation
+ <
+ CGAL::Epick_d<DimensionTag>,
+ CGAL::Triangulation_data_structure
+ <
+ typename CGAL::Epick_d<DimensionTag>::Dimension,
+ CGAL::Triangulation_vertex
+ <
+ CGAL::Regular_triangulation_traits_adapter< CGAL::Epick_d<DimensionTag> >, Vertex_data
+ >,
+ CGAL::Triangulation_full_cell<CGAL::Regular_triangulation_traits_adapter< CGAL::Epick_d<DimensionTag> > >
+ >
+ >
+ >::type Triangulation;
+ typedef typename Triangulation::Geom_traits Tr_traits;
+ typedef typename Triangulation::Weighted_point Tr_point;
+ typedef typename Triangulation::Bare_point Tr_bare_point;
+ typedef typename Triangulation::Vertex_handle Tr_vertex_handle;
+ typedef typename Triangulation::Full_cell_handle Tr_full_cell_handle;
+ typedef typename Tr_traits::Vector_d Tr_vector;
+
+#if defined(GUDHI_USE_TBB)
+ typedef tbb::mutex Mutex_for_perturb;
+ typedef Vector Translation_for_perturb;
+ typedef std::vector<Atomic_wrapper<FT> > Weights;
+#else
+ typedef Vector Translation_for_perturb;
+ typedef std::vector<FT> Weights;
+#endif
+ typedef std::vector<Translation_for_perturb> Translations_for_perturb;
+
+ // Store a local triangulation and a handle to its center vertex
+
+ struct Tr_and_VH {
+ public:
+ Tr_and_VH()
+ : m_tr(NULL) { }
+
+ Tr_and_VH(int dim)
+ : m_tr(new Triangulation(dim)) { }
+
+ ~Tr_and_VH() {
+ destroy_triangulation();
+ }
+
+ Triangulation & construct_triangulation(int dim) {
+ delete m_tr;
+ m_tr = new Triangulation(dim);
+ return tr();
+ }
+
+ void destroy_triangulation() {
+ delete m_tr;
+ m_tr = NULL;
+ }
+
+ Triangulation & tr() {
+ return *m_tr;
+ }
+
+ Triangulation const& tr() const {
+ return *m_tr;
+ }
+
+ Tr_vertex_handle const& center_vertex() const {
+ return m_center_vertex;
+ }
+
+ Tr_vertex_handle & center_vertex() {
+ return m_center_vertex;
+ }
+
+ private:
+ Triangulation* m_tr;
+ Tr_vertex_handle m_center_vertex;
+ };
+
+ public:
+ typedef Basis<K> Tangent_space_basis;
+ typedef Basis<K> Orthogonal_space_basis;
+ typedef std::vector<Tangent_space_basis> TS_container;
+ typedef std::vector<Orthogonal_space_basis> OS_container;
+
+ typedef std::vector<Point> Points;
+
+ typedef boost::container::flat_set<std::size_t> Simplex;
+ typedef std::set<Simplex> Simplex_set;
+
+ private:
+ typedef sps::Kd_tree_search<K, Points> Points_ds;
+ typedef typename Points_ds::KNS_range KNS_range;
+ typedef typename Points_ds::INS_range INS_range;
+
+ typedef std::vector<Tr_and_VH> Tr_container;
+ typedef std::vector<Vector> Vectors;
+
+ // An Incident_simplex is the list of the vertex indices
+ // except the center vertex
+ typedef boost::container::flat_set<std::size_t> Incident_simplex;
+ typedef std::vector<Incident_simplex> Star;
+ typedef std::vector<Star> Stars_container;
+
+ // For transform_iterator
+
+ static const Tr_point &vertex_handle_to_point(Tr_vertex_handle vh) {
+ return vh->point();
+ }
+
+ template <typename P, typename VH>
+ static const P &vertex_handle_to_point(VH vh) {
+ return vh->point();
+ }
+
+ public:
+ typedef internal::Simplicial_complex Simplicial_complex;
+
+ /** \brief Constructor from a range of points.
+ * Points are copied into the instance, and a search data structure is initialized.
+ * Note the complex is not computed: `compute_tangential_complex` must be called after the creation
+ * of the object.
+ *
+ * @param[in] points Range of points (`Point_range::value_type` must be the same as `Kernel_::Point_d`).
+ * @param[in] intrinsic_dimension Intrinsic dimension of the manifold.
+ * @param[in] k Kernel instance.
+ */
+ template <typename Point_range>
+ Tangential_complex(Point_range points,
+ int intrinsic_dimension,
+#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
+ InputIterator first_for_tse, InputIterator last_for_tse,
+#endif
+ const K &k = K()
+ )
+ : m_k(k),
+ m_intrinsic_dim(intrinsic_dimension),
+ m_ambient_dim(points.empty() ? 0 : k.point_dimension_d_object()(*points.begin())),
+ m_points(points.begin(), points.end()),
+ m_weights(m_points.size(), FT(0))
+#if defined(GUDHI_USE_TBB) && defined(GUDHI_TC_PERTURB_POSITION)
+ , m_p_perturb_mutexes(NULL)
+#endif
+ , m_points_ds(m_points)
+ , m_last_max_perturb(0.)
+ , m_are_tangent_spaces_computed(m_points.size(), false)
+ , m_tangent_spaces(m_points.size(), Tangent_space_basis())
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ , m_orth_spaces(m_points.size(), Orthogonal_space_basis())
+#endif
+#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
+ , m_points_for_tse(first_for_tse, last_for_tse)
+ , m_points_ds_for_tse(m_points_for_tse)
+#endif
+ { }
+
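// Illustrative sketch (not part of this patch): one concrete choice of the template
// parameters documented above, for points sampled on a 2-manifold embedded in R^3.
// The kernel, tags and names below are examples, not mandated by the header.
namespace tc_usage_example {
typedef CGAL::Epick_d<CGAL::Dimension_tag<3> > Kernel;  // ambient dimension fixed at compile time
typedef Gudhi::tangential_complex::Tangential_complex<
    Kernel, CGAL::Dimension_tag<2>, CGAL::Parallel_tag> TC;  // intrinsic dimension 2, parallel
}  // namespace tc_usage_example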
+ /// Destructor
+ ~Tangential_complex() {
+#if defined(GUDHI_USE_TBB) && defined(GUDHI_TC_PERTURB_POSITION)
+ delete [] m_p_perturb_mutexes;
+#endif
+ }
+
+ /// Returns the intrinsic dimension of the manifold.
+ int intrinsic_dimension() const {
+ return m_intrinsic_dim;
+ }
+
+ /// Returns the ambient dimension.
+ int ambient_dimension() const {
+ return m_ambient_dim;
+ }
+
+ Points const& points() const {
+ return m_points;
+ }
+
+ /** \brief Returns the point corresponding to the vertex given as parameter.
+ *
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ * @return The point found.
+ */
+ Point get_point(std::size_t vertex) const {
+ return m_points[vertex];
+ }
+
+ /** \brief Returns the perturbed position of the point corresponding to the vertex given as parameter.
+ *
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ * @return The perturbed position of the point found.
+ */
+ Point get_perturbed_point(std::size_t vertex) const {
+ return compute_perturbed_point(vertex);
+ }
+
+ /// Returns the number of vertices.
+
+ std::size_t number_of_vertices() const {
+ return m_points.size();
+ }
+
+ void set_weights(const Weights& weights) {
+ m_weights = weights;
+ }
+
+ void set_tangent_planes(const TS_container& tangent_spaces
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ , const OS_container& orthogonal_spaces
+#endif
+ ) {
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ GUDHI_CHECK(
+ m_points.size() == tangent_spaces.size()
+ && m_points.size() == orthogonal_spaces.size(),
+ std::logic_error("Wrong sizes"));
+#else
+ GUDHI_CHECK(
+ m_points.size() == tangent_spaces.size(),
+ std::logic_error("Wrong sizes"));
+#endif
+ m_tangent_spaces = tangent_spaces;
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ m_orth_spaces = orthogonal_spaces;
+#endif
+ for (std::size_t i = 0; i < m_points.size(); ++i)
+ m_are_tangent_spaces_computed[i] = true;
+ }
+
+ /// Computes the tangential complex.
+ void compute_tangential_complex() {
+#ifdef GUDHI_TC_PERFORM_EXTRA_CHECKS
+ std::cerr << red << "WARNING: GUDHI_TC_PERFORM_EXTRA_CHECKS is defined. "
+ << "Computation might be slower than usual.\n" << white;
+#endif
+
+#if defined(GUDHI_TC_PROFILING) && defined(GUDHI_USE_TBB)
+ Gudhi::Clock t;
+#endif
+
+ // We need to do that because we don't want the container to copy the
+ // already-computed triangulations (while resizing) since it would
+ // invalidate the vertex handles stored beside the triangulations
+ m_triangulations.resize(m_points.size());
+ m_stars.resize(m_points.size());
+ m_squared_star_spheres_radii_incl_margin.resize(m_points.size(), FT(-1));
+#ifdef GUDHI_TC_PERTURB_POSITION
+ if (m_points.empty())
+ m_translations.clear();
+ else
+ m_translations.resize(m_points.size(),
+ m_k.construct_vector_d_object()(m_ambient_dim));
+#if defined(GUDHI_USE_TBB)
+ delete [] m_p_perturb_mutexes;
+ m_p_perturb_mutexes = new Mutex_for_perturb[m_points.size()];
+#endif
+#endif
+
+#ifdef GUDHI_USE_TBB
+ // Parallel
+ if (boost::is_convertible<Concurrency_tag, CGAL::Parallel_tag>::value) {
+ tbb::parallel_for(tbb::blocked_range<size_t>(0, m_points.size()),
+ Compute_tangent_triangulation(*this));
+ } else {
+#endif // GUDHI_USE_TBB
+ // Sequential
+ for (std::size_t i = 0; i < m_points.size(); ++i)
+ compute_tangent_triangulation(i);
+#ifdef GUDHI_USE_TBB
+ }
+#endif // GUDHI_USE_TBB
+
+#if defined(GUDHI_TC_PROFILING) && defined(GUDHI_USE_TBB)
+ t.end();
+ std::cerr << "Tangential complex computed in " << t.num_seconds()
+ << " seconds.\n";
+#endif
+ }
+
+ /// \brief Type returned by `Tangential_complex::fix_inconsistencies_using_perturbation`.
+ struct Fix_inconsistencies_info {
+ /// `true` if all inconsistencies could be removed, `false` if the time limit was reached before they could all be fixed
+ bool success = false;
+ /// number of steps performed
+ unsigned int num_steps = 0;
+ /// initial number of inconsistent stars
+ std::size_t initial_num_inconsistent_stars = 0;
+ /// best number of inconsistent stars during the process
+ std::size_t best_num_inconsistent_stars = 0;
+ /// final number of inconsistent stars
+ std::size_t final_num_inconsistent_stars = 0;
+ };
+
+ /** \brief Attempts to fix inconsistencies by perturbing the point positions.
+ *
+ * @param[in] max_perturb Maximum length of the translations used by the perturbation.
+ * @param[in] time_limit Time limit in seconds. If -1, no time limit is set.
+ */
+ Fix_inconsistencies_info fix_inconsistencies_using_perturbation(double max_perturb, double time_limit = -1.) {
+ Fix_inconsistencies_info info;
+
+ if (time_limit == 0.)
+ return info;
+
+ Gudhi::Clock t;
+
+#ifdef GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES
+ std::tuple<std::size_t, std::size_t, std::size_t> stats_before =
+ number_of_inconsistent_simplices(false);
+
+ if (std::get<1>(stats_before) == 0) {
+#ifdef DEBUG_TRACES
+ std::cerr << "Nothing to fix.\n";
+#endif
+ info.success = false;
+ return info;
+ }
+#endif // GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES
+
+ m_last_max_perturb = max_perturb;
+
+ bool done = false;
+ info.best_num_inconsistent_stars = m_triangulations.size();
+ info.num_steps = 0;
+ while (!done) {
+#ifdef GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES
+ std::cerr
+ << "\nBefore fix step:\n"
+ << " * Total number of simplices in stars (incl. duplicates): "
+ << std::get<0>(stats_before) << "\n"
+ << " * Num inconsistent simplices in stars (incl. duplicates): "
+ << red << std::get<1>(stats_before) << white << " ("
+ << 100. * std::get<1>(stats_before) / std::get<0>(stats_before) << "%)\n"
+ << " * Number of stars containing inconsistent simplices: "
+ << red << std::get<2>(stats_before) << white << " ("
+ << 100. * std::get<2>(stats_before) / m_points.size() << "%)\n";
+#endif
+
+#if defined(DEBUG_TRACES) || defined(GUDHI_TC_PROFILING)
+ std::cerr << yellow
+ << "\nAttempt to fix inconsistencies using perturbations - step #"
+ << info.num_steps + 1 << "... " << white;
+#endif
+
+ std::size_t num_inconsistent_stars = 0;
+ std::vector<std::size_t> updated_points;
+
+#ifdef GUDHI_TC_PROFILING
+ Gudhi::Clock t_fix_step;
+#endif
+
+ // Parallel
+#if defined(GUDHI_USE_TBB)
+ if (boost::is_convertible<Concurrency_tag, CGAL::Parallel_tag>::value) {
+ tbb::combinable<std::size_t> num_inconsistencies;
+ tbb::combinable<std::vector<std::size_t> > tls_updated_points;
+ tbb::parallel_for(
+ tbb::blocked_range<size_t>(0, m_triangulations.size()),
+ Try_to_solve_inconsistencies_in_a_local_triangulation(*this, max_perturb,
+ num_inconsistencies,
+ tls_updated_points));
+ num_inconsistent_stars =
+ num_inconsistencies.combine(std::plus<std::size_t>());
+ updated_points = tls_updated_points.combine(
+ [](std::vector<std::size_t> const& x,
+ std::vector<std::size_t> const& y) {
+ std::vector<std::size_t> res;
+ res.reserve(x.size() + y.size());
+ res.insert(res.end(), x.begin(), x.end());
+ res.insert(res.end(), y.begin(), y.end());
+ return res;
+ });
+ } else {
+#endif // GUDHI_USE_TBB
+ // Sequential
+ for (std::size_t i = 0; i < m_triangulations.size(); ++i) {
+ num_inconsistent_stars +=
+ try_to_solve_inconsistencies_in_a_local_triangulation(i, max_perturb,
+ std::back_inserter(updated_points));
+ }
+#if defined(GUDHI_USE_TBB)
+ }
+#endif // GUDHI_USE_TBB
+
+#ifdef GUDHI_TC_PROFILING
+ t_fix_step.end();
+#endif
+
+#if defined(GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES) || defined(DEBUG_TRACES)
+ std::cerr
+ << "\nEncountered during fix:\n"
+ << " * Num stars containing inconsistent simplices: "
+ << red << num_inconsistent_stars << white
+ << " (" << 100. * num_inconsistent_stars / m_points.size() << "%)\n";
+#endif
+
+#ifdef GUDHI_TC_PROFILING
+ std::cerr << yellow << "done in " << t_fix_step.num_seconds()
+ << " seconds.\n" << white;
+#elif defined(DEBUG_TRACES)
+ std::cerr << yellow << "done.\n" << white;
+#endif
+
+ if (num_inconsistent_stars > 0)
+ refresh_tangential_complex(updated_points);
+
+#ifdef GUDHI_TC_PERFORM_EXTRA_CHECKS
+ // Confirm that all stars were actually refreshed
+ std::size_t num_inc_1 =
+ std::get<1>(number_of_inconsistent_simplices(false));
+ refresh_tangential_complex();
+ std::size_t num_inc_2 =
+ std::get<1>(number_of_inconsistent_simplices(false));
+ if (num_inc_1 != num_inc_2)
+ std::cerr << red << "REFRESHMENT CHECK: FAILED. ("
+ << num_inc_1 << " vs " << num_inc_2 << ")\n" << white;
+ else
+ std::cerr << green << "REFRESHMENT CHECK: PASSED.\n" << white;
+#endif
+
+#ifdef GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES
+ std::tuple<std::size_t, std::size_t, std::size_t> stats_after =
+ number_of_inconsistent_simplices(false);
+
+ std::cerr
+ << "\nAfter fix:\n"
+ << " * Total number of simplices in stars (incl. duplicates): "
+ << std::get<0>(stats_after) << "\n"
+ << " * Num inconsistent simplices in stars (incl. duplicates): "
+ << red << std::get<1>(stats_after) << white << " ("
+ << 100. * std::get<1>(stats_after) / std::get<0>(stats_after) << "%)\n"
+ << " * Number of stars containing inconsistent simplices: "
+ << red << std::get<2>(stats_after) << white << " ("
+ << 100. * std::get<2>(stats_after) / m_points.size() << "%)\n";
+
+ stats_before = stats_after;
+#endif
+
+ if (info.num_steps == 0)
+ info.initial_num_inconsistent_stars = num_inconsistent_stars;
+
+ if (num_inconsistent_stars < info.best_num_inconsistent_stars)
+ info.best_num_inconsistent_stars = num_inconsistent_stars;
+
+ info.final_num_inconsistent_stars = num_inconsistent_stars;
+
+ done = (num_inconsistent_stars == 0);
+ if (!done) {
+ ++info.num_steps;
+ if (time_limit > 0. && t.num_seconds() > time_limit) {
+#ifdef DEBUG_TRACES
+ std::cerr << red << "Time limit reached.\n" << white;
+#endif
+ info.success = false;
+ return info;
+ }
+ }
+ }
+
+#ifdef DEBUG_TRACES
+ std::cerr << green << "Fixed!\n" << white;
+#endif
+ info.success = true;
+ return info;
+ }
+
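// Continuing the illustrative sketch above (not part of this patch): construct the
// complex, compute it, then try to remove inconsistencies by perturbation and inspect
// the Fix_inconsistencies_info fields documented above. Numeric values are arbitrary.
inline void tc_compute_and_fix_example(const std::vector<tc_usage_example::Kernel::Point_d>& points) {
  tc_usage_example::TC tc(points, 2 /* intrinsic_dimension */);
  tc.compute_tangential_complex();
  auto info = tc.fix_inconsistencies_using_perturbation(0.01 /* max_perturb */,
                                                        60. /* time_limit in seconds */);
  if (!info.success)
    std::cerr << info.final_num_inconsistent_stars << " stars remain inconsistent after "
              << info.num_steps << " perturbation steps.\n";
}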
+ /// \brief Type returned by `Tangential_complex::number_of_inconsistent_simplices`.
+ struct Num_inconsistencies {
+ /// Total number of simplices in stars (including duplicates that appear in several stars)
+ std::size_t num_simplices = 0;
+ /// Number of inconsistent simplices
+ std::size_t num_inconsistent_simplices = 0;
+ /// Number of stars containing at least one inconsistent simplex
+ std::size_t num_inconsistent_stars = 0;
+ };
+
+ /// Returns the number of inconsistencies
+ /// @param[in] verbose If true, outputs a message into `std::cerr`.
+
+ Num_inconsistencies
+ number_of_inconsistent_simplices(
+#ifdef DEBUG_TRACES
+ bool verbose = true
+#else
+ bool verbose = false
+#endif
+ ) const {
+ Num_inconsistencies stats;
+
+ // For each triangulation
+ for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
+ bool is_star_inconsistent = false;
+
+ // For each cell
+ Star::const_iterator it_inc_simplex = m_stars[idx].begin();
+ Star::const_iterator it_inc_simplex_end = m_stars[idx].end();
+ for (; it_inc_simplex != it_inc_simplex_end; ++it_inc_simplex) {
+ // Don't check infinite cells
+ if (is_infinite(*it_inc_simplex))
+ continue;
+
+ Simplex c = *it_inc_simplex;
+ c.insert(idx); // Add the missing index
+
+ if (!is_simplex_consistent(c)) {
+ ++stats.num_inconsistent_simplices;
+ is_star_inconsistent = true;
+ }
+
+ ++stats.num_simplices;
+ }
+ stats.num_inconsistent_stars += is_star_inconsistent;
+ }
+
+ if (verbose) {
+ std::cerr
+ << "\n==========================================================\n"
+ << "Inconsistencies:\n"
+ << " * Total number of simplices in stars (incl. duplicates): "
+ << stats.num_simplices << "\n"
+ << " * Number of inconsistent simplices in stars (incl. duplicates): "
+ << stats.num_inconsistent_simplices << " ("
+ << 100. * stats.num_inconsistent_simplices / stats.num_simplices << "%)\n"
+ << " * Number of stars containing inconsistent simplices: "
+ << stats.num_inconsistent_stars << " ("
+ << 100. * stats.num_inconsistent_stars / m_points.size() << "%)\n"
+ << "==========================================================\n";
+ }
+
+ return stats;
+ }
+
+ /** \brief Exports the complex into a Simplex_tree.
+ *
+ * \tparam Simplex_tree_ must be a `Simplex_tree`.
+ *
+ * @param[out] tree The result, where each `Vertex_handle` is the index of the
+ * corresponding point in the range provided to the constructor (it can also be
+ * retrieved through the `Tangential_complex::get_point` function).
+ * @param[in] export_inconsistent_simplices Whether to also export inconsistent simplices.
+ * @return The maximal dimension of the simplices.
+ */
+ template <typename Simplex_tree_>
+ int create_complex(Simplex_tree_ &tree
+ , bool export_inconsistent_simplices = true
+ /// \cond ADVANCED_PARAMETERS
+ , bool export_infinite_simplices = false
+ , Simplex_set *p_inconsistent_simplices = NULL
+ /// \endcond
+ ) const {
+#if defined(DEBUG_TRACES) || defined(GUDHI_TC_PROFILING)
+ std::cerr << yellow
+ << "\nExporting the TC as a Simplex_tree... " << white;
+#endif
+#ifdef GUDHI_TC_PROFILING
+ Gudhi::Clock t;
+#endif
+
+ int max_dim = -1;
+
+ // For each triangulation
+ for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
+ // For each cell of the star
+ Star::const_iterator it_inc_simplex = m_stars[idx].begin();
+ Star::const_iterator it_inc_simplex_end = m_stars[idx].end();
+ for (; it_inc_simplex != it_inc_simplex_end; ++it_inc_simplex) {
+ Simplex c = *it_inc_simplex;
+
+ // Don't export infinite cells
+ if (!export_infinite_simplices && is_infinite(c))
+ continue;
+
+ if (!export_inconsistent_simplices && !is_simplex_consistent(c))
+ continue;
+
+ if (static_cast<int> (c.size()) > max_dim)
+ max_dim = static_cast<int> (c.size());
+ // Add the missing center vertex
+ c.insert(idx);
+
+ // Try to insert the simplex
+ bool inserted = tree.insert_simplex_and_subfaces(c).second;
+
+ // Inconsistent?
+ if (p_inconsistent_simplices && inserted && !is_simplex_consistent(c)) {
+ p_inconsistent_simplices->insert(c);
+ }
+ }
+ }
+
+#ifdef GUDHI_TC_PROFILING
+ t.end();
+ std::cerr << yellow << "done in " << t.num_seconds()
+ << " seconds.\n" << white;
+#elif defined(DEBUG_TRACES)
+ std::cerr << yellow << "done.\n" << white;
+#endif
+
+ return max_dim;
+ }
+
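// Continuing the illustrative sketch above (not part of this patch): export the computed
// complex into a Gudhi::Simplex_tree as documented above; each vertex of the tree is the
// index of the corresponding input point. `tc` is assumed built and computed as before.
inline void tc_export_example(const tc_usage_example::TC& tc) {
  Gudhi::Simplex_tree<> tree;
  int max_dim = tc.create_complex(tree);
  std::cerr << "Simplex tree: " << tree.num_simplices() << " simplices, "
            << "returned maximal dimension " << max_dim << ".\n";
}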
+ // First clears the complex then exports the TC into it
+ // Returns the max dimension of the simplices
+ // check_lower_and_higher_dim_simplices : 0 (false), 1 (true), 2 (auto)
+ // If the check is enabled, the function:
+ // - won't insert the simplex if it is already in a higher dim simplex
+ // - will erase any lower-dim simplices that are faces of the new simplex
+ // "auto" (= 2) will enable the check as soon as it encounters a
+ // simplex whose dimension is different from the previous ones.
+ // N.B.: The check is quite expensive.
+
+ int create_complex(Simplicial_complex &complex,
+ bool export_inconsistent_simplices = true,
+ bool export_infinite_simplices = false,
+ int check_lower_and_higher_dim_simplices = 2,
+ Simplex_set *p_inconsistent_simplices = NULL) const {
+#if defined(DEBUG_TRACES) || defined(GUDHI_TC_PROFILING)
+ std::cerr << yellow
+ << "\nExporting the TC as a Simplicial_complex... " << white;
+#endif
+#ifdef GUDHI_TC_PROFILING
+ Gudhi::Clock t;
+#endif
+
+ int max_dim = -1;
+ complex.clear();
+
+ // For each triangulation
+ for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
+ // For each cell of the star
+ Star::const_iterator it_inc_simplex = m_stars[idx].begin();
+ Star::const_iterator it_inc_simplex_end = m_stars[idx].end();
+ for (; it_inc_simplex != it_inc_simplex_end; ++it_inc_simplex) {
+ Simplex c = *it_inc_simplex;
+
+ // Don't export infinite cells
+ if (!export_infinite_simplices && is_infinite(c))
+ continue;
+
+ if (!export_inconsistent_simplices && !is_simplex_consistent(c))
+ continue;
+
+ // Unusual simplex dim?
+ if (check_lower_and_higher_dim_simplices == 2
+ && max_dim != -1
+ && static_cast<int> (c.size()) != max_dim) {
+ // Let's activate the check
+ std::cerr << red <<
+ "Info: check_lower_and_higher_dim_simplices ACTIVATED. "
+ "Export might take some time...\n" << white;
+ check_lower_and_higher_dim_simplices = 1;
+ }
+
+ if (static_cast<int> (c.size()) > max_dim)
+ max_dim = static_cast<int> (c.size());
+ // Add the missing center vertex
+ c.insert(idx);
+
+ // Try to insert the simplex
+ bool added =
+ complex.add_simplex(c, check_lower_and_higher_dim_simplices == 1);
+
+ // Inconsistent?
+ if (p_inconsistent_simplices && added && !is_simplex_consistent(c)) {
+ p_inconsistent_simplices->insert(c);
+ }
+ }
+ }
+
+#ifdef GUDHI_TC_PROFILING
+ t.end();
+ std::cerr << yellow << "done in " << t.num_seconds()
+ << " seconds.\n" << white;
+#elif defined(DEBUG_TRACES)
+ std::cerr << yellow << "done.\n" << white;
+#endif
+
+ return max_dim;
+ }
+
+ template<typename ProjectionFunctor = CGAL::Identity<Point> >
+ std::ostream &export_to_off(
+ const Simplicial_complex &complex, std::ostream & os,
+ Simplex_set const *p_simpl_to_color_in_red = NULL,
+ Simplex_set const *p_simpl_to_color_in_green = NULL,
+ Simplex_set const *p_simpl_to_color_in_blue = NULL,
+ ProjectionFunctor const& point_projection = ProjectionFunctor())
+ const {
+ return export_to_off(
+ os, false, p_simpl_to_color_in_red, p_simpl_to_color_in_green,
+ p_simpl_to_color_in_blue, &complex, point_projection);
+ }
+
+ template<typename ProjectionFunctor = CGAL::Identity<Point> >
+ std::ostream &export_to_off(
+ std::ostream & os, bool color_inconsistencies = false,
+ Simplex_set const *p_simpl_to_color_in_red = NULL,
+ Simplex_set const *p_simpl_to_color_in_green = NULL,
+ Simplex_set const *p_simpl_to_color_in_blue = NULL,
+ const Simplicial_complex *p_complex = NULL,
+ ProjectionFunctor const& point_projection = ProjectionFunctor()) const {
+ if (m_points.empty())
+ return os;
+
+ if (m_ambient_dim < 2) {
+ std::cerr << "Error: export_to_off => ambient dimension should be >= 2.\n";
+ os << "Error: export_to_off => ambient dimension should be >= 2.\n";
+ return os;
+ }
+ if (m_ambient_dim > 3) {
+ std::cerr << "Warning: export_to_off => ambient dimension should be "
+ "<= 3. Only the first 3 coordinates will be exported.\n";
+ }
+
+ if (m_intrinsic_dim < 1 || m_intrinsic_dim > 3) {
+ std::cerr << "Error: export_to_off => intrinsic dimension should be "
+ "between 1 and 3.\n";
+ os << "Error: export_to_off => intrinsic dimension should be "
+ "between 1 and 3.\n";
+ return os;
+ }
+
+ std::stringstream output;
+ std::size_t num_simplices, num_vertices;
+ export_vertices_to_off(output, num_vertices, false, point_projection);
+ if (p_complex) {
+ export_simplices_to_off(
+ *p_complex, output, num_simplices, p_simpl_to_color_in_red,
+ p_simpl_to_color_in_green, p_simpl_to_color_in_blue);
+ } else {
+ export_simplices_to_off(
+ output, num_simplices, color_inconsistencies, p_simpl_to_color_in_red,
+ p_simpl_to_color_in_green, p_simpl_to_color_in_blue);
+ }
+
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ os << "N";
+#endif
+
+ os << "OFF \n"
+ << num_vertices << " "
+ << num_simplices << " "
+ << "0 \n"
+ << output.str();
+
+ return os;
+ }
+
+ private:
+ void refresh_tangential_complex() {
+#if defined(DEBUG_TRACES) || defined(GUDHI_TC_PROFILING)
+ std::cerr << yellow << "\nRefreshing TC... " << white;
+#endif
+
+#ifdef GUDHI_TC_PROFILING
+ Gudhi::Clock t;
+#endif
+#ifdef GUDHI_USE_TBB
+ // Parallel
+ if (boost::is_convertible<Concurrency_tag, CGAL::Parallel_tag>::value) {
+ tbb::parallel_for(tbb::blocked_range<size_t>(0, m_points.size()),
+ Compute_tangent_triangulation(*this));
+ } else {
+#endif // GUDHI_USE_TBB
+ // Sequential
+ for (std::size_t i = 0; i < m_points.size(); ++i)
+ compute_tangent_triangulation(i);
+#ifdef GUDHI_USE_TBB
+ }
+#endif // GUDHI_USE_TBB
+
+#ifdef GUDHI_TC_PROFILING
+ t.end();
+ std::cerr << yellow << "done in " << t.num_seconds()
+ << " seconds.\n" << white;
+#elif defined(DEBUG_TRACES)
+ std::cerr << yellow << "done.\n" << white;
+#endif
+ }
+
+ // If the list of perturbed points is provided, it is much faster
+ template <typename Point_indices_range>
+ void refresh_tangential_complex(
+ Point_indices_range const& perturbed_points_indices) {
+#if defined(DEBUG_TRACES) || defined(GUDHI_TC_PROFILING)
+ std::cerr << yellow << "\nRefreshing TC... " << white;
+#endif
+
+#ifdef GUDHI_TC_PROFILING
+ Gudhi::Clock t;
+#endif
+
+ // ANN tree containing only the perturbed points
+ Points_ds updated_pts_ds(m_points, perturbed_points_indices);
+
+#ifdef GUDHI_USE_TBB
+ // Parallel
+ if (boost::is_convertible<Concurrency_tag, CGAL::Parallel_tag>::value) {
+ tbb::parallel_for(tbb::blocked_range<size_t>(0, m_points.size()),
+ Refresh_tangent_triangulation(*this, updated_pts_ds));
+ } else {
+#endif // GUDHI_USE_TBB
+ // Sequential
+ for (std::size_t i = 0; i < m_points.size(); ++i)
+ refresh_tangent_triangulation(i, updated_pts_ds);
+#ifdef GUDHI_USE_TBB
+ }
+#endif // GUDHI_USE_TBB
+
+#ifdef GUDHI_TC_PROFILING
+ t.end();
+ std::cerr << yellow << "done in " << t.num_seconds()
+ << " seconds.\n" << white;
+#elif defined(DEBUG_TRACES)
+ std::cerr << yellow << "done.\n" << white;
+#endif
+ }
+
+ void export_inconsistent_stars_to_OFF_files(std::string const& filename_base) const {
+ // For each triangulation
+ for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
+ // We build a SC along the way in case it's inconsistent
+ Simplicial_complex sc;
+ // For each cell
+ bool is_inconsistent = false;
+ Star::const_iterator it_inc_simplex = m_stars[idx].begin();
+ Star::const_iterator it_inc_simplex_end = m_stars[idx].end();
+ for (; it_inc_simplex != it_inc_simplex_end;
+ ++it_inc_simplex) {
+ // Skip infinite cells
+ if (is_infinite(*it_inc_simplex))
+ continue;
+
+ Simplex c = *it_inc_simplex;
+ c.insert(idx); // Add the missing index
+
+ sc.add_simplex(c);
+
+ // If we do not already know this star is inconsistent, test it
+ if (!is_inconsistent && !is_simplex_consistent(c))
+ is_inconsistent = true;
+ }
+
+ if (is_inconsistent) {
+ // Export star to OFF file
+ std::stringstream output_filename;
+ output_filename << filename_base << "_" << idx << ".off";
+ std::ofstream off_stream(output_filename.str().c_str());
+ export_to_off(sc, off_stream);
+ }
+ }
+ }
+
+ class Compare_distance_to_ref_point {
+ public:
+ Compare_distance_to_ref_point(Point const& ref, K const& k)
+ : m_ref(ref), m_k(k) { }
+
+ bool operator()(Point const& p1, Point const& p2) {
+ typename K::Squared_distance_d sqdist =
+ m_k.squared_distance_d_object();
+ return sqdist(p1, m_ref) < sqdist(p2, m_ref);
+ }
+
+ private:
+ Point const& m_ref;
+ K const& m_k;
+ };
+
+#ifdef GUDHI_USE_TBB
+ // Functor for compute_tangential_complex function
+ class Compute_tangent_triangulation {
+ Tangential_complex & m_tc;
+
+ public:
+ // Constructor
+ Compute_tangent_triangulation(Tangential_complex &tc)
+ : m_tc(tc) { }
+
+ // Constructor
+ Compute_tangent_triangulation(const Compute_tangent_triangulation &ctt)
+ : m_tc(ctt.m_tc) { }
+
+ // operator()
+ void operator()(const tbb::blocked_range<size_t>& r) const {
+ for (size_t i = r.begin(); i != r.end(); ++i)
+ m_tc.compute_tangent_triangulation(i);
+ }
+ };
+
+ // Functor for refresh_tangential_complex function
+ class Refresh_tangent_triangulation {
+ Tangential_complex & m_tc;
+ Points_ds const& m_updated_pts_ds;
+
+ public:
+ // Constructor
+ Refresh_tangent_triangulation(Tangential_complex &tc, Points_ds const& updated_pts_ds)
+ : m_tc(tc), m_updated_pts_ds(updated_pts_ds) { }
+
+ // Constructor
+ Refresh_tangent_triangulation(const Refresh_tangent_triangulation &ctt)
+ : m_tc(ctt.m_tc), m_updated_pts_ds(ctt.m_updated_pts_ds) { }
+
+ // operator()
+ void operator()(const tbb::blocked_range<size_t>& r) const {
+ for (size_t i = r.begin(); i != r.end(); ++i)
+ m_tc.refresh_tangent_triangulation(i, m_updated_pts_ds);
+ }
+ };
+#endif // GUDHI_USE_TBB
+
+ bool is_infinite(Simplex const& s) const {
+ return *s.rbegin() == (std::numeric_limits<std::size_t>::max)();
+ }
+
+ // Output: "triangulation" is a Regular Triangulation containing at least the
+ // star of "center_pt"
+ // Returns the handle of the center vertex
+ Tr_vertex_handle compute_star(std::size_t i, const Point &center_pt, const Tangent_space_basis &tsb,
+ Triangulation &triangulation, bool verbose = false) {
+ int tangent_space_dim = tsb.dimension();
+ const Tr_traits &local_tr_traits = triangulation.geom_traits();
+ Tr_vertex_handle center_vertex;
+
+ // Kernel functor & objects
+ typename K::Squared_distance_d k_sqdist = m_k.squared_distance_d_object();
+
+ // Triangulation's traits functor & objects
+ typename Tr_traits::Compute_weight_d point_weight = local_tr_traits.compute_weight_d_object();
+ typename Tr_traits::Power_center_d power_center = local_tr_traits.power_center_d_object();
+
+ //***************************************************
+ // Build a minimal triangulation in the tangent space
+ // (we only need the star of p)
+ //***************************************************
+
+ // Insert p
+ Tr_point proj_wp;
+ if (i == tsb.origin()) {
+ // Insert {(0, 0, 0...), m_weights[i]}
+ proj_wp = local_tr_traits.construct_weighted_point_d_object()(local_tr_traits.construct_point_d_object()(tangent_space_dim, CGAL::ORIGIN),
+ m_weights[i]);
+ } else {
+ const Weighted_point& wp = compute_perturbed_weighted_point(i);
+ proj_wp = project_point_and_compute_weight(wp, tsb, local_tr_traits);
+ }
+
+ center_vertex = triangulation.insert(proj_wp);
+ center_vertex->data() = i;
+ if (verbose)
+ std::cerr << "* Inserted point #" << i << "\n";
+
+#ifdef GUDHI_TC_VERY_VERBOSE
+ std::size_t num_attempts_to_insert_points = 1;
+ std::size_t num_inserted_points = 1;
+#endif
+ // const int NUM_NEIGHBORS = 150;
+ // KNS_range ins_range = m_points_ds.query_k_nearest_neighbors(center_pt, NUM_NEIGHBORS);
+ INS_range ins_range = m_points_ds.query_incremental_nearest_neighbors(center_pt);
+
+ // While building the local triangulation, we keep track of the radius
+ // of the "star sphere" centered at "center_vertex", i.e. the sphere
+ // that contains all the circumspheres of the star of "center_vertex"
+ boost::optional<FT> squared_star_sphere_radius_plus_margin;
+
+ // Insert points until we find a point which is outside "star sphere"
+ for (auto nn_it = ins_range.begin();
+ nn_it != ins_range.end();
+ ++nn_it) {
+ std::size_t neighbor_point_idx = nn_it->first;
+
+ // ith point = p, which is already inserted
+ if (neighbor_point_idx != i) {
+ // No need to lock the Mutex_for_perturb here since this will not be
+ // called while other threads are perturbing the positions
+ Point neighbor_pt;
+ FT neighbor_weight;
+ compute_perturbed_weighted_point(neighbor_point_idx, neighbor_pt, neighbor_weight);
+
+ if (squared_star_sphere_radius_plus_margin &&
+ k_sqdist(center_pt, neighbor_pt) > *squared_star_sphere_radius_plus_margin)
+ break;
+
+ Tr_point proj_pt = project_point_and_compute_weight(neighbor_pt, neighbor_weight, tsb,
+ local_tr_traits);
+
+#ifdef GUDHI_TC_VERY_VERBOSE
+ ++num_attempts_to_insert_points;
+#endif
+
+
+ Tr_vertex_handle vh = triangulation.insert_if_in_star(proj_pt, center_vertex);
+ // Tr_vertex_handle vh = triangulation.insert(proj_pt);
+ if (vh != Tr_vertex_handle() && vh->data() == (std::numeric_limits<std::size_t>::max)()) {
+#ifdef GUDHI_TC_VERY_VERBOSE
+ ++num_inserted_points;
+#endif
+ if (verbose)
+ std::cerr << "* Inserted point #" << neighbor_point_idx << "\n";
+
+ vh->data() = neighbor_point_idx;
+
+ // Let's recompute squared_star_sphere_radius_plus_margin
+ if (triangulation.current_dimension() >= tangent_space_dim) {
+ squared_star_sphere_radius_plus_margin = boost::none;
+ // Get the incident cells and look for the biggest circumsphere
+ std::vector<Tr_full_cell_handle> incident_cells;
+ triangulation.incident_full_cells(
+ center_vertex,
+ std::back_inserter(incident_cells));
+ for (typename std::vector<Tr_full_cell_handle>::iterator cit =
+ incident_cells.begin(); cit != incident_cells.end(); ++cit) {
+ Tr_full_cell_handle cell = *cit;
+ if (triangulation.is_infinite(cell)) {
+ squared_star_sphere_radius_plus_margin = boost::none;
+ break;
+ } else {
+ // Note that this uses the perturbed point since it uses
+ // the points of the local triangulation
+ Tr_point c = power_center(boost::make_transform_iterator(cell->vertices_begin(),
+ vertex_handle_to_point<Tr_point,
+ Tr_vertex_handle>),
+ boost::make_transform_iterator(cell->vertices_end(),
+ vertex_handle_to_point<Tr_point,
+ Tr_vertex_handle>));
+
+ FT sq_power_sphere_diam = 4 * point_weight(c);
+
+ if (!squared_star_sphere_radius_plus_margin ||
+ sq_power_sphere_diam > *squared_star_sphere_radius_plus_margin) {
+ squared_star_sphere_radius_plus_margin = sq_power_sphere_diam;
+ }
+ }
+ }
+
+ // Let's add the margin, now
+ // The value depends on whether we perturb weight or position
+ if (squared_star_sphere_radius_plus_margin) {
+ // "2*m_last_max_perturb" because both points can be perturbed
+ squared_star_sphere_radius_plus_margin = CGAL::square(std::sqrt(*squared_star_sphere_radius_plus_margin)
+ + 2 * m_last_max_perturb);
+
+ // Save it in `m_squared_star_spheres_radii_incl_margin`
+ m_squared_star_spheres_radii_incl_margin[i] =
+ *squared_star_sphere_radius_plus_margin;
+ } else {
+ m_squared_star_spheres_radii_incl_margin[i] = FT(-1);
+ }
+ }
+ }
+ }
+ }
+
+ return center_vertex;
+ }
+
+ void refresh_tangent_triangulation(std::size_t i, Points_ds const& updated_pts_ds, bool verbose = false) {
+ if (verbose)
+ std::cerr << "** Refreshing tangent tri #" << i << " **\n";
+
+ if (m_squared_star_spheres_radii_incl_margin[i] == FT(-1))
+ return compute_tangent_triangulation(i, verbose);
+
+ Point center_point = compute_perturbed_point(i);
+ // Among the updated points, which one is closest to our center point?
+ std::size_t closest_pt_index =
+ updated_pts_ds.query_k_nearest_neighbors(center_point, 1, false).begin()->first;
+
+ typename K::Construct_weighted_point_d k_constr_wp =
+ m_k.construct_weighted_point_d_object();
+ typename K::Power_distance_d k_power_dist = m_k.power_distance_d_object();
+
+ // Construct a weighted point equivalent to the star sphere
+ Weighted_point star_sphere = k_constr_wp(compute_perturbed_point(i),
+ m_squared_star_spheres_radii_incl_margin[i]);
+ Weighted_point closest_updated_point =
+ compute_perturbed_weighted_point(closest_pt_index);
+
+ // Is the "closest point" inside our star sphere?
+ if (k_power_dist(star_sphere, closest_updated_point) <= FT(0))
+ compute_tangent_triangulation(i, verbose);
+ }
+
+ void compute_tangent_triangulation(std::size_t i, bool verbose = false) {
+ if (verbose)
+ std::cerr << "** Computing tangent tri #" << i << " **\n";
+ // std::cerr << "***********************************************\n";
+
+ // No need to lock the mutex here since this will not be called while
+ // other threads are perturbing the positions
+ const Point center_pt = compute_perturbed_point(i);
+ Tangent_space_basis &tsb = m_tangent_spaces[i];
+
+ // Estimate the tangent space
+ if (!m_are_tangent_spaces_computed[i]) {
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ tsb = compute_tangent_space(center_pt, i, true /*normalize*/, &m_orth_spaces[i]);
+#else
+ tsb = compute_tangent_space(center_pt, i);
+#endif
+ }
+
+#if defined(GUDHI_TC_PROFILING) && defined(GUDHI_TC_VERY_VERBOSE)
+ Gudhi::Clock t;
+#endif
+ int tangent_space_dim = tangent_basis_dim(i);
+ Triangulation &local_tr =
+ m_triangulations[i].construct_triangulation(tangent_space_dim);
+
+ m_triangulations[i].center_vertex() =
+ compute_star(i, center_pt, tsb, local_tr, verbose);
+
+#if defined(GUDHI_TC_PROFILING) && defined(GUDHI_TC_VERY_VERBOSE)
+ t.end();
+ std::cerr << " - triangulation construction: " << t.num_seconds() << " s.\n";
+ t.reset();
+#endif
+
+#ifdef GUDHI_TC_VERY_VERBOSE
+ std::cerr << "Inserted " << num_inserted_points << " points / "
+ << num_attempts_to_insert_points << " attempts to compute the star\n";
+#endif
+
+ update_star(i);
+
+#if defined(GUDHI_TC_PROFILING) && defined(GUDHI_TC_VERY_VERBOSE)
+ t.end();
+ std::cerr << " - update_star: " << t.num_seconds() << " s.\n";
+#endif
+ }
+
+ // Updates m_stars[i] directly from m_triangulations[i]
+
+ void update_star(std::size_t i) {
+ Star &star = m_stars[i];
+ star.clear();
+ Triangulation &local_tr = m_triangulations[i].tr();
+ Tr_vertex_handle center_vertex = m_triangulations[i].center_vertex();
+ int cur_dim_plus_1 = local_tr.current_dimension() + 1;
+
+ std::vector<Tr_full_cell_handle> incident_cells;
+ local_tr.incident_full_cells(
+ center_vertex, std::back_inserter(incident_cells));
+
+ typename std::vector<Tr_full_cell_handle>::const_iterator it_c = incident_cells.begin();
+ typename std::vector<Tr_full_cell_handle>::const_iterator it_c_end = incident_cells.end();
+ // For each cell
+ for (; it_c != it_c_end; ++it_c) {
+ // Will contain all indices except center_vertex
+ Incident_simplex incident_simplex;
+ for (int j = 0; j < cur_dim_plus_1; ++j) {
+ std::size_t index = (*it_c)->vertex(j)->data();
+ if (index != i)
+ incident_simplex.insert(index);
+ }
+ GUDHI_CHECK(incident_simplex.size() == cur_dim_plus_1 - 1,
+ std::logic_error("update_star: wrong size of incident simplex"));
+ star.push_back(incident_simplex);
+ }
+ }
+
+ // Estimates tangent subspaces using PCA
+
+ Tangent_space_basis compute_tangent_space(const Point &p
+ , const std::size_t i
+ , bool normalize_basis = true
+ , Orthogonal_space_basis *p_orth_space_basis = NULL
+ ) {
+ unsigned int num_pts_for_pca = (std::min)(static_cast<unsigned int> (std::pow(GUDHI_TC_BASE_VALUE_FOR_PCA, m_intrinsic_dim)),
+ static_cast<unsigned int> (m_points.size()));
+
+ // Kernel functors
+ typename K::Construct_vector_d constr_vec =
+ m_k.construct_vector_d_object();
+ typename K::Compute_coordinate_d coord =
+ m_k.compute_coordinate_d_object();
+
+#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
+ KNS_range kns_range = m_points_ds_for_tse.query_k_nearest_neighbors(
+ p, num_pts_for_pca, false);
+ const Points &points_for_pca = m_points_for_tse;
+#else
+ KNS_range kns_range = m_points_ds.query_k_nearest_neighbors(p, num_pts_for_pca, false);
+ const Points &points_for_pca = m_points;
+#endif
+
+ // One row = one point
+ Eigen::MatrixXd mat_points(num_pts_for_pca, m_ambient_dim);
+ auto nn_it = kns_range.begin();
+ for (unsigned int j = 0;
+ j < num_pts_for_pca && nn_it != kns_range.end();
+ ++j, ++nn_it) {
+ for (int i = 0; i < m_ambient_dim; ++i) {
+ mat_points(j, i) = CGAL::to_double(coord(points_for_pca[nn_it->first], i));
+ }
+ }
+ Eigen::MatrixXd centered = mat_points.rowwise() - mat_points.colwise().mean();
+ Eigen::MatrixXd cov = centered.adjoint() * centered;
+ Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig(cov);
+
+ Tangent_space_basis tsb(i); // p = compute_perturbed_point(i) here
+
+ // The eigenvectors are sorted in increasing order of their corresponding
+ // eigenvalues
+ for (int j = m_ambient_dim - 1;
+ j >= m_ambient_dim - m_intrinsic_dim;
+ --j) {
+ if (normalize_basis) {
+ Vector v = constr_vec(m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim);
+ tsb.push_back(normalize_vector(v, m_k));
+ } else {
+ tsb.push_back(constr_vec(
+ m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim));
+ }
+ }
+
+ if (p_orth_space_basis) {
+ p_orth_space_basis->set_origin(i);
+ for (int j = m_ambient_dim - m_intrinsic_dim - 1;
+ j >= 0;
+ --j) {
+ if (normalize_basis) {
+ Vector v = constr_vec(m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim);
+ p_orth_space_basis->push_back(normalize_vector(v, m_k));
+ } else {
+ p_orth_space_basis->push_back(constr_vec(
+ m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim));
+ }
+ }
+ }
+
+ m_are_tangent_spaces_computed[i] = true;
+
+ return tsb;
+ }
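+
+  // Illustrative sketch (not part of the class, hypothetical names): the PCA
+  // step above, reduced to plain Eigen. Given a matrix `neighbors` with one
+  // nearby sample point per row, the eigenvectors of the covariance matrix
+  // associated with the `intrinsic_dim` largest eigenvalues span the
+  // estimated tangent space.
+  //
+  //   #include <Eigen/Dense>
+  //   #include <vector>
+  //
+  //   std::vector<Eigen::VectorXd> estimate_tangent_basis(
+  //       const Eigen::MatrixXd &neighbors, int intrinsic_dim) {
+  //     // Center the data, then diagonalize the covariance matrix
+  //     Eigen::MatrixXd centered = neighbors.rowwise() - neighbors.colwise().mean();
+  //     Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig(centered.adjoint() * centered);
+  //     // Eigenvalues come out in increasing order: keep the last columns
+  //     std::vector<Eigen::VectorXd> basis;
+  //     const int ambient_dim = static_cast<int>(neighbors.cols());
+  //     for (int j = ambient_dim - 1; j >= ambient_dim - intrinsic_dim; --j)
+  //       basis.push_back(eig.eigenvectors().col(j).normalized());
+  //     return basis;
+  //   }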
+
+ // Compute the space tangent to a simplex (p1, p2, ... pn)
+ // TODO(CJ): Improve this?
+  // Basically, it takes all the points neighboring p1, p2, ..., pn and runs
+  // PCA on them. Note that most points are duplicated.
+
+ Tangent_space_basis compute_tangent_space(const Simplex &s, bool normalize_basis = true) {
+ unsigned int num_pts_for_pca = (std::min)(static_cast<unsigned int> (std::pow(GUDHI_TC_BASE_VALUE_FOR_PCA, m_intrinsic_dim)),
+ static_cast<unsigned int> (m_points.size()));
+
+ // Kernel functors
+ typename K::Construct_vector_d constr_vec =
+ m_k.construct_vector_d_object();
+ typename K::Compute_coordinate_d coord =
+ m_k.compute_coordinate_d_object();
+ typename K::Squared_length_d sqlen =
+ m_k.squared_length_d_object();
+ typename K::Scaled_vector_d scaled_vec =
+ m_k.scaled_vector_d_object();
+ typename K::Scalar_product_d scalar_pdct =
+ m_k.scalar_product_d_object();
+ typename K::Difference_of_vectors_d diff_vec =
+ m_k.difference_of_vectors_d_object();
+
+ // One row = one point
+ Eigen::MatrixXd mat_points(s.size() * num_pts_for_pca, m_ambient_dim);
+ unsigned int current_row = 0;
+
+ for (Simplex::const_iterator it_index = s.begin();
+ it_index != s.end(); ++it_index) {
+ const Point &p = m_points[*it_index];
+
+#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
+ KNS_range kns_range = m_points_ds_for_tse.query_k_nearest_neighbors(
+ p, num_pts_for_pca, false);
+ const Points &points_for_pca = m_points_for_tse;
+#else
+ KNS_range kns_range = m_points_ds.query_k_nearest_neighbors(p, num_pts_for_pca, false);
+ const Points &points_for_pca = m_points;
+#endif
+
+ auto nn_it = kns_range.begin();
+ for (;
+ current_row < num_pts_for_pca && nn_it != kns_range.end();
+ ++current_row, ++nn_it) {
+ for (int i = 0; i < m_ambient_dim; ++i) {
+ mat_points(current_row, i) =
+ CGAL::to_double(coord(points_for_pca[nn_it->first], i));
+ }
+ }
+ }
+ Eigen::MatrixXd centered = mat_points.rowwise() - mat_points.colwise().mean();
+ Eigen::MatrixXd cov = centered.adjoint() * centered;
+ Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig(cov);
+
+ Tangent_space_basis tsb;
+
+ // The eigenvectors are sorted in increasing order of their corresponding
+ // eigenvalues
+ for (int j = m_ambient_dim - 1;
+ j >= m_ambient_dim - m_intrinsic_dim;
+ --j) {
+ if (normalize_basis) {
+ Vector v = constr_vec(m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim);
+ tsb.push_back(normalize_vector(v, m_k));
+ } else {
+ tsb.push_back(constr_vec(
+ m_ambient_dim,
+ eig.eigenvectors().col(j).data(),
+ eig.eigenvectors().col(j).data() + m_ambient_dim));
+ }
+ }
+
+ return tsb;
+ }
+
+  // Returns the dimension of the i-th tangent space basis, which is used as
+  // the dimension of the i-th local triangulation
+
+ int tangent_basis_dim(std::size_t i) const {
+ return m_tangent_spaces[i].dimension();
+ }
+
+ Point compute_perturbed_point(std::size_t pt_idx) const {
+#ifdef GUDHI_TC_PERTURB_POSITION
+ return m_k.translated_point_d_object()(
+ m_points[pt_idx], m_translations[pt_idx]);
+#else
+ return m_points[pt_idx];
+#endif
+ }
+
+ void compute_perturbed_weighted_point(std::size_t pt_idx, Point &p, FT &w) const {
+#ifdef GUDHI_TC_PERTURB_POSITION
+ p = m_k.translated_point_d_object()(
+ m_points[pt_idx], m_translations[pt_idx]);
+#else
+ p = m_points[pt_idx];
+#endif
+ w = m_weights[pt_idx];
+ }
+
+ Weighted_point compute_perturbed_weighted_point(std::size_t pt_idx) const {
+ typename K::Construct_weighted_point_d k_constr_wp =
+ m_k.construct_weighted_point_d_object();
+
+ Weighted_point wp = k_constr_wp(
+#ifdef GUDHI_TC_PERTURB_POSITION
+ m_k.translated_point_d_object()(m_points[pt_idx], m_translations[pt_idx]),
+#else
+ m_points[pt_idx],
+#endif
+ m_weights[pt_idx]);
+
+ return wp;
+ }
+
+ Point unproject_point(const Tr_point &p,
+ const Tangent_space_basis &tsb,
+ const Tr_traits &tr_traits) const {
+ typename K::Translated_point_d k_transl =
+ m_k.translated_point_d_object();
+ typename K::Scaled_vector_d k_scaled_vec =
+ m_k.scaled_vector_d_object();
+ typename Tr_traits::Compute_coordinate_d coord =
+ tr_traits.compute_coordinate_d_object();
+
+ Point global_point = compute_perturbed_point(tsb.origin());
+ for (int i = 0; i < m_intrinsic_dim; ++i)
+ global_point = k_transl(global_point,
+ k_scaled_vec(tsb[i], coord(p, i)));
+
+ return global_point;
+ }
+
+ // Project the point in the tangent space
+ // Resulting point coords are expressed in tsb's space
+ Tr_bare_point project_point(const Point &p,
+ const Tangent_space_basis &tsb,
+ const Tr_traits &tr_traits) const {
+ typename K::Scalar_product_d scalar_pdct =
+ m_k.scalar_product_d_object();
+ typename K::Difference_of_points_d diff_points =
+ m_k.difference_of_points_d_object();
+
+ Vector v = diff_points(p, compute_perturbed_point(tsb.origin()));
+
+ std::vector<FT> coords;
+    // Local (tangent-space) coordinates of the projected point
+ coords.reserve(tsb.dimension());
+    for (int i = 0; i < m_intrinsic_dim; ++i) {
+ // Local coords are given by the scalar product with the vectors of tsb
+ FT coord = scalar_pdct(v, tsb[i]);
+ coords.push_back(coord);
+ }
+
+ return tr_traits.construct_point_d_object()(
+ static_cast<int> (coords.size()), coords.begin(), coords.end());
+ }
+
+ // Project the point in the tangent space
+ // The weight will be the squared distance between p and the projection of p
+ // Resulting point coords are expressed in tsb's space
+
+ Tr_point project_point_and_compute_weight(const Weighted_point &wp,
+ const Tangent_space_basis &tsb,
+ const Tr_traits &tr_traits) const {
+ typename K::Point_drop_weight_d k_drop_w =
+ m_k.point_drop_weight_d_object();
+ typename K::Compute_weight_d k_point_weight =
+ m_k.compute_weight_d_object();
+ return project_point_and_compute_weight(
+ k_drop_w(wp), k_point_weight(wp), tsb, tr_traits);
+ }
+
+ // Same as above, with slightly different parameters
+ Tr_point project_point_and_compute_weight(const Point &p, const FT w,
+ const Tangent_space_basis &tsb,
+ const Tr_traits &tr_traits) const {
+ const int point_dim = m_k.point_dimension_d_object()(p);
+
+ typename K::Construct_point_d constr_pt =
+ m_k.construct_point_d_object();
+ typename K::Scalar_product_d scalar_pdct =
+ m_k.scalar_product_d_object();
+ typename K::Difference_of_points_d diff_points =
+ m_k.difference_of_points_d_object();
+ typename K::Compute_coordinate_d coord =
+ m_k.compute_coordinate_d_object();
+ typename K::Construct_cartesian_const_iterator_d ccci =
+ m_k.construct_cartesian_const_iterator_d_object();
+
+ Point origin = compute_perturbed_point(tsb.origin());
+ Vector v = diff_points(p, origin);
+
+ // Same dimension? Then the weight is 0
+ bool same_dim = (point_dim == tsb.dimension());
+
+ std::vector<FT> coords;
+    // Ambient-space coords of the projected point
+ std::vector<FT> p_proj(ccci(origin), ccci(origin, 0));
+ coords.reserve(tsb.dimension());
+ for (int i = 0; i < tsb.dimension(); ++i) {
+ // Local coords are given by the scalar product with the vectors of tsb
+ FT c = scalar_pdct(v, tsb[i]);
+ coords.push_back(c);
+
+ // p_proj += c * tsb[i]
+ if (!same_dim) {
+ for (int j = 0; j < point_dim; ++j)
+ p_proj[j] += c * coord(tsb[i], j);
+ }
+ }
+
+ // Same dimension? Then the weight is 0
+ FT sq_dist_to_proj_pt = 0;
+ if (!same_dim) {
+ Point projected_pt = constr_pt(point_dim, p_proj.begin(), p_proj.end());
+ sq_dist_to_proj_pt = m_k.squared_distance_d_object()(p, projected_pt);
+ }
+
+ return tr_traits.construct_weighted_point_d_object()
+ (tr_traits.construct_point_d_object()(static_cast<int> (coords.size()), coords.begin(), coords.end()),
+ w - sq_dist_to_proj_pt);
+ }
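+
+  // Illustrative sketch (not part of the class, hypothetical names): the
+  // projection above, written with plain Eigen for an orthonormal tangent
+  // basis. The local coordinates are the scalar products <p - origin, b_i>,
+  // and the projected weight is w - ||p - proj(p)||^2.
+  //
+  //   #include <Eigen/Dense>
+  //   #include <vector>
+  //
+  //   // Returns the local coordinates and updates w to the projected weight
+  //   Eigen::VectorXd project_and_reweight(const Eigen::VectorXd &p,
+  //                                        const Eigen::VectorXd &origin,
+  //                                        const std::vector<Eigen::VectorXd> &basis,
+  //                                        double &w) {
+  //     Eigen::VectorXd v = p - origin;
+  //     Eigen::VectorXd local(static_cast<int>(basis.size()));
+  //     Eigen::VectorXd proj = origin;
+  //     for (int i = 0; i < static_cast<int>(basis.size()); ++i) {
+  //       local[i] = v.dot(basis[i]);   // local coordinate along basis[i]
+  //       proj += local[i] * basis[i];  // ambient-space projection
+  //     }
+  //     w -= (p - proj).squaredNorm();  // weight of the projected point
+  //     return local;
+  //   }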
+
+ // Project all the points in the tangent space
+
+ template <typename Indexed_point_range>
+ std::vector<Tr_point> project_points_and_compute_weights(
+ const Indexed_point_range &point_indices,
+ const Tangent_space_basis &tsb,
+ const Tr_traits &tr_traits) const {
+ std::vector<Tr_point> ret;
+ for (typename Indexed_point_range::const_iterator
+ it = point_indices.begin(), it_end = point_indices.end();
+ it != it_end; ++it) {
+ ret.push_back(project_point_and_compute_weight(
+ compute_perturbed_weighted_point(*it), tsb, tr_traits));
+ }
+ return ret;
+ }
+
+  // A simplex here is a full cell handle of a local triangulation
+
+ bool is_simplex_consistent(Tr_full_cell_handle fch, int cur_dim) const {
+ Simplex c;
+ for (int i = 0; i < cur_dim + 1; ++i) {
+ std::size_t data = fch->vertex(i)->data();
+ c.insert(data);
+ }
+ return is_simplex_consistent(c);
+ }
+
+ // A simplex here is a list of point indices
+ // TODO(CJ): improve it like the other "is_simplex_consistent" below
+
+ bool is_simplex_consistent(Simplex const& simplex) const {
+ // Check if the simplex is in the stars of all its vertices
+ Simplex::const_iterator it_point_idx = simplex.begin();
+    // For each point p of the simplex, we parse the incident cells of p
+ // and we check if "simplex" is among them
+ for (; it_point_idx != simplex.end(); ++it_point_idx) {
+ std::size_t point_idx = *it_point_idx;
+ // Don't check infinite simplices
+ if (point_idx == (std::numeric_limits<std::size_t>::max)())
+ continue;
+
+ Star const& star = m_stars[point_idx];
+
+ // What we're looking for is "simplex" \ point_idx
+ Incident_simplex is_to_find = simplex;
+ is_to_find.erase(point_idx);
+
+ // For each cell
+ if (std::find(star.begin(), star.end(), is_to_find) == star.end())
+ return false;
+ }
+
+ return true;
+ }
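+
+  // Illustrative sketch (standalone, hypothetical types): the same
+  // consistency test on plain containers. A simplex is consistent when, for
+  // every vertex v, the simplex minus v appears in star(v).
+  //
+  //   #include <algorithm>
+  //   #include <vector>
+  //   #include <boost/container/flat_set.hpp>
+  //
+  //   using Simplex_ = boost::container::flat_set<std::size_t>;
+  //   using Star_ = std::vector<Simplex_>;  // star(v): simplices stored without v
+  //
+  //   bool simplex_is_consistent(const Simplex_ &s, const std::vector<Star_> &stars) {
+  //     for (std::size_t v : s) {
+  //       Simplex_ rest = s;
+  //       rest.erase(v);  // what we look for in star(v)
+  //       if (std::find(stars[v].begin(), stars[v].end(), rest) == stars[v].end())
+  //         return false;  // star(v) does not contain the simplex
+  //     }
+  //     return true;
+  //   }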
+
+ // A simplex here is a list of point indices
+ // "s" contains all the points of the simplex except "center_point"
+ // This function returns the points whose star doesn't contain the simplex
+ // N.B.: the function assumes that the simplex is contained in
+ // star(center_point)
+
+ template <typename OutputIterator> // value_type = std::size_t
+ bool is_simplex_consistent(
+ std::size_t center_point,
+ Incident_simplex const& s, // without "center_point"
+ OutputIterator points_whose_star_does_not_contain_s,
+ bool check_also_in_non_maximal_faces = false) const {
+ Simplex full_simplex = s;
+ full_simplex.insert(center_point);
+
+ // Check if the simplex is in the stars of all its vertices
+ Incident_simplex::const_iterator it_point_idx = s.begin();
+    // For each point p of the simplex, we parse the incident cells of p
+ // and we check if "simplex" is among them
+ for (; it_point_idx != s.end(); ++it_point_idx) {
+ std::size_t point_idx = *it_point_idx;
+ // Don't check infinite simplices
+ if (point_idx == (std::numeric_limits<std::size_t>::max)())
+ continue;
+
+ Star const& star = m_stars[point_idx];
+
+ // What we're looking for is full_simplex \ point_idx
+ Incident_simplex is_to_find = full_simplex;
+ is_to_find.erase(point_idx);
+
+ if (check_also_in_non_maximal_faces) {
+        // For each simplex "is" of the star, check if is_to_find is
+ // included in "is"
+ bool found = false;
+ for (Star::const_iterator is = star.begin(), is_end = star.end();
+ !found && is != is_end; ++is) {
+ if (std::includes(is->begin(), is->end(),
+ is_to_find.begin(), is_to_find.end()))
+ found = true;
+ }
+
+ if (!found)
+ *points_whose_star_does_not_contain_s++ = point_idx;
+ } else {
+ // Does the star contain is_to_find?
+ if (std::find(star.begin(), star.end(), is_to_find) == star.end())
+ *points_whose_star_does_not_contain_s++ = point_idx;
+ }
+ }
+
+ return true;
+ }
+
+ // A simplex here is a list of point indices
+ // It looks for s in star(p).
+ // "s" contains all the points of the simplex except p.
+ bool is_simplex_in_star(std::size_t p,
+ Incident_simplex const& s,
+ bool check_also_in_non_maximal_faces = true) const {
+ Star const& star = m_stars[p];
+
+ if (check_also_in_non_maximal_faces) {
+      // For each simplex "is" of the star, check if "s" is
+ // included in "is"
+ bool found = false;
+ for (Star::const_iterator is = star.begin(), is_end = star.end();
+ !found && is != is_end; ++is) {
+ if (std::includes(is->begin(), is->end(), s.begin(), s.end()))
+ found = true;
+ }
+
+ return found;
+ } else {
+ return !(std::find(star.begin(), star.end(), s) == star.end());
+ }
+ }
+
+#ifdef GUDHI_USE_TBB
+ // Functor for try_to_solve_inconsistencies_in_a_local_triangulation function
+ class Try_to_solve_inconsistencies_in_a_local_triangulation {
+ Tangential_complex & m_tc;
+ double m_max_perturb;
+ tbb::combinable<std::size_t> &m_num_inconsistencies;
+ tbb::combinable<std::vector<std::size_t> > &m_updated_points;
+
+ public:
+ // Constructor
+ Try_to_solve_inconsistencies_in_a_local_triangulation(Tangential_complex &tc,
+ double max_perturb,
+ tbb::combinable<std::size_t> &num_inconsistencies,
+ tbb::combinable<std::vector<std::size_t> > &updated_points)
+ : m_tc(tc),
+ m_max_perturb(max_perturb),
+ m_num_inconsistencies(num_inconsistencies),
+ m_updated_points(updated_points) { }
+
+ // Constructor
+ Try_to_solve_inconsistencies_in_a_local_triangulation(const Try_to_solve_inconsistencies_in_a_local_triangulation&
+ tsilt)
+ : m_tc(tsilt.m_tc),
+ m_max_perturb(tsilt.m_max_perturb),
+ m_num_inconsistencies(tsilt.m_num_inconsistencies),
+ m_updated_points(tsilt.m_updated_points) { }
+
+ // operator()
+ void operator()(const tbb::blocked_range<size_t>& r) const {
+ for (size_t i = r.begin(); i != r.end(); ++i) {
+ m_num_inconsistencies.local() +=
+ m_tc.try_to_solve_inconsistencies_in_a_local_triangulation(i, m_max_perturb,
+ std::back_inserter(m_updated_points.local()));
+ }
+ }
+ };
+#endif // GUDHI_USE_TBB
+
+ void perturb(std::size_t point_idx, double max_perturb) {
+ const Tr_traits &local_tr_traits =
+ m_triangulations[point_idx].tr().geom_traits();
+ typename Tr_traits::Compute_coordinate_d coord =
+ local_tr_traits.compute_coordinate_d_object();
+ typename K::Translated_point_d k_transl =
+ m_k.translated_point_d_object();
+ typename K::Construct_vector_d k_constr_vec =
+ m_k.construct_vector_d_object();
+ typename K::Scaled_vector_d k_scaled_vec =
+ m_k.scaled_vector_d_object();
+
+ CGAL::Random_points_in_ball_d<Tr_bare_point>
+ tr_point_in_ball_generator(m_intrinsic_dim,
+ m_random_generator.get_double(0., max_perturb));
+
+ Tr_point local_random_transl =
+ local_tr_traits.construct_weighted_point_d_object()(*tr_point_in_ball_generator++, 0);
+ Translation_for_perturb global_transl = k_constr_vec(m_ambient_dim);
+ const Tangent_space_basis &tsb = m_tangent_spaces[point_idx];
+ for (int i = 0; i < m_intrinsic_dim; ++i) {
+ global_transl = k_transl(global_transl,
+ k_scaled_vec(tsb[i], coord(local_random_transl, i)));
+ }
+ // Parallel
+#if defined(GUDHI_USE_TBB)
+ m_p_perturb_mutexes[point_idx].lock();
+ m_translations[point_idx] = global_transl;
+ m_p_perturb_mutexes[point_idx].unlock();
+ // Sequential
+#else
+ m_translations[point_idx] = global_transl;
+#endif
+ }
+
+ // Return true if inconsistencies were found
+ template <typename OutputIt>
+ bool try_to_solve_inconsistencies_in_a_local_triangulation(std::size_t tr_index,
+ double max_perturb,
+ OutputIt perturbed_pts_indices = CGAL::Emptyset_iterator()) {
+ bool is_inconsistent = false;
+
+ Star const& star = m_stars[tr_index];
+ Tr_vertex_handle center_vh = m_triangulations[tr_index].center_vertex();
+
+ // For each incident simplex
+ Star::const_iterator it_inc_simplex = star.begin();
+ Star::const_iterator it_inc_simplex_end = star.end();
+ for (; it_inc_simplex != it_inc_simplex_end; ++it_inc_simplex) {
+ const Incident_simplex &incident_simplex = *it_inc_simplex;
+
+ // Don't check infinite cells
+ if (is_infinite(incident_simplex))
+ continue;
+
+ Simplex c = incident_simplex;
+ c.insert(tr_index); // Add the missing index
+
+ // Perturb the center point
+ if (!is_simplex_consistent(c)) {
+ is_inconsistent = true;
+
+ std::size_t idx = tr_index;
+
+ perturb(tr_index, max_perturb);
+ *perturbed_pts_indices++ = idx;
+
+ // We will try the other cells next time
+ break;
+ }
+ }
+
+ return is_inconsistent;
+ }
+
+
+ // 1st line: number of points
+ // Then one point per line
+ std::ostream &export_point_set(std::ostream & os,
+ bool use_perturbed_points = false,
+ const char *coord_separator = " ") const {
+ if (use_perturbed_points) {
+ std::vector<Point> perturbed_points;
+ perturbed_points.reserve(m_points.size());
+ for (std::size_t i = 0; i < m_points.size(); ++i)
+ perturbed_points.push_back(compute_perturbed_point(i));
+
+ return export_point_set(
+ m_k, perturbed_points, os, coord_separator);
+ } else {
+ return export_point_set(
+ m_k, m_points, os, coord_separator);
+ }
+ }
+
+ template<typename ProjectionFunctor = CGAL::Identity<Point> >
+ std::ostream &export_vertices_to_off(
+ std::ostream & os, std::size_t &num_vertices,
+ bool use_perturbed_points = false,
+ ProjectionFunctor const& point_projection = ProjectionFunctor()) const {
+ if (m_points.empty()) {
+ num_vertices = 0;
+ return os;
+ }
+
+    // If m_intrinsic_dim = 1, we output each point twice
+    // to be able to export each segment as a flat triangle with 3 different
+    // indices (otherwise, Meshlab detects degenerate simplices)
+ const int N = (m_intrinsic_dim == 1 ? 2 : 1);
+
+ // Kernel functors
+ typename K::Compute_coordinate_d coord =
+ m_k.compute_coordinate_d_object();
+
+#ifdef GUDHI_TC_EXPORT_ALL_COORDS_IN_OFF
+ int num_coords = m_ambient_dim;
+#else
+ int num_coords = (std::min)(m_ambient_dim, 3);
+#endif
+
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ OS_container::const_iterator it_os = m_orth_spaces.begin();
+#endif
+ typename Points::const_iterator it_p = m_points.begin();
+ typename Points::const_iterator it_p_end = m_points.end();
+ // For each point p
+ for (std::size_t i = 0; it_p != it_p_end; ++it_p, ++i) {
+ Point p = point_projection(
+ use_perturbed_points ? compute_perturbed_point(i) : *it_p);
+ for (int ii = 0; ii < N; ++ii) {
+ int j = 0;
+ for (; j < num_coords; ++j)
+ os << CGAL::to_double(coord(p, j)) << " ";
+ if (j == 2)
+ os << "0";
+
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ for (j = 0; j < num_coords; ++j)
+ os << " " << CGAL::to_double(coord(*it_os->begin(), j));
+#endif
+ os << "\n";
+ }
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ ++it_os;
+#endif
+ }
+
+ num_vertices = N * m_points.size();
+ return os;
+ }
+
+ std::ostream &export_simplices_to_off(std::ostream & os, std::size_t &num_OFF_simplices,
+ bool color_inconsistencies = false,
+ Simplex_set const *p_simpl_to_color_in_red = NULL,
+ Simplex_set const *p_simpl_to_color_in_green = NULL,
+ Simplex_set const *p_simpl_to_color_in_blue = NULL)
+ const {
+ // If m_intrinsic_dim = 1, each point is output two times
+ // (see export_vertices_to_off)
+ num_OFF_simplices = 0;
+ std::size_t num_maximal_simplices = 0;
+ std::size_t num_inconsistent_maximal_simplices = 0;
+ std::size_t num_inconsistent_stars = 0;
+ typename Tr_container::const_iterator it_tr = m_triangulations.begin();
+ typename Tr_container::const_iterator it_tr_end = m_triangulations.end();
+ // For each triangulation
+ for (std::size_t idx = 0; it_tr != it_tr_end; ++it_tr, ++idx) {
+ bool is_star_inconsistent = false;
+
+ Triangulation const& tr = it_tr->tr();
+ Tr_vertex_handle center_vh = it_tr->center_vertex();
+
+ if (tr.current_dimension() < m_intrinsic_dim)
+ continue;
+
+ // Color for this star
+ std::stringstream color;
+ // color << rand()%256 << " " << 100+rand()%156 << " " << 100+rand()%156;
+ color << 128 << " " << 128 << " " << 128;
+
+ // Gather the triangles here, with an int telling its color
+ typedef std::vector<std::pair<Simplex, int> > Star_using_triangles;
+ Star_using_triangles star_using_triangles;
+
+ // For each cell of the star
+ Star::const_iterator it_inc_simplex = m_stars[idx].begin();
+ Star::const_iterator it_inc_simplex_end = m_stars[idx].end();
+ for (; it_inc_simplex != it_inc_simplex_end; ++it_inc_simplex) {
+ Simplex c = *it_inc_simplex;
+ c.insert(idx);
+ std::size_t num_vertices = c.size();
+ ++num_maximal_simplices;
+
+ int color_simplex = -1; // -1=no color, 0=yellow, 1=red, 2=green, 3=blue
+ if (color_inconsistencies && !is_simplex_consistent(c)) {
+ ++num_inconsistent_maximal_simplices;
+ color_simplex = 0;
+ is_star_inconsistent = true;
+ } else {
+ if (p_simpl_to_color_in_red &&
+ std::find(
+ p_simpl_to_color_in_red->begin(),
+ p_simpl_to_color_in_red->end(),
+ c) != p_simpl_to_color_in_red->end()) {
+ color_simplex = 1;
+ } else if (p_simpl_to_color_in_green &&
+ std::find(
+ p_simpl_to_color_in_green->begin(),
+ p_simpl_to_color_in_green->end(),
+ c) != p_simpl_to_color_in_green->end()) {
+ color_simplex = 2;
+ } else if (p_simpl_to_color_in_blue &&
+ std::find(
+ p_simpl_to_color_in_blue->begin(),
+ p_simpl_to_color_in_blue->end(),
+ c) != p_simpl_to_color_in_blue->end()) {
+ color_simplex = 3;
+ }
+ }
+
+        // If m_intrinsic_dim = 1, each point is output twice,
+        // so we need to multiply each index by 2.
+        // And if there are only 2 vertices, add a third one (this is valid
+        // because each vertex is duplicated in the file when m_intrinsic_dim = 1)
+ if (m_intrinsic_dim == 1) {
+ Simplex tmp_c;
+ Simplex::iterator it = c.begin();
+ for (; it != c.end(); ++it)
+ tmp_c.insert(*it * 2);
+ if (num_vertices == 2)
+ tmp_c.insert(*tmp_c.rbegin() + 1);
+
+ c = tmp_c;
+ }
+
+ if (num_vertices <= 3) {
+ star_using_triangles.push_back(std::make_pair(c, color_simplex));
+ } else {
+ // num_vertices >= 4: decompose the simplex in triangles
+ std::vector<bool> booleans(num_vertices, false);
+ std::fill(booleans.begin() + num_vertices - 3, booleans.end(), true);
+ do {
+ Simplex triangle;
+ Simplex::iterator it = c.begin();
+ for (int i = 0; it != c.end(); ++i, ++it) {
+ if (booleans[i])
+ triangle.insert(*it);
+ }
+ star_using_triangles.push_back(
+ std::make_pair(triangle, color_simplex));
+ } while (std::next_permutation(booleans.begin(), booleans.end()));
+ }
+ }
+
+ // For each cell
+ Star_using_triangles::const_iterator it_simplex =
+ star_using_triangles.begin();
+ Star_using_triangles::const_iterator it_simplex_end =
+ star_using_triangles.end();
+ for (; it_simplex != it_simplex_end; ++it_simplex) {
+ const Simplex &c = it_simplex->first;
+
+ // Don't export infinite cells
+ if (is_infinite(c))
+ continue;
+
+ int color_simplex = it_simplex->second;
+
+ std::stringstream sstr_c;
+
+ Simplex::const_iterator it_point_idx = c.begin();
+ for (; it_point_idx != c.end(); ++it_point_idx) {
+ sstr_c << *it_point_idx << " ";
+ }
+
+ os << 3 << " " << sstr_c.str();
+ if (color_inconsistencies || p_simpl_to_color_in_red
+ || p_simpl_to_color_in_green || p_simpl_to_color_in_blue) {
+ switch (color_simplex) {
+ case 0: os << " 255 255 0";
+ break;
+ case 1: os << " 255 0 0";
+ break;
+ case 2: os << " 0 255 0";
+ break;
+ case 3: os << " 0 0 255";
+ break;
+ default: os << " " << color.str();
+ break;
+ }
+ }
+ ++num_OFF_simplices;
+ os << "\n";
+ }
+ if (is_star_inconsistent)
+ ++num_inconsistent_stars;
+ }
+
+#ifdef DEBUG_TRACES
+ std::cerr
+ << "\n==========================================================\n"
+ << "Export from list of stars to OFF:\n"
+ << " * Number of vertices: " << m_points.size() << "\n"
+ << " * Total number of maximal simplices: " << num_maximal_simplices
+ << "\n";
+ if (color_inconsistencies) {
+ std::cerr
+ << " * Number of inconsistent stars: "
+ << num_inconsistent_stars << " ("
+ << (m_points.size() > 0 ?
+ 100. * num_inconsistent_stars / m_points.size() : 0.) << "%)\n"
+ << " * Number of inconsistent maximal simplices: "
+ << num_inconsistent_maximal_simplices << " ("
+ << (num_maximal_simplices > 0 ?
+ 100. * num_inconsistent_maximal_simplices / num_maximal_simplices
+ : 0.) << "%)\n";
+ }
+ std::cerr << "==========================================================\n";
+#endif
+
+ return os;
+ }
+
+ public:
+ std::ostream &export_simplices_to_off(
+ const Simplicial_complex &complex,
+ std::ostream & os, std::size_t &num_OFF_simplices,
+ Simplex_set const *p_simpl_to_color_in_red = NULL,
+ Simplex_set const *p_simpl_to_color_in_green = NULL,
+ Simplex_set const *p_simpl_to_color_in_blue = NULL)
+ const {
+ typedef Simplicial_complex::Simplex Simplex;
+ typedef Simplicial_complex::Simplex_set Simplex_set;
+
+ // If m_intrinsic_dim = 1, each point is output two times
+ // (see export_vertices_to_off)
+ num_OFF_simplices = 0;
+ std::size_t num_maximal_simplices = 0;
+
+ typename Simplex_set::const_iterator it_s =
+ complex.simplex_range().begin();
+ typename Simplex_set::const_iterator it_s_end =
+ complex.simplex_range().end();
+ // For each simplex
+ for (; it_s != it_s_end; ++it_s) {
+ Simplex c = *it_s;
+ ++num_maximal_simplices;
+
+ int color_simplex = -1; // -1=no color, 0=yellow, 1=red, 2=green, 3=blue
+ if (p_simpl_to_color_in_red &&
+ std::find(
+ p_simpl_to_color_in_red->begin(),
+ p_simpl_to_color_in_red->end(),
+ c) != p_simpl_to_color_in_red->end()) {
+ color_simplex = 1;
+ } else if (p_simpl_to_color_in_green &&
+ std::find(p_simpl_to_color_in_green->begin(),
+ p_simpl_to_color_in_green->end(),
+ c) != p_simpl_to_color_in_green->end()) {
+ color_simplex = 2;
+ } else if (p_simpl_to_color_in_blue &&
+ std::find(p_simpl_to_color_in_blue->begin(),
+ p_simpl_to_color_in_blue->end(),
+ c) != p_simpl_to_color_in_blue->end()) {
+ color_simplex = 3;
+ }
+
+ // Gather the triangles here
+ typedef std::vector<Simplex> Triangles;
+ Triangles triangles;
+
+ int num_vertices = static_cast<int>(c.size());
+ // Do not export smaller dimension simplices
+ if (num_vertices < m_intrinsic_dim + 1)
+ continue;
+
+      // If m_intrinsic_dim = 1, each point is output twice,
+      // so we need to multiply each index by 2.
+      // And if there are only 2 vertices, add a third one (this is valid
+      // because each vertex is duplicated in the file when m_intrinsic_dim = 1)
+ if (m_intrinsic_dim == 1) {
+ Simplex tmp_c;
+ Simplex::iterator it = c.begin();
+ for (; it != c.end(); ++it)
+ tmp_c.insert(*it * 2);
+ if (num_vertices == 2)
+ tmp_c.insert(*tmp_c.rbegin() + 1);
+
+ c = tmp_c;
+ }
+
+ if (num_vertices <= 3) {
+ triangles.push_back(c);
+ } else {
+ // num_vertices >= 4: decompose the simplex in triangles
+ std::vector<bool> booleans(num_vertices, false);
+ std::fill(booleans.begin() + num_vertices - 3, booleans.end(), true);
+ do {
+ Simplex triangle;
+ Simplex::iterator it = c.begin();
+ for (int i = 0; it != c.end(); ++i, ++it) {
+ if (booleans[i])
+ triangle.insert(*it);
+ }
+ triangles.push_back(triangle);
+ } while (std::next_permutation(booleans.begin(), booleans.end()));
+ }
+
+ // For each cell
+ Triangles::const_iterator it_tri = triangles.begin();
+ Triangles::const_iterator it_tri_end = triangles.end();
+ for (; it_tri != it_tri_end; ++it_tri) {
+ // Don't export infinite cells
+ if (is_infinite(*it_tri))
+ continue;
+
+ os << 3 << " ";
+ Simplex::const_iterator it_point_idx = it_tri->begin();
+ for (; it_point_idx != it_tri->end(); ++it_point_idx) {
+ os << *it_point_idx << " ";
+ }
+
+ if (p_simpl_to_color_in_red || p_simpl_to_color_in_green
+ || p_simpl_to_color_in_blue) {
+ switch (color_simplex) {
+ case 0: os << " 255 255 0";
+ break;
+ case 1: os << " 255 0 0";
+ break;
+ case 2: os << " 0 255 0";
+ break;
+ case 3: os << " 0 0 255";
+ break;
+ default: os << " 128 128 128";
+ break;
+ }
+ }
+
+ ++num_OFF_simplices;
+ os << "\n";
+ }
+ }
+
+#ifdef DEBUG_TRACES
+ std::cerr
+ << "\n==========================================================\n"
+ << "Export from complex to OFF:\n"
+ << " * Number of vertices: " << m_points.size() << "\n"
+ << " * Total number of maximal simplices: " << num_maximal_simplices
+ << "\n"
+ << "==========================================================\n";
+#endif
+
+ return os;
+ }
+
+ private:
+ const K m_k;
+ const int m_intrinsic_dim;
+ const int m_ambient_dim;
+
+ Points m_points;
+ Weights m_weights;
+#ifdef GUDHI_TC_PERTURB_POSITION
+ Translations_for_perturb m_translations;
+#if defined(GUDHI_USE_TBB)
+ Mutex_for_perturb *m_p_perturb_mutexes;
+#endif
+#endif
+
+ Points_ds m_points_ds;
+ double m_last_max_perturb;
+ std::vector<bool> m_are_tangent_spaces_computed;
+ TS_container m_tangent_spaces;
+#ifdef GUDHI_TC_EXPORT_NORMALS
+ OS_container m_orth_spaces;
+#endif
+ Tr_container m_triangulations; // Contains the triangulations
+ // and their center vertex
+ Stars_container m_stars;
+ std::vector<FT> m_squared_star_spheres_radii_incl_margin;
+
+#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
+ Points m_points_for_tse;
+ Points_ds m_points_ds_for_tse;
+#endif
+
+ mutable CGAL::Random m_random_generator;
+}; // /class Tangential_complex
+
+} // end namespace tangential_complex
+} // end namespace Gudhi
+
+#endif // TANGENTIAL_COMPLEX_H_
diff --git a/include/gudhi/Tangential_complex/Simplicial_complex.h b/include/gudhi/Tangential_complex/Simplicial_complex.h
new file mode 100644
index 00000000..65c74ca5
--- /dev/null
+++ b/include/gudhi/Tangential_complex/Simplicial_complex.h
@@ -0,0 +1,539 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TANGENTIAL_COMPLEX_SIMPLICIAL_COMPLEX_H_
+#define TANGENTIAL_COMPLEX_SIMPLICIAL_COMPLEX_H_
+
+#include <gudhi/Tangential_complex/config.h>
+#include <gudhi/Tangential_complex/utilities.h>
+#include <gudhi/Debug_utils.h>
+#include <gudhi/console_color.h>
+
+#include <CGAL/iterator.h>
+
+// For is_pure_pseudomanifold
+#include <boost/graph/graph_traits.hpp>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+#include <boost/container/flat_set.hpp>
+
+#include <algorithm>
+#include <string>
+#include <fstream>
+#include <map> // for map<>
+#include <vector> // for vector<>
+#include <set> // for set<>
+
+namespace Gudhi {
+namespace tangential_complex {
+namespace internal {
+
+class Simplicial_complex {
+ public:
+ typedef boost::container::flat_set<std::size_t> Simplex;
+ typedef std::set<Simplex> Simplex_set;
+
+ // If perform_checks = true, the function:
+  // - won't insert the simplex if it is already contained in a higher-dimensional simplex
+  // - will erase any lower-dimensional simplices that are faces of the new simplex
+ // Returns true if the simplex was added
+ bool add_simplex(
+ const Simplex &s, bool perform_checks = true) {
+ if (perform_checks) {
+      unsigned int num_pts = static_cast<unsigned int> (s.size());
+ std::vector<Complex::iterator> to_erase;
+ bool check_higher_dim_simpl = true;
+ for (Complex::iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ // Check if the simplex is not already in a higher dim simplex
+ if (check_higher_dim_simpl
+ && it_simplex->size() > num_pts
+ && std::includes(it_simplex->begin(), it_simplex->end(),
+ s.begin(), s.end())) {
+ // No need to insert it, then
+ return false;
+ }
+ // Check if the simplex includes some lower-dim simplices
+ if (it_simplex->size() < num_pts
+ && std::includes(s.begin(), s.end(),
+ it_simplex->begin(), it_simplex->end())) {
+ to_erase.push_back(it_simplex);
+          // We don't need to check higher-dim simplices any more
+ check_higher_dim_simpl = false;
+ }
+ }
+ for (std::vector<Complex::iterator>::const_iterator it = to_erase.begin();
+ it != to_erase.end(); ++it) {
+ m_complex.erase(*it);
+ }
+ }
+ return m_complex.insert(s).second;
+ }
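+
+  // Usage sketch (illustration only, hypothetical values): with
+  // perform_checks = true, only maximal simplices are kept. Inserting a face
+  // of an existing simplex is a no-op, and inserting a new simplex erases the
+  // faces it absorbs.
+  //
+  //   Simplicial_complex sc;
+  //   sc.add_simplex(Simplex{0, 1, 2});  // inserted, returns true
+  //   sc.add_simplex(Simplex{0, 1});     // returns false: {0,1} is a face of {0,1,2}
+  //   // sc.simplex_range() now contains the single simplex {0,1,2}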
+
+ const Simplex_set &simplex_range() const {
+ return m_complex;
+ }
+
+ bool empty() {
+ return m_complex.empty();
+ }
+
+ void clear() {
+ m_complex.clear();
+ }
+
+ template <typename Test, typename Output_it>
+ void get_simplices_matching_test(Test test, Output_it out) {
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ if (test(*it_simplex))
+ *out++ = *it_simplex;
+ }
+ }
+
+ // When a simplex S has only one co-face C, we can remove S and C
+ // without changing the topology
+
+ void collapse(int max_simplex_dim, bool quiet = false) {
+#ifdef DEBUG_TRACES
+ if (!quiet)
+ std::cerr << "Collapsing... ";
+#endif
+ // We note k = max_simplex_dim - 1
+ int k = max_simplex_dim - 1;
+
+ typedef Complex::iterator Simplex_iterator;
+ typedef std::vector<Simplex_iterator> Simplex_iterator_list;
+ typedef std::map<Simplex, Simplex_iterator_list> Cofaces_map;
+
+ std::size_t num_collapsed_maximal_simplices = 0;
+ do {
+ num_collapsed_maximal_simplices = 0;
+      // Create a map associating each non-maximal k-face to the list of its
+ // maximal cofaces
+ Cofaces_map cofaces_map;
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ if (static_cast<int> (it_simplex->size()) > k + 1) {
+ std::vector<Simplex> k_faces;
+ // Get the k-faces composing the simplex
+ combinations(*it_simplex, k + 1, std::back_inserter(k_faces));
+ for (const auto &comb : k_faces)
+ cofaces_map[comb].push_back(it_simplex);
+ }
+ }
+
+ // For each non-maximal k-face F, if F has only one maximal coface Cf:
+ // - Look for the other k-faces F2, F3... of Cf in the map and:
+ // * if the list contains only Cf, clear the list (we don't remove the
+ // list since it creates troubles with the iterators) and add the F2,
+ // F3... to the complex
+ // * otherwise, remove Cf from the associated list
+ // - Remove Cf from the complex
+ for (Cofaces_map::const_iterator it_map_elt = cofaces_map.begin(),
+ it_map_end = cofaces_map.end();
+ it_map_elt != it_map_end;
+ ++it_map_elt) {
+ if (it_map_elt->second.size() == 1) {
+ std::vector<Simplex> k_faces;
+ const Simplex_iterator_list::value_type &it_Cf =
+ *it_map_elt->second.begin();
+ GUDHI_CHECK(it_Cf->size() == max_simplex_dim + 1,
+ std::logic_error("Wrong dimension"));
+ // Get the k-faces composing the simplex
+ combinations(*it_Cf, k + 1, std::back_inserter(k_faces));
+ for (const auto &f2 : k_faces) {
+ // Skip F
+ if (f2 != it_map_elt->first) {
+ Cofaces_map::iterator it_comb_in_map = cofaces_map.find(f2);
+ if (it_comb_in_map->second.size() == 1) {
+ it_comb_in_map->second.clear();
+ m_complex.insert(f2);
+ } else { // it_comb_in_map->second.size() > 1
+ Simplex_iterator_list::iterator it = std::find(it_comb_in_map->second.begin(),
+ it_comb_in_map->second.end(),
+ it_Cf);
+ GUDHI_CHECK(it != it_comb_in_map->second.end(),
+ std::logic_error("Error: it == it_comb_in_map->second.end()"));
+ it_comb_in_map->second.erase(it);
+ }
+ }
+ }
+ m_complex.erase(it_Cf);
+ ++num_collapsed_maximal_simplices;
+ }
+ }
+ // Repeat until no maximal simplex got removed
+ } while (num_collapsed_maximal_simplices > 0);
+
+ // Collapse the lower dimension simplices
+ if (k > 0)
+ collapse(max_simplex_dim - 1, true);
+
+#ifdef DEBUG_TRACES
+ if (!quiet)
+ std::cerr << "done.\n";
+#endif
+ }
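+
+  // Usage sketch (illustration only, hypothetical values): a single filled
+  // triangle is collapsible, so repeated free-face collapses reduce it to
+  // one vertex.
+  //
+  //   Simplicial_complex sc;
+  //   sc.add_simplex(Simplex{0, 1, 2});
+  //   sc.collapse(2);  // max_simplex_dim = 2
+  //   // sc.simplex_range() now contains a single 0-simplex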
+
+ void display_stats() const {
+ std::cerr << yellow << "Complex stats:\n" << white;
+
+ if (m_complex.empty()) {
+ std::cerr << " * No simplices.\n";
+ } else {
+ // Number of simplex for each dimension
+ std::map<int, std::size_t> simplex_stats;
+
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ ++simplex_stats[static_cast<int> (it_simplex->size()) - 1];
+ }
+
+ for (std::map<int, std::size_t>::const_iterator it_map = simplex_stats.begin();
+ it_map != simplex_stats.end(); ++it_map) {
+ std::cerr << " * " << it_map->first << "-simplices: "
+ << it_map->second << "\n";
+ }
+ }
+ }
+
+ // verbose_level = 0, 1 or 2
+ bool is_pure_pseudomanifold__do_not_check_if_stars_are_connected(int simplex_dim,
+ bool allow_borders = false,
+ bool exit_at_the_first_problem = false,
+ int verbose_level = 0,
+ std::size_t *p_num_wrong_dim_simplices = NULL,
+ std::size_t *p_num_wrong_number_of_cofaces = NULL) const {
+ typedef Simplex K_1_face;
+ typedef std::map<K_1_face, std::size_t> Cofaces_map;
+
+ std::size_t num_wrong_dim_simplices = 0;
+ std::size_t num_wrong_number_of_cofaces = 0;
+
+ // Counts the number of cofaces of each K_1_face
+
+    // Create a map associating each (simplex_dim - 1)-face with the number of
+    // its cofaces
+ Cofaces_map cofaces_map;
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ if (static_cast<int> (it_simplex->size()) != simplex_dim + 1) {
+ if (verbose_level >= 2)
+ std::cerr << "Found a simplex with dim = "
+ << it_simplex->size() - 1 << "\n";
+ ++num_wrong_dim_simplices;
+ } else {
+ std::vector<K_1_face> k_1_faces;
+ // Get the facets composing the simplex
+ combinations(
+ *it_simplex, simplex_dim, std::back_inserter(k_1_faces));
+ for (const auto &k_1_face : k_1_faces) {
+ ++cofaces_map[k_1_face];
+ }
+ }
+ }
+
+ for (Cofaces_map::const_iterator it_map_elt = cofaces_map.begin(),
+ it_map_end = cofaces_map.end();
+ it_map_elt != it_map_end;
+ ++it_map_elt) {
+ if (it_map_elt->second != 2
+ && (!allow_borders || it_map_elt->second != 1)) {
+ if (verbose_level >= 2)
+ std::cerr << "Found a k-1-face with "
+ << it_map_elt->second << " cofaces\n";
+
+ if (exit_at_the_first_problem)
+ return false;
+ else
+ ++num_wrong_number_of_cofaces;
+ }
+ }
+
+ bool ret = num_wrong_dim_simplices == 0 && num_wrong_number_of_cofaces == 0;
+
+ if (verbose_level >= 1) {
+ std::cerr << "Pure pseudo-manifold: ";
+ if (ret) {
+ std::cerr << green << "YES" << white << "\n";
+ } else {
+ std::cerr << red << "NO" << white << "\n"
+ << " * Number of wrong dimension simplices: "
+ << num_wrong_dim_simplices << "\n"
+ << " * Number of wrong number of cofaces: "
+ << num_wrong_number_of_cofaces << "\n";
+ }
+ }
+
+ if (p_num_wrong_dim_simplices)
+ *p_num_wrong_dim_simplices = num_wrong_dim_simplices;
+ if (p_num_wrong_number_of_cofaces)
+ *p_num_wrong_number_of_cofaces = num_wrong_number_of_cofaces;
+
+ return ret;
+ }
+
+ template <int K>
+ std::size_t num_K_simplices() const {
+ Simplex_set k_simplices;
+
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ if (it_simplex->size() == K + 1) {
+ k_simplices.insert(*it_simplex);
+ } else if (it_simplex->size() > K + 1) {
+ // Get the k-faces composing the simplex
+ combinations(
+ *it_simplex, K + 1, std::inserter(k_simplices, k_simplices.begin()));
+ }
+ }
+
+ return k_simplices.size();
+ }
+
+ std::ptrdiff_t euler_characteristic(bool verbose = false) const {
+ if (verbose)
+ std::cerr << "\nComputing Euler characteristic of the complex...\n";
+
+ std::size_t num_vertices = num_K_simplices<0>();
+ std::size_t num_edges = num_K_simplices<1>();
+ std::size_t num_triangles = num_K_simplices<2>();
+
+ std::ptrdiff_t ec =
+ (std::ptrdiff_t) num_vertices
+ - (std::ptrdiff_t) num_edges
+ + (std::ptrdiff_t) num_triangles;
+
+ if (verbose)
+ std::cerr << "Euler characteristic: V - E + F = "
+ << num_vertices << " - " << num_edges << " + " << num_triangles << " = "
+ << blue
+ << ec
+ << white << "\n";
+
+ return ec;
+ }
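+
+  // Usage sketch (illustration only, hypothetical values): the boundary of a
+  // triangle, i.e. the three edges {0,1}, {1,2} and {0,2}, has V = 3, E = 3,
+  // F = 0, hence an Euler characteristic of 3 - 3 + 0 = 0, as expected for a
+  // topological circle.
+  //
+  //   Simplicial_complex circle;
+  //   circle.add_simplex(Simplex{0, 1});
+  //   circle.add_simplex(Simplex{1, 2});
+  //   circle.add_simplex(Simplex{0, 2});
+  //   std::ptrdiff_t chi = circle.euler_characteristic();  // chi == 0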
+
+ // TODO(CJ): ADD COMMENTS
+
+ bool is_pure_pseudomanifold(
+ int simplex_dim,
+ std::size_t num_vertices,
+ bool allow_borders = false,
+ bool exit_at_the_first_problem = false,
+ int verbose_level = 0,
+ std::size_t *p_num_wrong_dim_simplices = NULL,
+ std::size_t *p_num_wrong_number_of_cofaces = NULL,
+ std::size_t *p_num_unconnected_stars = NULL,
+ Simplex_set *p_wrong_dim_simplices = NULL,
+ Simplex_set *p_wrong_number_of_cofaces_simplices = NULL,
+ Simplex_set *p_unconnected_stars_simplices = NULL) const {
+ // If simplex_dim == 1, we do not need to check if stars are connected
+ if (simplex_dim == 1) {
+ if (p_num_unconnected_stars)
+ *p_num_unconnected_stars = 0;
+ return is_pure_pseudomanifold__do_not_check_if_stars_are_connected(simplex_dim,
+ allow_borders,
+ exit_at_the_first_problem,
+ verbose_level,
+ p_num_wrong_dim_simplices,
+ p_num_wrong_number_of_cofaces);
+ }
+ // Associates each vertex (= the index in the vector)
+ // to its star (list of simplices)
+ typedef std::vector<std::vector<Complex::const_iterator> > Stars;
+ std::size_t num_wrong_dim_simplices = 0;
+ std::size_t num_wrong_number_of_cofaces = 0;
+ std::size_t num_unconnected_stars = 0;
+
+ // Fills a Stars data structure
+ Stars stars;
+ stars.resize(num_vertices);
+ for (Complex::const_iterator it_simplex = m_complex.begin(),
+ it_simplex_end = m_complex.end();
+ it_simplex != it_simplex_end;
+ ++it_simplex) {
+ if (static_cast<int> (it_simplex->size()) != simplex_dim + 1) {
+ if (verbose_level >= 2)
+ std::cerr << "Found a simplex with dim = "
+ << it_simplex->size() - 1 << "\n";
+ ++num_wrong_dim_simplices;
+ if (p_wrong_dim_simplices)
+ p_wrong_dim_simplices->insert(*it_simplex);
+ } else {
+ for (Simplex::const_iterator it_point_idx = it_simplex->begin();
+ it_point_idx != it_simplex->end();
+ ++it_point_idx) {
+ stars[*it_point_idx].push_back(it_simplex);
+ }
+ }
+ }
+
+ // Now, for each star, we have a vector of its d-simplices
+ // i.e. one index for each d-simplex
+ // Boost Graph only deals with indexes, so we also need indexes for the
+ // (d-1)-simplices
+ std::size_t center_vertex_index = 0;
+ for (Stars::const_iterator it_star = stars.begin();
+ it_star != stars.end();
+ ++it_star, ++center_vertex_index) {
+ typedef std::map<Simplex, std::vector<std::size_t> >
+ Dm1_faces_to_adj_D_faces;
+ Dm1_faces_to_adj_D_faces dm1_faces_to_adj_d_faces;
+
+ for (std::size_t i_dsimpl = 0; i_dsimpl < it_star->size(); ++i_dsimpl) {
+ Simplex dm1_simpl_of_link = *((*it_star)[i_dsimpl]);
+ dm1_simpl_of_link.erase(center_vertex_index);
+ // Copy it to a vector so that we can use operator[] on it
+ std::vector<std::size_t> dm1_simpl_of_link_vec(
+ dm1_simpl_of_link.begin(), dm1_simpl_of_link.end());
+
+ CGAL::Combination_enumerator<int> dm2_simplices(
+ simplex_dim - 1, 0, simplex_dim);
+ for (; !dm2_simplices.finished(); ++dm2_simplices) {
+ Simplex dm2_simpl;
+ for (int j = 0; j < simplex_dim - 1; ++j)
+ dm2_simpl.insert(dm1_simpl_of_link_vec[dm2_simplices[j]]);
+ dm1_faces_to_adj_d_faces[dm2_simpl].push_back(i_dsimpl);
+ }
+ }
+
+ Adj_graph adj_graph;
+ std::vector<Graph_vertex> d_faces_descriptors;
+ d_faces_descriptors.resize(it_star->size());
+ for (std::size_t j = 0; j < it_star->size(); ++j)
+ d_faces_descriptors[j] = boost::add_vertex(adj_graph);
+
+ Dm1_faces_to_adj_D_faces::const_iterator dm1_to_d_it =
+ dm1_faces_to_adj_d_faces.begin();
+ Dm1_faces_to_adj_D_faces::const_iterator dm1_to_d_it_end =
+ dm1_faces_to_adj_d_faces.end();
+ for (std::size_t i_km1_face = 0;
+ dm1_to_d_it != dm1_to_d_it_end;
+ ++dm1_to_d_it, ++i_km1_face) {
+ Graph_vertex km1_gv = boost::add_vertex(adj_graph);
+
+ for (std::vector<std::size_t>::const_iterator kface_it =
+ dm1_to_d_it->second.begin();
+ kface_it != dm1_to_d_it->second.end();
+ ++kface_it) {
+ boost::add_edge(km1_gv, *kface_it, adj_graph);
+ }
+
+ if (dm1_to_d_it->second.size() != 2
+ && (!allow_borders || dm1_to_d_it->second.size() != 1)) {
+ ++num_wrong_number_of_cofaces;
+ if (p_wrong_number_of_cofaces_simplices) {
+ for (auto idx : dm1_to_d_it->second)
+ p_wrong_number_of_cofaces_simplices->insert(*((*it_star)[idx]));
+ }
+ }
+ }
+
+      // What is left is to check the connectivity
+ bool is_connected = true;
+ if (boost::num_vertices(adj_graph) > 0) {
+ std::vector<int> components(boost::num_vertices(adj_graph));
+ is_connected =
+ (boost::connected_components(adj_graph, &components[0]) == 1);
+ }
+
+ if (!is_connected) {
+ if (verbose_level >= 2)
+ std::cerr << "Error: star #" << center_vertex_index
+ << " is not connected\n";
+ ++num_unconnected_stars;
+ if (p_unconnected_stars_simplices) {
+ for (std::vector<Complex::const_iterator>::const_iterator
+ it_simpl = it_star->begin(),
+ it_simpl_end = it_star->end();
+ it_simpl != it_simpl_end;
+ ++it_simpl) {
+ p_unconnected_stars_simplices->insert(**it_simpl);
+ }
+ }
+ }
+ }
+
+ // Each one has been counted several times ("simplex_dim" times)
+ num_wrong_number_of_cofaces /= simplex_dim;
+
+ bool ret =
+ num_wrong_dim_simplices == 0
+ && num_wrong_number_of_cofaces == 0
+ && num_unconnected_stars == 0;
+
+ if (verbose_level >= 1) {
+ std::cerr << "Pure pseudo-manifold: ";
+ if (ret) {
+ std::cerr << green << "YES" << white << "\n";
+ } else {
+ std::cerr << red << "NO" << white << "\n"
+ << " * Number of wrong dimension simplices: "
+ << num_wrong_dim_simplices << "\n"
+ << " * Number of wrong number of cofaces: "
+ << num_wrong_number_of_cofaces << "\n"
+ << " * Number of not-connected stars: "
+ << num_unconnected_stars << "\n";
+ }
+ }
+
+ if (p_num_wrong_dim_simplices)
+ *p_num_wrong_dim_simplices = num_wrong_dim_simplices;
+ if (p_num_wrong_number_of_cofaces)
+ *p_num_wrong_number_of_cofaces = num_wrong_number_of_cofaces;
+ if (p_num_unconnected_stars)
+ *p_num_unconnected_stars = num_unconnected_stars;
+
+ return ret;
+ }
+
+ private:
+ typedef Simplex_set Complex;
+
+ // graph is an adjacency list
+ typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> Adj_graph;
+ // map that gives to a certain simplex its node in graph and its dimension
+ typedef boost::graph_traits<Adj_graph>::vertex_descriptor Graph_vertex;
+ typedef boost::graph_traits<Adj_graph>::edge_descriptor Graph_edge;
+
+ Complex m_complex;
+}; // class Simplicial_complex
+
+} // namespace internal
+} // namespace tangential_complex
+} // namespace Gudhi
+
+#endif // TANGENTIAL_COMPLEX_SIMPLICIAL_COMPLEX_H_
diff --git a/include/gudhi/Tangential_complex/config.h b/include/gudhi/Tangential_complex/config.h
new file mode 100644
index 00000000..ffefcd6b
--- /dev/null
+++ b/include/gudhi/Tangential_complex/config.h
@@ -0,0 +1,43 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TANGENTIAL_COMPLEX_CONFIG_H_
+#define TANGENTIAL_COMPLEX_CONFIG_H_
+
+#include <cstddef>
+
+// ========================= Debugging & profiling =============================
+// #define GUDHI_TC_PROFILING
+// #define GUDHI_TC_VERY_VERBOSE
+// #define GUDHI_TC_PERFORM_EXTRA_CHECKS
+// #define GUDHI_TC_SHOW_DETAILED_STATS_FOR_INCONSISTENCIES
+
+// ========================= Strategy ==========================================
+#define GUDHI_TC_PERTURB_POSITION
+// #define GUDHI_TC_PERTURB_WEIGHT
+
+// ========================= Parameters ========================================
+
+// PCA will use GUDHI_TC_BASE_VALUE_FOR_PCA^intrinsic_dim points
+const std::size_t GUDHI_TC_BASE_VALUE_FOR_PCA = 5;
+
+#endif // TANGENTIAL_COMPLEX_CONFIG_H_
diff --git a/include/gudhi/Tangential_complex/utilities.h b/include/gudhi/Tangential_complex/utilities.h
new file mode 100644
index 00000000..b2d6d674
--- /dev/null
+++ b/include/gudhi/Tangential_complex/utilities.h
@@ -0,0 +1,195 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TANGENTIAL_COMPLEX_UTILITIES_H_
+#define TANGENTIAL_COMPLEX_UTILITIES_H_
+
+#include <CGAL/Dimension.h>
+#include <CGAL/Combination_enumerator.h>
+#include <CGAL/IO/Triangulation_off_ostream.h>
+
+#include <boost/container/flat_set.hpp>
+
+#include <Eigen/Core>
+#include <Eigen/Eigen>
+
+#include <set>
+#include <vector>
+#include <array>
+#include <fstream>
+#include <atomic>
+#include <cmath> // for std::sqrt
+
+namespace Gudhi {
+namespace tangential_complex {
+namespace internal {
+
+// Provides copy constructors to std::atomic so that
+// it can be used in a vector
+template <typename T>
+struct Atomic_wrapper
+: public std::atomic<T> {
+ typedef std::atomic<T> Base;
+
+ Atomic_wrapper() { }
+
+ Atomic_wrapper(const T &t) : Base(t) { }
+
+ Atomic_wrapper(const std::atomic<T> &a) : Base(a.load()) { }
+
+ Atomic_wrapper(const Atomic_wrapper &other) : Base(other.load()) { }
+
+ Atomic_wrapper &operator=(const T &other) {
+ Base::store(other);
+ return *this;
+ }
+
+ Atomic_wrapper &operator=(const std::atomic<T> &other) {
+ Base::store(other.load());
+ return *this;
+ }
+
+ Atomic_wrapper &operator=(const Atomic_wrapper &other) {
+ Base::store(other.load());
+ return *this;
+ }
+};
+
+// Modifies v in-place
+template <typename K>
+typename K::Vector_d& normalize_vector(typename K::Vector_d& v,
+ K const& k) {
+ v = k.scaled_vector_d_object()(
+ v, typename K::FT(1) / std::sqrt(k.squared_length_d_object()(v)));
+ return v;
+}
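+
+// Usage sketch (assumption: a CGAL d-dimensional kernel such as
+// CGAL::Epick_d<CGAL::Dynamic_dimension_tag> from <CGAL/Epick_d.h>).
+// The vector is rescaled in place to unit length.
+//
+//   typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> Kernel;
+//   Kernel k;
+//   double coords[] = {3., 4.};
+//   Kernel::Vector_d v = k.construct_vector_d_object()(2, coords, coords + 2);
+//   normalize_vector(v, k);  // v is now (0.6, 0.8)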
+
+template<typename Kernel>
+struct Basis {
+ typedef typename Kernel::FT FT;
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::Vector_d Vector;
+ typedef typename std::vector<Vector>::const_iterator const_iterator;
+
+ std::size_t m_origin;
+ std::vector<Vector> m_vectors;
+
+ std::size_t origin() const {
+ return m_origin;
+ }
+
+ void set_origin(std::size_t o) {
+ m_origin = o;
+ }
+
+ const_iterator begin() const {
+ return m_vectors.begin();
+ }
+
+ const_iterator end() const {
+ return m_vectors.end();
+ }
+
+ std::size_t size() const {
+ return m_vectors.size();
+ }
+
+ Vector& operator[](const std::size_t i) {
+ return m_vectors[i];
+ }
+
+ const Vector& operator[](const std::size_t i) const {
+ return m_vectors[i];
+ }
+
+ void push_back(const Vector& v) {
+ m_vectors.push_back(v);
+ }
+
+ void reserve(const std::size_t s) {
+ m_vectors.reserve(s);
+ }
+
+ Basis() { }
+
+ Basis(std::size_t origin) : m_origin(origin) { }
+
+ Basis(std::size_t origin, const std::vector<Vector>& vectors)
+ : m_origin(origin), m_vectors(vectors) { }
+
+ int dimension() const {
+ return static_cast<int> (m_vectors.size());
+ }
+};
+
+// 1st line: number of points
+// Then one point per line
+template <typename Kernel, typename Point_range>
+std::ostream &export_point_set(
+ Kernel const& k,
+ Point_range const& points,
+ std::ostream & os,
+ const char *coord_separator = " ") {
+ // Kernel functors
+ typename Kernel::Construct_cartesian_const_iterator_d ccci =
+ k.construct_cartesian_const_iterator_d_object();
+
+ os << points.size() << "\n";
+
+ typename Point_range::const_iterator it_p = points.begin();
+ typename Point_range::const_iterator it_p_end = points.end();
+ // For each point p
+ for (; it_p != it_p_end; ++it_p) {
+ for (auto it = ccci(*it_p); it != ccci(*it_p, 0); ++it)
+ os << CGAL::to_double(*it) << coord_separator;
+
+ os << "\n";
+ }
+
+ return os;
+}
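+
+// Example output (hypothetical values) for two 2D points (1, 2) and (3, 4).
+// Note that every coordinate is followed by the separator, so each point
+// line ends with a trailing separator:
+//
+//   2
+//   1 2
+//   3 4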
+
+// Compute all the k-combinations of elements
+// Output_iterator::value_type must be
+// boost::container::flat_set<std::size_t>
+template <typename Elements_container, typename Output_iterator>
+void combinations(const Elements_container elements, int k,
+ Output_iterator combinations) {
+ std::size_t n = elements.size();
+ std::vector<bool> booleans(n, false);
+ std::fill(booleans.begin() + n - k, booleans.end(), true);
+ do {
+ boost::container::flat_set<std::size_t> combination;
+ typename Elements_container::const_iterator it_elt = elements.begin();
+ for (std::size_t i = 0; i < n; ++i, ++it_elt) {
+ if (booleans[i])
+ combination.insert(*it_elt);
+ }
+ *combinations++ = combination;
+ } while (std::next_permutation(booleans.begin(), booleans.end()));
+}
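+
+// Usage sketch (illustration only, hypothetical values; requires <vector>
+// and <iterator>): the 2-element subsets of {1, 4, 7}.
+//
+//   boost::container::flat_set<std::size_t> elts{1, 4, 7};
+//   std::vector<boost::container::flat_set<std::size_t> > pairs;
+//   combinations(elts, 2, std::back_inserter(pairs));
+//   // pairs now holds {1,4}, {1,7} and {4,7} (in some order)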
+
+} // namespace internal
+} // namespace tangential_complex
+} // namespace Gudhi
+
+#endif // TANGENTIAL_COMPLEX_UTILITIES_H_
diff --git a/include/gudhi/Test.h b/include/gudhi/Test.h
deleted file mode 100644
index 6024c822..00000000
--- a/include/gudhi/Test.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* This file is part of the Gudhi Library. The Gudhi library
- * (Geometric Understanding in Higher Dimensions) is a generic C++
- * library for computational topology.
- *
- * Author(s): David Salinas
- *
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef TEST_H_
-#define TEST_H_
-
-#include <list>
-#include <string>
-#include <vector>
-#include <sstream>
-#include <iostream>
-
-
-#define TEST(a) std::cout << "TEST: " << (a) << std::endl
-#define TESTMSG(a, b) std::cout << "TEST: " << a << b << std::endl
-#define TESTVALUE(a) std::cout << "TEST: " << #a << ": " << a << std::endl
-
-/**
- * Class to perform test
- */
-
-class Test {
- private:
- std::string name;
- bool (*test)();
-
- std::string separation() const {
- return "+++++++++++++++++++++++++++++++++++++++++++++++++\n";
- }
-
- std::string print_between_plus(std::string& s) const {
- std::stringstream res;
- res << "+++++++++++++++++" << s << "+++++++++++++++++\n";
- return res.str();
- }
-
- public:
- Test(std::string name_, bool (*test_)()) {
- name = name_;
- test = test_;
- }
-
- bool run() {
- std::cout << print_between_plus(name);
- return test();
- }
-
- std::string getName() {
- return name;
- }
-};
-
-class Tests {
- private:
- std::list<Test> tests;
-
- public:
- void add(std::string name_, bool (*test_)()) {
- Test test(name_, test_);
- tests.push_back(test);
- }
-
- bool run() {
- bool tests_succesful(true);
- std::vector<bool> res;
- for (Test test : tests) {
- res.push_back(test.run());
- }
- std::cout << "\n\n results of tests : " << std::endl;
- int i = 0;
- for (Test t : tests) {
- std::cout << "Test " << i << " \"" << t.getName() << "\" --> ";
- if (res[i++]) {
- std::cout << "OK" << std::endl;
- } else {
- std::cout << "Fail" << std::endl;
- tests_succesful = false;
- break;
- }
- }
- return tests_succesful;
- }
-};
-
-#endif // TEST_H_
diff --git a/include/gudhi/Witness_complex.h b/include/gudhi/Witness_complex.h
index 489cdf11..63f03687 100644
--- a/include/gudhi/Witness_complex.h
+++ b/include/gudhi/Witness_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): Siargey Kachanovich
*
- * Copyright (C) 2015 INRIA Sophia Antipolis-Méditerranée (France)
+ * Copyright (C) 2015 INRIA (France)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,65 +23,44 @@
#ifndef WITNESS_COMPLEX_H_
#define WITNESS_COMPLEX_H_
-// Needed for the adjacency graph in bad link search
-#include <boost/graph/graph_traits.hpp>
-#include <boost/graph/adjacency_list.hpp>
-#include <boost/graph/connected_components.hpp>
+#include <gudhi/Active_witness/Active_witness.h>
+#include <gudhi/Witness_complex/all_faces_in.h>
-#include <boost/range/size.hpp>
-
-#include <gudhi/distance_functions.h>
-
-#include <algorithm>
#include <utility>
#include <vector>
#include <list>
-#include <set>
-#include <queue>
#include <limits>
-#include <ctime>
-#include <iostream>
namespace Gudhi {
namespace witness_complex {
-// /*
-// * \private
-// \class Witness_complex
-// \brief Constructs the witness complex for the given set of witnesses and landmarks.
-// \ingroup witness_complex
-// */
-template< class SimplicialComplex>
+/**
+ * \private
+ * \class Witness_complex
+ * \brief Constructs the (weak) witness complex for a given table of nearest landmarks with respect to witnesses.
+ * \ingroup witness_complex
+ *
+ * \tparam Nearest_landmark_table_ needs to be a range of a range of pairs of nearest landmarks and distances.
+ * The class Nearest_landmark_table_::value_type must be a copyable range.
+ * The range of pairs must admit a member type 'iterator'. The dereference type
+ * of the pair range iterator needs to be 'std::pair<std::size_t, double>'.
+*/
+template< class Nearest_landmark_table_ >
class Witness_complex {
private:
- struct Active_witness {
- int witness_id;
- int landmark_id;
-
- Active_witness(int witness_id_, int landmark_id_)
- : witness_id(witness_id_),
- landmark_id(landmark_id_) { }
- };
-
- private:
- typedef typename SimplicialComplex::Simplex_handle Simplex_handle;
- typedef typename SimplicialComplex::Vertex_handle Vertex_handle;
-
- typedef std::vector< double > Point_t;
- typedef std::vector< Point_t > Point_Vector;
-
- typedef std::vector< Vertex_handle > typeVectorVertex;
- typedef std::pair< typeVectorVertex, Filtration_value> typeSimplex;
- typedef std::pair< Simplex_handle, bool > typePairSimplexBool;
-
- typedef int Witness_id;
- typedef int Landmark_id;
- typedef std::list< Vertex_handle > ActiveWitnessList;
-
- private:
- int nbL_; // Number of landmarks
- SimplicialComplex& sc_; // Simplicial complex
+ typedef typename Nearest_landmark_table_::value_type Nearest_landmark_range;
+ typedef std::size_t Witness_id;
+ typedef std::size_t Landmark_id;
+ typedef std::pair<Landmark_id, double> Id_distance_pair;
+ typedef Active_witness<Id_distance_pair, Nearest_landmark_range> ActiveWitness;
+ typedef std::list< ActiveWitness > ActiveWitnessList;
+ typedef std::vector< Landmark_id > typeVectorVertex;
+ typedef std::vector<Nearest_landmark_range> Nearest_landmark_table_internal;
+ typedef Landmark_id Vertex_handle;
+
+ protected:
+ Nearest_landmark_table_internal nearest_landmark_table_;
public:
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -90,174 +69,136 @@ class Witness_complex {
//@{
- // Witness_range<Closest_landmark_range<Vertex_handle>>
+ Witness_complex() {
+ }
- /*
- * \brief Iterative construction of the witness complex.
- * \details The witness complex is written in sc_ basing on a matrix knn of
- * nearest neighbours of the form {witnesses}x{landmarks}.
- *
- * The type KNearestNeighbors can be seen as
- * Witness_range<Closest_landmark_range<Vertex_handle>>, where
- * Witness_range and Closest_landmark_range are random access ranges.
- *
- * Constructor takes into account at most (dim+1)
- * first landmarks from each landmark range to construct simplices.
- *
- * Landmarks are supposed to be in [0,nbL_-1]
+ /**
+ * \brief Initializes member variables before constructing simplicial complex.
+ * \details Records nearest landmark table.
+ * @param[in] nearest_landmark_table needs to be a range of a range of pairs of nearest landmarks and distances.
+ * The class Nearest_landmark_table_::value_type must be a copyable range.
+ * The range of pairs must admit a member type 'iterator'. The dereference type
+ * of the pair range iterator needs to be 'std::pair<std::size_t, double>'.
*/
- template< typename KNearestNeighbors >
- Witness_complex(KNearestNeighbors const & knn,
- int nbL,
- int dim,
- SimplicialComplex & sc) : nbL_(nbL), sc_(sc) {
- // Construction of the active witness list
- int nbW = boost::size(knn);
- typeVectorVertex vv;
- int counter = 0;
- /* The list of still useful witnesses
- * it will diminuish in the course of iterations
- */
- ActiveWitnessList active_w; // = new ActiveWitnessList();
- for (Vertex_handle i = 0; i != nbL_; ++i) {
- // initial fill of 0-dimensional simplices
- // by doing it we don't assume that landmarks are necessarily witnesses themselves anymore
- counter++;
- vv = {i};
- sc_.insert_simplex(vv);
- // TODO(SK) Error if not inserted : normally no need here though
+
+ Witness_complex(Nearest_landmark_table_ const & nearest_landmark_table)
+ : nearest_landmark_table_(std::begin(nearest_landmark_table), std::end(nearest_landmark_table)) {
+ }
+
+ /** \brief Outputs the (weak) witness complex with squared relaxation parameter 'max_alpha_square'
+ * in a simplicial complex data structure.
+ * \details The function returns true if the construction is successful and false otherwise.
+ * @param[out] complex Simplicial complex data structure which is a model of the
+ * SimplicialComplexForWitness concept.
+ * @param[in] max_alpha_square Maximal squared relaxation parameter.
+ * @param[in] limit_dimension Represents the maximal dimension of the simplicial complex
+ * (default value = no limit).
+ */
+ template < typename SimplicialComplexForWitness >
+ bool create_complex(SimplicialComplexForWitness& complex,
+ double max_alpha_square,
+ std::size_t limit_dimension = std::numeric_limits<std::size_t>::max()) const {
+ if (complex.num_vertices() > 0) {
+ std::cerr << "Witness complex cannot create complex - complex is not empty.\n";
+ return false;
}
- int k = 1; /* current dimension in iterative construction */
- for (int i = 0; i != nbW; ++i)
- active_w.push_back(i);
- while (!active_w.empty() && k < dim) {
- typename ActiveWitnessList::iterator it = active_w.begin();
- while (it != active_w.end()) {
- typeVectorVertex simplex_vector;
- /* THE INSERTION: Checking if all the subfaces are in the simplex tree*/
- bool ok = all_faces_in(knn, *it, k);
- if (ok) {
- for (int i = 0; i != k + 1; ++i)
- simplex_vector.push_back(knn[*it][i]);
- sc_.insert_simplex(simplex_vector);
- // TODO(SK) Error if not inserted : normally no need here though
- ++it;
- } else {
- active_w.erase(it++); // First increase the iterator and then erase the previous element
- }
+ if (max_alpha_square < 0) {
+ std::cerr << "Witness complex cannot create complex - squared relaxation parameter must be non-negative.\n";
+ return false;
+ }
+ if (limit_dimension < 0) {
+ std::cerr << "Witness complex cannot create complex - limit dimension must be non-negative.\n";
+ return false;
+ }
+ ActiveWitnessList active_witnesses;
+ Landmark_id k = 0; /* current dimension in iterative construction */
+ for (auto w : nearest_landmark_table_)
+ active_witnesses.push_back(ActiveWitness(w));
+ while (!active_witnesses.empty() && k <= limit_dimension) {
+ typename ActiveWitnessList::iterator aw_it = active_witnesses.begin();
+ std::vector<Landmark_id> simplex;
+ simplex.reserve(k+1);
+ while (aw_it != active_witnesses.end()) {
+ bool ok = add_all_faces_of_dimension(k,
+ max_alpha_square,
+ std::numeric_limits<double>::infinity(),
+ aw_it->begin(),
+ simplex,
+ complex,
+ aw_it->end());
+ assert(simplex.empty());
+ if (!ok)
+ active_witnesses.erase(aw_it++); // First increase the iterator and then erase the previous element
+ else
+ aw_it++;
}
k++;
}
+ complex.set_dimension(k-1);
+ return true;
}
//@}
private:
- /* \brief Check if the facets of the k-dimensional simplex witnessed
- * by witness witness_id are already in the complex.
- * inserted_vertex is the handle of the (k+1)-th vertex witnessed by witness_id
+ /* \brief Recursively adds all the faces of a given dimension dim witnessed by the same witness.
+ * The iterator indicates how far we can go in the landmark range to form simplices.
+ * simplex is the prefix of the simplices to insert.
+ * The return value indicates whether the witness remains active.
*/
- template <typename KNearestNeighbors>
- bool all_faces_in(KNearestNeighbors const &knn, int witness_id, int k) {
- std::vector< Vertex_handle > facet;
- // CHECK ALL THE FACETS
- for (int i = 0; i != k + 1; ++i) {
- facet = {};
- for (int j = 0; j != k + 1; ++j) {
- if (j != i) {
- facet.push_back(knn[witness_id][j]);
+ template < typename SimplicialComplexForWitness >
+ bool add_all_faces_of_dimension(int dim,
+ double alpha2,
+ double norelax_dist2,
+ typename ActiveWitness::iterator curr_l,
+ std::vector<Landmark_id>& simplex,
+ SimplicialComplexForWitness& sc,
+ typename ActiveWitness::iterator end) const {
+ if (curr_l == end)
+ return false;
+ bool will_be_active = false;
+ typename ActiveWitness::iterator l_it = curr_l;
+ if (dim > 0) {
+ for (; l_it != end && l_it->second - alpha2 <= norelax_dist2; ++l_it) {
+ simplex.push_back(l_it->first);
+ if (sc.find(simplex) != sc.null_simplex()) {
+ typename ActiveWitness::iterator next_it = l_it;
+ will_be_active = add_all_faces_of_dimension(dim-1,
+ alpha2,
+ norelax_dist2,
+ ++next_it,
+ simplex,
+ sc,
+ end) || will_be_active;
}
- } // endfor
- if (sc_.find(facet) == sc_.null_simplex())
- return false;
- } // endfor
- return true;
- }
-
- template <typename T>
- static void print_vector(const std::vector<T>& v) {
- std::cout << "[";
- if (!v.empty()) {
- std::cout << *(v.begin());
- for (auto it = v.begin() + 1; it != v.end(); ++it) {
- std::cout << ",";
- std::cout << *it;
+ assert(!simplex.empty());
+ simplex.pop_back();
+ // If norelax_dist is infinity, change to first omitted distance
+ if (l_it->second <= norelax_dist2)
+ norelax_dist2 = l_it->second;
}
- }
- std::cout << "]";
- }
-
- public:
- // /*
- // * \brief Verification if every simplex in the complex is witnessed by witnesses in knn.
- // * \param print_output =true will print the witnesses for each simplex
- // * \remark Added for debugging purposes.
- // */
- template< class KNearestNeighbors >
- bool is_witness_complex(KNearestNeighbors const & knn, bool print_output) {
- for (Simplex_handle sh : sc_.complex_simplex_range()) {
- bool is_witnessed = false;
- typeVectorVertex simplex;
- int nbV = 0; // number of verticed in the simplex
- for (Vertex_handle v : sc_.simplex_vertex_range(sh))
- simplex.push_back(v);
- nbV = simplex.size();
- for (typeVectorVertex w : knn) {
- bool has_vertices = true;
- for (Vertex_handle v : simplex)
- if (std::find(w.begin(), w.begin() + nbV, v) == w.begin() + nbV) {
- has_vertices = false;
- }
- if (has_vertices) {
- is_witnessed = true;
- if (print_output) {
- std::cout << "The simplex ";
- print_vector(simplex);
- std::cout << " is witnessed by the witness ";
- print_vector(w);
- std::cout << std::endl;
- }
- break;
- }
- }
- if (!is_witnessed) {
- if (print_output) {
- std::cout << "The following simplex is not witnessed ";
- print_vector(simplex);
- std::cout << std::endl;
+ } else if (dim == 0) {
+ for (; l_it != end && l_it->second - alpha2 <= norelax_dist2; ++l_it) {
+ simplex.push_back(l_it->first);
+ double filtration_value = 0;
+ // if norelax_dist is infinite, relaxation is 0.
+ if (l_it->second > norelax_dist2)
+ filtration_value = l_it->second - norelax_dist2;
+ if (all_faces_in(simplex, &filtration_value, sc)) {
+ will_be_active = true;
+ sc.insert_simplex(simplex, filtration_value);
}
- assert(is_witnessed);
- return false;
+ assert(!simplex.empty());
+ simplex.pop_back();
+ // If norelax_dist is infinity, change to first omitted distance
+ if (l_it->second < norelax_dist2)
+ norelax_dist2 = l_it->second;
}
}
- return true;
+ return will_be_active;
}
};
- /**
- * \ingroup witness_complex
- * \brief Iterative construction of the witness complex.
- * \details The witness complex is written in simplicial complex sc_
- * basing on a matrix knn of
- * nearest neighbours of the form {witnesses}x{landmarks}.
- *
- * The type KNearestNeighbors can be seen as
- * Witness_range<Closest_landmark_range<Vertex_handle>>, where
- * Witness_range and Closest_landmark_range are random access ranges.
- *
- * Procedure takes into account at most (dim+1)
- * first landmarks from each landmark range to construct simplices.
- *
- * Landmarks are supposed to be in [0,nbL_-1]
- */
- template <class KNearestNeighbors, class SimplicialComplexForWitness>
- void witness_complex(KNearestNeighbors const & knn,
- int nbL,
- int dim,
- SimplicialComplexForWitness & sc) {
- Witness_complex<SimplicialComplexForWitness>(knn, nbL, dim, sc);
- }
-
} // namespace witness_complex
} // namespace Gudhi
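A hypothetical usage sketch of the new interface (assuming Gudhi::Simplex_tree is used as the model of SimplicialComplexForWitness; the nearest landmark table below is made up for illustration):

#include <gudhi/Witness_complex.h>
#include <gudhi/Simplex_tree.h>
#include <utility>
#include <vector>

int main() {
  // One row per witness: (landmark id, squared distance) pairs sorted by increasing distance.
  typedef std::vector<std::pair<std::size_t, double>> Nearest_landmark_range;
  typedef std::vector<Nearest_landmark_range> Nearest_landmark_table;
  Nearest_landmark_table nlt = {
      {{0, 0.0}, {1, 0.1}, {2, 0.7}},
      {{1, 0.0}, {2, 0.2}, {0, 0.6}},
      {{2, 0.0}, {0, 0.3}, {1, 0.5}}};

  Gudhi::witness_complex::Witness_complex<Nearest_landmark_table> witness_complex(nlt);
  Gudhi::Simplex_tree<> simplex_tree;
  // Squared relaxation 0.5, simplices of dimension at most 2.
  witness_complex.create_complex(simplex_tree, 0.5, 2);
}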
diff --git a/include/gudhi/Witness_complex/all_faces_in.h b/include/gudhi/Witness_complex/all_faces_in.h
new file mode 100644
index 00000000..b68d75a1
--- /dev/null
+++ b/include/gudhi/Witness_complex/all_faces_in.h
@@ -0,0 +1,55 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2015 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef WITNESS_COMPLEX_ALL_FACES_IN_H_
+#define WITNESS_COMPLEX_ALL_FACES_IN_H_
+
+/* \brief Check if all the facets of the given simplex are already in the complex,
+ * and raise *filtration_value to the largest filtration value found among those facets.
+ * Simplices of size 1 (vertices) are accepted unconditionally.
+ */
+template < typename SimplicialComplexForWitness,
+ typename Simplex >
+ bool all_faces_in(Simplex& simplex,
+ double* filtration_value,
+ SimplicialComplexForWitness& sc) {
+ typedef typename SimplicialComplexForWitness::Simplex_handle Simplex_handle;
+
+ if (simplex.size() == 1)
+ return true; /* Add vertices unconditionally */
+
+ Simplex facet;
+ for (typename Simplex::iterator not_it = simplex.begin(); not_it != simplex.end(); ++not_it) {
+ facet.clear();
+ for (typename Simplex::iterator it = simplex.begin(); it != simplex.end(); ++it)
+ if (it != not_it)
+ facet.push_back(*it);
+ Simplex_handle facet_sh = sc.find(facet);
+ if (facet_sh == sc.null_simplex())
+ return false;
+ else if (sc.filtration(facet_sh) > *filtration_value)
+ *filtration_value = sc.filtration(facet_sh);
+ }
+ return true;
+ }
+
+#endif // WITNESS_COMPLEX_ALL_FACES_IN_H_
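A hypothetical sketch of the helper's contract, assuming Gudhi::Simplex_tree as the complex type (the filtration values are made up):

#include <gudhi/Simplex_tree.h>
#include <gudhi/Witness_complex/all_faces_in.h>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  std::vector<int> e01 = {0, 1}, e12 = {1, 2}, e02 = {0, 2};
  st.insert_simplex_and_subfaces(e01, 0.1);
  st.insert_simplex_and_subfaces(e12, 0.2);
  st.insert_simplex_and_subfaces(e02, 0.3);

  std::vector<int> triangle = {0, 1, 2};
  double filtration_value = 0.;
  // All three boundary edges are present, so this returns true and raises
  // filtration_value to 0.3, the largest facet filtration.
  bool ok = all_faces_in(triangle, &filtration_value, st);
  (void)ok;
}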
diff --git a/include/gudhi/choose_n_farthest_points.h b/include/gudhi/choose_n_farthest_points.h
new file mode 100644
index 00000000..86500b28
--- /dev/null
+++ b/include/gudhi/choose_n_farthest_points.h
@@ -0,0 +1,133 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CHOOSE_N_FARTHEST_POINTS_H_
+#define CHOOSE_N_FARTHEST_POINTS_H_
+
+#include <boost/range.hpp>
+
+#include <gudhi/Null_output_iterator.h>
+
+#include <iterator>
+#include <vector>
+#include <random>
+#include <limits> // for numeric_limits<>
+
+namespace Gudhi {
+
+namespace subsampling {
+
+/**
+ * \ingroup subsampling
+ */
+enum : std::size_t {
+/**
+ * Argument for `choose_n_farthest_points` to indicate that the starting point should be picked randomly.
+ */
+ random_starting_point = std::size_t(-1)
+};
+
+/**
+ * \ingroup subsampling
+ * \brief Subsample by a greedy strategy of iteratively adding the farthest point from the
+ * current chosen point set to the subsampling.
+ * The iteration starts with the landmark `starting_point` or, if `starting_point == random_starting_point`, with a random landmark.
+ * \tparam Kernel must provide a type Kernel::Squared_distance_d which is a model of the
+ * concept <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classKernel__d_1_1Squared__distance__d.html">Kernel_d::Squared_distance_d</a> (despite the name, taken from CGAL, this can be any kind of metric or proximity measure).
+ * It must also contain a public member `squared_distance_d_object()` that returns an object of this type.
+ * \tparam Point_range Range whose value type is Kernel::Point_d. It must provide random-access
+ * via `operator[]` and the points should be stored contiguously in memory.
+ * \tparam PointOutputIterator Output iterator whose value type is Kernel::Point_d.
+ * \tparam DistanceOutputIterator Output iterator for distances.
+ * \details It chooses `final_size` points from a random access range
+ * `input_pts` and outputs them in the output iterator `output_it`. It also
+ * outputs the distance from each of those points to the set of previous
+ * points in `dist_it`.
+ * @param[in] k A kernel object.
+ * @param[in] input_pts Const reference to the input points.
+ * @param[in] final_size The size of the subsample to compute.
+ * @param[in] starting_point The seed in the farthest point algorithm.
+ * @param[out] output_it The output iterator for points.
+ * @param[out] dist_it The optional output iterator for distances.
+ *
+ */
+template < typename Kernel,
+typename Point_range,
+typename PointOutputIterator,
+typename DistanceOutputIterator = Null_output_iterator>
+void choose_n_farthest_points(Kernel const &k,
+ Point_range const &input_pts,
+ std::size_t final_size,
+ std::size_t starting_point,
+ PointOutputIterator output_it,
+ DistanceOutputIterator dist_it = {}) {
+ std::size_t nb_points = boost::size(input_pts);
+ if (final_size > nb_points)
+ final_size = nb_points;
+
+ // Tests to the limit
+ if (final_size < 1)
+ return;
+
+ if (starting_point == random_starting_point) {
+ // Choose randomly the first landmark
+ std::random_device rd;
+ std::mt19937 gen(rd());
+ std::uniform_int_distribution<std::size_t> dis(0, (input_pts.size() - 1));
+ starting_point = dis(gen);
+ }
+
+ typename Kernel::Squared_distance_d sqdist = k.squared_distance_d_object();
+
+ std::size_t current_number_of_landmarks = 0; // counter for landmarks
+ const double infty = std::numeric_limits<double>::infinity(); // infinity (see next entry)
+ std::vector< double > dist_to_L(nb_points, infty); // vector of current distances to L from input_pts
+
+ std::size_t curr_max_w = starting_point;
+
+ for (current_number_of_landmarks = 0; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
+ // curr_max_w at this point is the next landmark
+ *output_it++ = input_pts[curr_max_w];
+ *dist_it++ = dist_to_L[curr_max_w];
+ std::size_t i = 0;
+ for (auto& p : input_pts) {
+ double curr_dist = sqdist(p, *(std::begin(input_pts) + curr_max_w));
+ if (curr_dist < dist_to_L[i])
+ dist_to_L[i] = curr_dist;
+ ++i;
+ }
+ // choose the next curr_max_w
+ double curr_max_dist = 0; // used for defining the farthest point from L
+ for (i = 0; i < dist_to_L.size(); i++)
+ if (dist_to_L[i] > curr_max_dist) {
+ curr_max_dist = dist_to_L[i];
+ curr_max_w = i;
+ }
+ }
+}
+
+} // namespace subsampling
+
+} // namespace Gudhi
+
+#endif // CHOOSE_N_FARTHEST_POINTS_H_
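A hypothetical usage sketch, assuming CGAL::Epick_d with dynamic dimension as the kernel (the point cloud is left as a placeholder):

#include <gudhi/choose_n_farthest_points.h>
#include <CGAL/Epick_d.h>
#include <iterator>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> K;
  std::vector<K::Point_d> points;   // ... fill with input points ...
  std::vector<K::Point_d> landmarks;
  // Greedily pick 20 farthest points, starting from a random one.
  Gudhi::subsampling::choose_n_farthest_points(K(), points, 20,
      Gudhi::subsampling::random_starting_point,
      std::back_inserter(landmarks));
}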
diff --git a/include/gudhi/console_color.h b/include/gudhi/console_color.h
new file mode 100644
index 00000000..c4671da3
--- /dev/null
+++ b/include/gudhi/console_color.h
@@ -0,0 +1,97 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA Sophia-Antipolis (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CONSOLE_COLOR_H_
+#define CONSOLE_COLOR_H_
+
+#include <iostream>
+
+#if defined(WIN32)
+#include <windows.h>
+#endif
+
+inline std::ostream& blue(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout,
+ FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_INTENSITY);
+#else
+ s << "\x1b[0;34m";
+#endif
+ return s;
+}
+
+inline std::ostream& red(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout, FOREGROUND_RED | FOREGROUND_INTENSITY);
+#else
+ s << "\x1b[0;31m";
+#endif
+ return s;
+}
+
+inline std::ostream& green(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout, FOREGROUND_GREEN | FOREGROUND_INTENSITY);
+#else
+ s << "\x1b[0;32m";
+#endif
+ return s;
+}
+
+inline std::ostream& yellow(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout,
+ FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY);
+#else
+ s << "\x1b[0;33m";
+#endif
+ return s;
+}
+
+inline std::ostream& white(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout,
+ FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
+#else
+ s << "\x1b[0;37m";
+#endif
+ return s;
+}
+
+inline std::ostream& black_on_white(std::ostream &s) {
+#if defined(WIN32)
+ HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE);
+ SetConsoleTextAttribute(hStdout,
+ BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE);
+#else
+ s << "\x1b[0;33m";
+#endif
+ return s;
+}
+
+
+#endif // CONSOLE_COLOR_H_
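A minimal sketch of how these stream manipulators could be used (hypothetical):

#include <gudhi/console_color.h>
#include <iostream>

int main() {
  std::cout << red << "FAIL" << white << " some test message" << std::endl;
  std::cout << green << "OK" << white << " another test message" << std::endl;
}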
diff --git a/include/gudhi/distance_functions.h b/include/gudhi/distance_functions.h
index cd518581..f6e2ab5a 100644
--- a/include/gudhi/distance_functions.h
+++ b/include/gudhi/distance_functions.h
@@ -4,7 +4,7 @@
*
* Author(s): Clément Maria
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Méditerranée (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,20 +24,32 @@
#define DISTANCE_FUNCTIONS_H_
#include <cmath> // for std::sqrt
+#include <type_traits> // for std::decay
+#include <iterator> // for std::begin, std::end
-/* Compute the Euclidean distance between two Points given
- * by a range of coordinates. The points are assumed to have
- * the same dimension. */
-template< typename Point >
-double euclidean_distance(Point &p1, Point &p2) {
- double dist = 0.;
- auto it1 = p1.begin();
- auto it2 = p2.begin();
- for (; it1 != p1.end(); ++it1, ++it2) {
- double tmp = *it1 - *it2;
- dist += tmp*tmp;
+namespace Gudhi {
+
+/** @file
+ * @brief Global distance functions
+ */
+
+/** @brief Compute the Euclidean distance between two Points given by a range of coordinates. The points are assumed to
+ * have the same dimension. */
+class Euclidean_distance {
+ public:
+ template< typename Point >
+ auto operator()(const Point& p1, const Point& p2) const -> typename std::decay<decltype(*std::begin(p1))>::type {
+ auto it1 = p1.begin();
+ auto it2 = p2.begin();
+ typename Point::value_type dist = 0.;
+ for (; it1 != p1.end(); ++it1, ++it2) {
+ typename Point::value_type tmp = (*it1) - (*it2);
+ dist += tmp*tmp;
+ }
+ return std::sqrt(dist);
}
- return std::sqrt(dist);
-}
+};
+
+} // namespace Gudhi
#endif // DISTANCE_FUNCTIONS_H_
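A minimal usage sketch of the new functor (hypothetical); it accepts any coordinate range such as std::vector<double>:

#include <gudhi/distance_functions.h>
#include <iostream>
#include <vector>

int main() {
  std::vector<double> p1 = {0., 0., 0.};
  std::vector<double> p2 = {1., 2., 2.};
  Gudhi::Euclidean_distance distance;
  std::cout << distance(p1, p2) << std::endl;  // prints 3 = sqrt(1 + 4 + 4)
}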
diff --git a/include/gudhi/graph_simplicial_complex.h b/include/gudhi/graph_simplicial_complex.h
index 042ef516..5fe7c826 100644
--- a/include/gudhi/graph_simplicial_complex.h
+++ b/include/gudhi/graph_simplicial_complex.h
@@ -4,7 +4,7 @@
*
* Author(s): Clément Maria
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Méditerranée (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,61 +39,4 @@ struct vertex_filtration_t {
typedef boost::vertex_property_tag kind;
};
-typedef int Vertex_handle;
-typedef double Filtration_value;
-typedef boost::adjacency_list < boost::vecS, boost::vecS, boost::undirectedS
-, boost::property < vertex_filtration_t, Filtration_value >
-, boost::property < edge_filtration_t, Filtration_value >
-> Graph_t;
-typedef std::pair< Vertex_handle, Vertex_handle > Edge_t;
-
-/** \brief Output the proximity graph of the points.
- *
- * If points contains n elements, the proximity graph is the graph
- * with n vertices, and an edge [u,v] iff the distance function between
- * points u and v is smaller than threshold.
- *
- * The type PointCloud furnishes .begin() and .end() methods, that return
- * iterators with value_type Point.
- */
-template< typename PointCloud
-, typename Point >
-Graph_t compute_proximity_graph(PointCloud &points
- , Filtration_value threshold
- , Filtration_value distance(Point p1, Point p2)) {
- std::vector< Edge_t > edges;
- std::vector< Filtration_value > edges_fil;
- std::map< Vertex_handle, Filtration_value > vertices;
-
- Vertex_handle idx_u, idx_v;
- Filtration_value fil;
- idx_u = 0;
- for (auto it_u = points.begin(); it_u != points.end(); ++it_u) {
- idx_v = idx_u + 1;
- for (auto it_v = it_u + 1; it_v != points.end(); ++it_v, ++idx_v) {
- fil = distance(*it_u, *it_v);
- if (fil <= threshold) {
- edges.emplace_back(idx_u, idx_v);
- edges_fil.push_back(fil);
- }
- }
- ++idx_u;
- }
-
- Graph_t skel_graph(edges.begin()
- , edges.end()
- , edges_fil.begin()
- , idx_u); // number of points labeled from 0 to idx_u-1
-
- auto vertex_prop = boost::get(vertex_filtration_t(), skel_graph);
-
- boost::graph_traits<Graph_t>::vertex_iterator vi, vi_end;
- for (std::tie(vi, vi_end) = boost::vertices(skel_graph);
- vi != vi_end; ++vi) {
- boost::put(vertex_prop, *vi, 0.);
- }
-
- return skel_graph;
-}
-
#endif // GRAPH_SIMPLICIAL_COMPLEX_H_
diff --git a/include/gudhi/pick_n_random_points.h b/include/gudhi/pick_n_random_points.h
new file mode 100644
index 00000000..f0e3f1f1
--- /dev/null
+++ b/include/gudhi/pick_n_random_points.h
@@ -0,0 +1,86 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PICK_N_RANDOM_POINTS_H_
+#define PICK_N_RANDOM_POINTS_H_
+
+#include <gudhi/Clock.h>
+
+#include <boost/range/size.hpp>
+
+#include <cstddef>
+#include <random> // random_device, mt19937
+#include <algorithm> // shuffle
+#include <numeric> // iota
+#include <iterator>
+#include <vector>
+
+
+namespace Gudhi {
+
+namespace subsampling {
+
+/**
+ * \ingroup subsampling
+ * \brief Subsample a point set by picking random vertices.
+ *
+ * \details It chooses `final_size` distinct points from a random access range `points`
+ * and outputs them to the output iterator `output_it`.
+ * Point_container::iterator should be ValueSwappable and RandomAccessIterator.
+ */
+template <typename Point_container,
+typename OutputIterator>
+void pick_n_random_points(Point_container const &points,
+ std::size_t final_size,
+ OutputIterator output_it) {
+#ifdef GUDHI_SUBS_PROFILING
+ Gudhi::Clock t;
+#endif
+
+ std::size_t nbP = boost::size(points);
+ if (final_size > nbP)
+ final_size = nbP;
+
+ std::vector<int> landmarks(nbP);
+ std::iota(landmarks.begin(), landmarks.end(), 0);
+
+ std::random_device rd;
+ std::mt19937 g(rd());
+
+ std::shuffle(landmarks.begin(), landmarks.end(), g);
+ landmarks.resize(final_size);
+
+ for (int l : landmarks)
+ *output_it++ = points[l];
+
+#ifdef GUDHI_SUBS_PROFILING
+ t.end();
+ std::cerr << "Random landmark choice took " << t.num_seconds()
+ << " seconds." << std::endl;
+#endif
+}
+
+} // namespace subsampling
+
+} // namespace Gudhi
+
+#endif // PICK_N_RANDOM_POINTS_H_
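A hypothetical usage sketch: keep 100 random points out of a larger cloud of coordinate vectors:

#include <gudhi/pick_n_random_points.h>
#include <iterator>
#include <vector>

int main() {
  std::vector<std::vector<double>> points(1000, std::vector<double>(3, 0.));
  std::vector<std::vector<double>> subsample;
  Gudhi::subsampling::pick_n_random_points(points, 100, std::back_inserter(subsample));
}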
diff --git a/include/gudhi/random_point_generators.h b/include/gudhi/random_point_generators.h
new file mode 100644
index 00000000..2ec465ef
--- /dev/null
+++ b/include/gudhi/random_point_generators.h
@@ -0,0 +1,474 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RANDOM_POINT_GENERATORS_H_
+#define RANDOM_POINT_GENERATORS_H_
+
+#include <CGAL/number_utils.h>
+#include <CGAL/Random.h>
+#include <CGAL/point_generators_d.h>
+
+#include <vector> // for vector<>
+
+namespace Gudhi {
+
+///////////////////////////////////////////////////////////////////////////////
+// Note: All these functions have been tested with the CGAL::Epick_d kernel
+///////////////////////////////////////////////////////////////////////////////
+
+// construct_point: dim 2
+
+template <typename Kernel>
+typename Kernel::Point_d construct_point(const Kernel &k,
+ typename Kernel::FT x1, typename Kernel::FT x2) {
+ typename Kernel::FT tab[2];
+ tab[0] = x1;
+ tab[1] = x2;
+ return k.construct_point_d_object()(2, &tab[0], &tab[2]);
+}
+
+// construct_point: dim 3
+
+template <typename Kernel>
+typename Kernel::Point_d construct_point(const Kernel &k,
+ typename Kernel::FT x1, typename Kernel::FT x2, typename Kernel::FT x3) {
+ typename Kernel::FT tab[3];
+ tab[0] = x1;
+ tab[1] = x2;
+ tab[2] = x3;
+ return k.construct_point_d_object()(3, &tab[0], &tab[3]);
+}
+
+// construct_point: dim 4
+
+template <typename Kernel>
+typename Kernel::Point_d construct_point(const Kernel &k,
+ typename Kernel::FT x1, typename Kernel::FT x2, typename Kernel::FT x3,
+ typename Kernel::FT x4) {
+ typename Kernel::FT tab[4];
+ tab[0] = x1;
+ tab[1] = x2;
+ tab[2] = x3;
+ tab[3] = x4;
+ return k.construct_point_d_object()(4, &tab[0], &tab[4]);
+}
+
+// construct_point: dim 5
+
+template <typename Kernel>
+typename Kernel::Point_d construct_point(const Kernel &k,
+ typename Kernel::FT x1, typename Kernel::FT x2, typename Kernel::FT x3,
+ typename Kernel::FT x4, typename Kernel::FT x5) {
+ typename Kernel::FT tab[5];
+ tab[0] = x1;
+ tab[1] = x2;
+ tab[2] = x3;
+ tab[3] = x4;
+ tab[4] = x5;
+ return k.construct_point_d_object()(5, &tab[0], &tab[5]);
+}
+
+// construct_point: dim 6
+
+template <typename Kernel>
+typename Kernel::Point_d construct_point(const Kernel &k,
+ typename Kernel::FT x1, typename Kernel::FT x2, typename Kernel::FT x3,
+ typename Kernel::FT x4, typename Kernel::FT x5, typename Kernel::FT x6) {
+ typename Kernel::FT tab[6];
+ tab[0] = x1;
+ tab[1] = x2;
+ tab[2] = x3;
+ tab[3] = x4;
+ tab[4] = x5;
+ tab[5] = x6;
+ return k.construct_point_d_object()(6, &tab[0], &tab[6]);
+}
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_plane(std::size_t num_points, int intrinsic_dim,
+ int ambient_dim,
+ double coord_min = -5., double coord_max = 5.) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ std::vector<FT> pt(ambient_dim, FT(0));
+ for (int j = 0; j < intrinsic_dim; ++j)
+ pt[j] = rng.get_double(coord_min, coord_max);
+
+ Point p = k.construct_point_d_object()(ambient_dim, pt.begin(), pt.end());
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_moment_curve(std::size_t num_points, int dim,
+ typename Kernel::FT min_x,
+ typename Kernel::FT max_x) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ FT x = rng.get_double(min_x, max_x);
+ std::vector<FT> coords;
+ coords.reserve(dim);
+ for (int p = 1; p <= dim; ++p)
+ coords.push_back(std::pow(CGAL::to_double(x), p));
+ Point p = k.construct_point_d_object()(
+ dim, coords.begin(), coords.end());
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+
+// R = big radius, r = small radius
+template <typename Kernel/*, typename TC_basis*/>
+std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t num_points, double R, double r,
+ bool uniform = false) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+
+ // if uniform
+ std::size_t num_lines = (std::size_t)sqrt(num_points);
+
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ FT u, v;
+ if (uniform) {
+ std::size_t k1 = i / num_lines;
+ std::size_t k2 = i % num_lines;
+ u = 6.2832 * k1 / num_lines;
+ v = 6.2832 * k2 / num_lines;
+ } else {
+ u = rng.get_double(0, 6.2832);
+ v = rng.get_double(0, 6.2832);
+ }
+ Point p = construct_point(k,
+ (R + r * std::cos(u)) * std::cos(v),
+ (R + r * std::cos(u)) * std::sin(v),
+ r * std::sin(u));
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+// "Private" function used by generate_points_on_torus_d
+template <typename Kernel, typename OutputIterator>
+static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::size_t num_slices,
+ OutputIterator out,
+ double radius_noise_percentage = 0.,
+ std::vector<typename Kernel::FT> current_point = std::vector<typename Kernel::FT>()) {
+ CGAL::Random rng;
+ int point_size = static_cast<int>(current_point.size());
+ if (point_size == 2 * dim) {
+ *out++ = k.construct_point_d_object()(point_size, current_point.begin(), current_point.end());
+ } else {
+ for (std::size_t slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
+ double radius_noise_ratio = 1.;
+ if (radius_noise_percentage > 0.) {
+ radius_noise_ratio = rng.get_double(
+ (100. - radius_noise_percentage) / 100.,
+ (100. + radius_noise_percentage) / 100.);
+ }
+ std::vector<typename Kernel::FT> cp2 = current_point;
+ double alpha = 6.2832 * slice_idx / num_slices;
+ cp2.push_back(radius_noise_ratio * std::cos(alpha));
+ cp2.push_back(radius_noise_ratio * std::sin(alpha));
+ generate_uniform_points_on_torus_d(
+ k, dim, num_slices, out, radius_noise_percentage, cp2);
+ }
+ }
+}
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num_points, int dim, bool uniform = false,
+ double radius_noise_percentage = 0.) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+
+ std::vector<Point> points;
+ points.reserve(num_points);
+ if (uniform) {
+ std::size_t num_slices = (std::size_t)std::pow(num_points, 1. / dim);
+ generate_uniform_points_on_torus_d(
+ k, dim, num_slices, std::back_inserter(points), radius_noise_percentage);
+ } else {
+ for (std::size_t i = 0; i < num_points;) {
+ double radius_noise_ratio = 1.;
+ if (radius_noise_percentage > 0.) {
+ radius_noise_ratio = rng.get_double(
+ (100. - radius_noise_percentage) / 100.,
+ (100. + radius_noise_percentage) / 100.);
+ }
+ std::vector<typename Kernel::FT> pt;
+ pt.reserve(dim * 2);
+ for (int curdim = 0; curdim < dim; ++curdim) {
+ FT alpha = rng.get_double(0, 6.2832);
+ pt.push_back(radius_noise_ratio * std::cos(alpha));
+ pt.push_back(radius_noise_ratio * std::sin(alpha));
+ }
+
+ Point p = k.construct_point_d_object()(pt.begin(), pt.end());
+ points.push_back(p);
+ ++i;
+ }
+ }
+ return points;
+}
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_sphere_d(std::size_t num_points, int dim, double radius,
+ double radius_noise_percentage = 0.) {
+ typedef typename Kernel::Point_d Point;
+ Kernel k;
+ CGAL::Random rng;
+ CGAL::Random_points_on_sphere_d<Point> generator(dim, radius);
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ Point p = *generator++;
+ if (radius_noise_percentage > 0.) {
+ double radius_noise_ratio = rng.get_double(
+ (100. - radius_noise_percentage) / 100.,
+ (100. + radius_noise_percentage) / 100.);
+
+ typename Kernel::Point_to_vector_d k_pt_to_vec =
+ k.point_to_vector_d_object();
+ typename Kernel::Vector_to_point_d k_vec_to_pt =
+ k.vector_to_point_d_object();
+ typename Kernel::Scaled_vector_d k_scaled_vec =
+ k.scaled_vector_d_object();
+ p = k_vec_to_pt(k_scaled_vec(k_pt_to_vec(p), radius_noise_ratio));
+ }
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_two_spheres_d(std::size_t num_points, int dim, double radius,
+ double distance_between_centers,
+ double radius_noise_percentage = 0.) {
+ typedef typename Kernel::FT FT;
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::Vector_d Vector;
+ Kernel k;
+ CGAL::Random rng;
+ CGAL::Random_points_on_sphere_d<Point> generator(dim, radius);
+ std::vector<Point> points;
+ points.reserve(num_points);
+
+ std::vector<FT> t(dim, FT(0));
+ t[0] = distance_between_centers;
+ Vector c1_to_c2(t.begin(), t.end());
+
+ for (std::size_t i = 0; i < num_points;) {
+ Point p = *generator++;
+ if (radius_noise_percentage > 0.) {
+ double radius_noise_ratio = rng.get_double(
+ (100. - radius_noise_percentage) / 100.,
+ (100. + radius_noise_percentage) / 100.);
+
+ typename Kernel::Point_to_vector_d k_pt_to_vec =
+ k.point_to_vector_d_object();
+ typename Kernel::Vector_to_point_d k_vec_to_pt =
+ k.vector_to_point_d_object();
+ typename Kernel::Scaled_vector_d k_scaled_vec =
+ k.scaled_vector_d_object();
+ p = k_vec_to_pt(k_scaled_vec(k_pt_to_vec(p), radius_noise_ratio));
+ }
+
+ typename Kernel::Translated_point_d k_transl =
+ k.translated_point_d_object();
+ Point p2 = k_transl(p, c1_to_c2);
+ points.push_back(p);
+ points.push_back(p2);
+ i += 2;
+ }
+ return points;
+}
+
+// Product of a 3-sphere and a circle => d = 3 / D = 5
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std::size_t num_points,
+ double sphere_radius) {
+ typedef typename Kernel::FT FT;
+ typedef typename Kernel::Point_d Point;
+ Kernel k;
+ CGAL::Random rng;
+ CGAL::Random_points_on_sphere_d<Point> generator(3, sphere_radius);
+ std::vector<Point> points;
+ points.reserve(num_points);
+
+ typename Kernel::Compute_coordinate_d k_coord =
+ k.compute_coordinate_d_object();
+ for (std::size_t i = 0; i < num_points;) {
+ Point p_sphere = *generator++; // First 3 coords
+
+ FT alpha = rng.get_double(0, 6.2832);
+ std::vector<FT> pt(5);
+ pt[0] = k_coord(p_sphere, 0);
+ pt[1] = k_coord(p_sphere, 1);
+ pt[2] = k_coord(p_sphere, 2);
+ pt[3] = std::cos(alpha);
+ pt[4] = std::sin(alpha);
+ Point p(pt.begin(), pt.end());
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+// a = big radius, b = small radius
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::size_t num_points, double a, double b,
+ bool uniform = false) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+
+ // if uniform
+ std::size_t num_lines = (std::size_t)sqrt(num_points);
+
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ FT u, v;
+ if (uniform) {
+ std::size_t k1 = i / num_lines;
+ std::size_t k2 = i % num_lines;
+ u = 6.2832 * k1 / num_lines;
+ v = 6.2832 * k2 / num_lines;
+ } else {
+ u = rng.get_double(0, 6.2832);
+ v = rng.get_double(0, 6.2832);
+ }
+ double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v);
+ Point p = construct_point(k,
+ (a + b * tmp) * cos(u),
+ (a + b * tmp) * sin(u),
+ b * (sin(u / 2) * sin(v) + cos(u / 2) * sin(2. * v)));
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+// a = big radius, b = small radius
+template <typename Kernel>
+std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_4D(std::size_t num_points, double a, double b,
+ double noise = 0., bool uniform = false) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+
+ // if uniform
+ std::size_t num_lines = (std::size_t)sqrt(num_points);
+
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ FT u, v;
+ if (uniform) {
+ std::size_t k1 = i / num_lines;
+ std::size_t k2 = i % num_lines;
+ u = 6.2832 * k1 / num_lines;
+ v = 6.2832 * k2 / num_lines;
+ } else {
+ u = rng.get_double(0, 6.2832);
+ v = rng.get_double(0, 6.2832);
+ }
+ Point p = construct_point(k,
+ (a + b * cos(v)) * cos(u) + (noise == 0. ? 0. : rng.get_double(0, noise)),
+ (a + b * cos(v)) * sin(u) + (noise == 0. ? 0. : rng.get_double(0, noise)),
+ b * sin(v) * cos(u / 2) + (noise == 0. ? 0. : rng.get_double(0, noise)),
+ b * sin(v) * sin(u / 2) + (noise == 0. ? 0. : rng.get_double(0, noise)));
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+
+// a = big radius, b = small radius
+
+template <typename Kernel>
+std::vector<typename Kernel::Point_d>
+generate_points_on_klein_bottle_variant_5D(
+ std::size_t num_points, double a, double b, bool uniform = false) {
+ typedef typename Kernel::Point_d Point;
+ typedef typename Kernel::FT FT;
+ Kernel k;
+ CGAL::Random rng;
+
+ // if uniform
+ std::size_t num_lines = (std::size_t)sqrt(num_points);
+
+ std::vector<Point> points;
+ points.reserve(num_points);
+ for (std::size_t i = 0; i < num_points;) {
+ FT u, v;
+ if (uniform) {
+ std::size_t k1 = i / num_lines;
+ std::size_t k2 = i % num_lines;
+ u = 6.2832 * k1 / num_lines;
+ v = 6.2832 * k2 / num_lines;
+ } else {
+ u = rng.get_double(0, 6.2832);
+ v = rng.get_double(0, 6.2832);
+ }
+ FT x1 = (a + b * cos(v)) * cos(u);
+ FT x2 = (a + b * cos(v)) * sin(u);
+ FT x3 = b * sin(v) * cos(u / 2);
+ FT x4 = b * sin(v) * sin(u / 2);
+ FT x5 = x1 + x2 + x3 + x4;
+
+ Point p = construct_point(k, x1, x2, x3, x4, x5);
+ points.push_back(p);
+ ++i;
+ }
+ return points;
+}
+
+} // namespace Gudhi
+
+#endif // RANDOM_POINT_GENERATORS_H_
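A hypothetical usage sketch, again assuming the CGAL::Epick_d kernel these generators were tested with:

#include <gudhi/random_point_generators.h>
#include <CGAL/Epick_d.h>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> K;
  // 500 points on the unit 3-sphere in R^4, with 1% radial noise.
  std::vector<K::Point_d> sphere_points =
      Gudhi::generate_points_on_sphere_d<K>(500, 4, 1.0, 1.);
  // 1000 points on a torus in R^3 with radii R = 3 and r = 1.
  std::vector<K::Point_d> torus_points =
      Gudhi::generate_points_on_torus_3D<K>(1000, 3., 1.);
}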
diff --git a/include/gudhi/reader_utils.h b/include/gudhi/reader_utils.h
index 899f9df6..97a87edd 100644
--- a/include/gudhi/reader_utils.h
+++ b/include/gudhi/reader_utils.h
@@ -2,9 +2,9 @@
* (Geometric Understanding in Higher Dimensions) is a generic C++
* library for computational topology.
*
- * Author(s): Clément Maria
+ * Author(s): Clement Maria, Pawel Dlotko
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Méditerranée (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,18 +30,25 @@
#include <iostream>
#include <fstream>
#include <map>
-#include <limits> // for numeric_limits<>
+#include <limits> // for numeric_limits
#include <string>
#include <vector>
+#include <utility> // for pair
+
+// Keep this file tag for Doxygen to parse the code; otherwise, global functions and variables are not documented.
+// It is required for global functions and variables.
+
+/** @file
+ * @brief This file includes common file reader for GUDHI
+ */
/**
- * \brief Read a set of points to turn it
- * into a vector< vector<double> > by filling points
+ * @brief Read a set of points to turn it into a vector< vector<double> > by filling points.
*
- * File format: 1 point per line
- * X11 X12 ... X1d
- * X21 X22 ... X2d
- * etc
+ * File format: 1 point per line<br>
+ * X11 X12 ... X1d<br>
+ * X21 X22 ... X2d<br>
+ * etc<br>
*/
inline void read_points(std::string file_name, std::vector< std::vector< double > > & points) {
std::ifstream in_file(file_name.c_str(), std::ios::in);
@@ -66,23 +73,29 @@ inline void read_points(std::string file_name, std::vector< std::vector< double
}
/**
- * \brief Read a graph from a file.
+ * @brief Read a graph from a file.
+ *
+ * \tparam Graph_t Type for the return graph. Must be constructible from iterators on pairs of Vertex_handle
+ * \tparam Filtration_value Type for the value of the read filtration
+ * \tparam Vertex_handle Type for the value of the read vertices
*
- * File format: 1 simplex per line
- * Dim1 X11 X12 ... X1d Fil1
- * Dim2 X21 X22 ... X2d Fil2
- * etc
+ * File format: 1 simplex per line<br>
+ * Dim1 X11 X12 ... X1d Fil1<br>
+ * Dim2 X21 X22 ... X2d Fil2<br>
+ * etc<br>
*
* The vertices must be labeled from 0 to n-1.
* Every simplex must appear exactly once.
* Simplices of dimension more than 1 are ignored.
*/
-inline Graph_t read_graph(std::string file_name) {
+template< typename Graph_t, typename Filtration_value, typename Vertex_handle >
+Graph_t read_graph(std::string file_name) {
std::ifstream in_(file_name.c_str(), std::ios::in);
if (!in_.is_open()) {
std::cerr << "Unable to open file " << file_name << std::endl;
}
+ typedef std::pair< Vertex_handle, Vertex_handle > Edge_t;
std::vector< Edge_t > edges;
std::vector< Filtration_value > edges_fil;
std::map< Vertex_handle, Filtration_value > vertices;
@@ -130,7 +143,7 @@ inline Graph_t read_graph(std::string file_name) {
Graph_t skel_graph(edges.begin(), edges.end(), edges_fil.begin(), vertices.size());
auto vertex_prop = boost::get(vertex_filtration_t(), skel_graph);
- boost::graph_traits<Graph_t>::vertex_iterator vi, vi_end;
+ typename boost::graph_traits<Graph_t>::vertex_iterator vi, vi_end;
auto v_it = vertices.begin();
for (std::tie(vi, vi_end) = boost::vertices(skel_graph); vi != vi_end; ++vi, ++v_it) {
boost::put(vertex_prop, *vi, v_it->second);
@@ -140,12 +153,12 @@ inline Graph_t read_graph(std::string file_name) {
}
/**
- * \brief Read a face from a file.
+ * @brief Read a face from a file.
*
- * File format: 1 simplex per line
- * Dim1 X11 X12 ... X1d Fil1
- * Dim2 X21 X22 ... X2d Fil2
- * etc
+ * File format: 1 simplex per line<br>
+ * Dim1 X11 X12 ... X1d Fil1<br>
+ * Dim2 X21 X22 ... X2d Fil2<br>
+ * etc<br>
*
* The vertices must be labeled from 0 to n-1.
* Every simplex must appear exactly once.
@@ -166,18 +179,16 @@ bool read_simplex(std::istream & in_, std::vector< Vertex_handle > & simplex, Fi
}
/**
- * \brief Read a hasse simplex from a file.
- *
- * File format: 1 simplex per line
- * Dim1 k11 k12 ... k1Dim1 Fil1
- * Dim2 k21 k22 ... k2Dim2 Fil2
- * etc
- *
- * The key of a simplex is its position in the filtration order
- * and also the number of its row in the file.
- * Dimi ki1 ki2 ... kiDimi Fili means that the ith simplex in the
- * filtration has dimension Dimi, filtration value fil1 and simplices with
- * key ki1 ... kiDimi in its boundary.*/
+ * @brief Read a hasse simplex from a file.
+ *
+ * File format: 1 simplex per line<br>
+ * Dim1 k11 k12 ... k1Dim1 Fil1<br>
+ * Dim2 k21 k22 ... k2Dim2 Fil2<br>
+ * etc<br>
+ *
+ * The key of a simplex is its position in the filtration order and also the number of its row in the file.
+ * Dimi ki1 ki2 ... kiDimi Fili means that the ith simplex in the filtration has dimension Dimi, filtration value
+ * Fili and simplices with key ki1 ... kiDimi in its boundary.*/
template< typename Simplex_key, typename Filtration_value >
bool read_hasse_simplex(std::istream & in_, std::vector< Simplex_key > & boundary, Filtration_value & fil) {
int dim;
@@ -195,4 +206,93 @@ bool read_hasse_simplex(std::istream & in_, std::vector< Simplex_key > & boundar
return true;
}
+/**
+ * @brief Read a lower triangular distance matrix from a csv file. The .csv file may store either the whole
+ * (square) matrix or only its lower triangular part.
+ *
+ * @author Pawel Dlotko
+ *
+ * Square matrix file format:<br>
+ * 0;D12;...;D1j<br>
+ * D21;0;...;D2j<br>
+ * ...<br>
+ * Dj1;Dj2;...;0<br>
+ *
+ * Lower triangular matrix file format:<br>
+ * 0<br>
+ * D21;<br>
+ * D31;D32;<br>
+ * ...<br>
+ * Dj1;Dj2;...;Dj(j-1);<br>
+ *
+ **/
+template< typename Filtration_value >
+std::vector< std::vector< Filtration_value > > read_lower_triangular_matrix_from_csv_file(const std::string& filename,
+ const char separator = ';') {
+#ifdef DEBUG_TRACES
+ std::cout << "Using procedure read_lower_triangular_matrix_from_csv_file \n";
+#endif // DEBUG_TRACES
+ std::vector< std::vector< Filtration_value > > result;
+ std::ifstream in;
+ in.open(filename.c_str());
+ if (!in.is_open()) {
+ return result;
+ }
+
+ std::string line;
+
+ // the lower triangular part of the first row is empty, so we skip the first line and store an empty row:
+ std::getline(in, line);
+ std::vector< Filtration_value > values_in_this_line;
+ result.push_back(values_in_this_line);
+
+ int number_of_line = 0;
+
+ // first, read the file line by line to a string:
+ while (std::getline(in, line)) {
+ // if line is empty, break
+ if (line.size() == 0)
+ break;
+
+ // if the last character of the line is a separator:
+ if (line[ line.size() - 1 ] == separator) {
+ // then shrink the string by one
+ line.pop_back();
+ }
+
+ // replace all separators with spaces
+ std::replace(line.begin(), line.end(), separator, ' ');
+
+ // put the new line to a stream
+ std::istringstream iss(line);
+ // and now read the doubles.
+
+ int number_of_entry = 0;
+ std::vector< Filtration_value > values_in_this_line;
+ while (iss.good()) {
+ double entry;
+ iss >> entry;
+ if (number_of_entry <= number_of_line) {
+ values_in_this_line.push_back(entry);
+ }
+ ++number_of_entry;
+ }
+ if (!values_in_this_line.empty()) result.push_back(values_in_this_line);
+ ++number_of_line;
+ }
+ in.close();
+
+#ifdef DEBUG_TRACES
+ std::cerr << "Here is the matrix we read : \n";
+ for (size_t i = 0; i != result.size(); ++i) {
+ for (size_t j = 0; j != result[i].size(); ++j) {
+ std::cerr << result[i][j] << " ";
+ }
+ std::cerr << std::endl;
+ }
+#endif // DEBUG_TRACES
+
+ return result;
+} // read_lower_triangular_matrix_from_csv_file
+
#endif // READER_UTILS_H_
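A hypothetical usage sketch of the new reader (the file name is made up); the template parameter is the Filtration_value type:

#include <gudhi/reader_utils.h>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::vector<double>> distances =
      read_lower_triangular_matrix_from_csv_file<double>("distance_matrix.csv");
  std::cout << "read " << distances.size() << " rows" << std::endl;
}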
diff --git a/include/gudhi/sparsify_point_set.h b/include/gudhi/sparsify_point_set.h
new file mode 100644
index 00000000..507f8c79
--- /dev/null
+++ b/include/gudhi/sparsify_point_set.h
@@ -0,0 +1,113 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Clement Jamin
+ *
+ * Copyright (C) 2016 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SPARSIFY_POINT_SET_H_
+#define SPARSIFY_POINT_SET_H_
+
+#include <gudhi/Kd_tree_search.h>
+#ifdef GUDHI_SUBSAMPLING_PROFILING
+#include <gudhi/Clock.h>
+#endif
+
+#include <cstddef>
+#include <vector>
+
+namespace Gudhi {
+
+namespace subsampling {
+
+/**
+ * \ingroup subsampling
+ * \brief Outputs a subset of the input points so that the
+ * squared distance between any two points
+ * is greater than or equal to `min_squared_dist`.
+ *
+ * \tparam Kernel must be a model of the <a target="_blank"
+ * href="http://doc.cgal.org/latest/Spatial_searching/classSearchTraits.html">SearchTraits</a>
+ * concept, such as the <a target="_blank"
+ * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a> class, which
+ * can be static if you know the ambient dimension at compile-time, or dynamic if you don't.
+ * \tparam Point_range Range whose value type is Kernel::Point_d. It must provide random-access
+ * via `operator[]` and the points should be stored contiguously in memory.
+ * \tparam OutputIterator Output iterator whose value type is Kernel::Point_d.
+ *
+ * @param[in] k A kernel object.
+ * @param[in] input_pts Const reference to the input points.
+ * @param[in] min_squared_dist Minimum squared distance separating the output points.
+ * @param[out] output_it The output iterator.
+ */
+template <typename Kernel, typename Point_range, typename OutputIterator>
+void
+sparsify_point_set(
+ const Kernel &k, Point_range const& input_pts,
+ typename Kernel::FT min_squared_dist,
+ OutputIterator output_it) {
+ typedef typename Gudhi::spatial_searching::Kd_tree_search<
+ Kernel, Point_range> Points_ds;
+
+#ifdef GUDHI_SUBSAMPLING_PROFILING
+ Gudhi::Clock t;
+#endif
+
+ Points_ds points_ds(input_pts);
+
+ std::vector<bool> dropped_points(input_pts.size(), false);
+
+ // Parse the input points, and add them if they are not too close to
+ // the other points
+ std::size_t pt_idx = 0;
+ for (typename Point_range::const_iterator it_pt = input_pts.begin();
+ it_pt != input_pts.end();
+ ++it_pt, ++pt_idx) {
+ if (dropped_points[pt_idx])
+ continue;
+
+ *output_it++ = *it_pt;
+
+ auto ins_range = points_ds.query_incremental_nearest_neighbors(*it_pt);
+
+ // If another point Q is closer than min_squared_dist, mark Q to be dropped
+ for (auto const& neighbor : ins_range) {
+ std::size_t neighbor_point_idx = neighbor.first;
+ // If the neighbor is too close, we drop the neighbor
+ if (neighbor.second < min_squared_dist) {
+ // N.B.: If neighbor_point_idx < pt_idx,
+ // dropped_points[neighbor_point_idx] is already true but adding a
+ // test doesn't make things faster, so why bother?
+ dropped_points[neighbor_point_idx] = true;
+ } else {
+ break;
+ }
+ }
+ }
+
+#ifdef GUDHI_SUBSAMPLING_PROFILING
+ t.end();
+ std::cerr << "Point set sparsified in " << t.num_seconds()
+ << " seconds." << std::endl;
+#endif
+}
+
+} // namespace subsampling
+} // namespace Gudhi
+
+#endif // SPARSIFY_POINT_SET_H_
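A hypothetical usage sketch, assuming CGAL::Epick_d as the SearchTraits kernel (the point cloud is left as a placeholder):

#include <gudhi/sparsify_point_set.h>
#include <CGAL/Epick_d.h>
#include <iterator>
#include <vector>

int main() {
  typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> K;
  std::vector<K::Point_d> points;   // ... fill with input points ...
  std::vector<K::Point_d> sparse_points;
  // Keep a subset in which any two points are at squared distance >= 0.25.
  Gudhi::subsampling::sparsify_point_set(K(), points, 0.25, std::back_inserter(sparse_points));
}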