author     Gard Spreemann <gspreemann@gmail.com>   2018-02-02 13:51:45 +0100
committer  Gard Spreemann <gspreemann@gmail.com>   2018-02-02 13:51:45 +0100
commit     9899ae167f281d10b1684dfcd02c6838c5bf28df
tree       ceda62a40a9a8f731298832b1b4ab44ab0dd3a10 /include
parent     866f6ce614e9c09c97fed12c8c0c2c9fb84fad3f

GUDHI 2.1.0 as released by upstream in a tarball. (upstream/2.1.0)

Diffstat (limited to 'include')
-rw-r--r--  include/gudhi/Alpha_complex.h  4
-rw-r--r--  include/gudhi/Bitmap_cubical_complex.h  130
-rw-r--r--  include/gudhi/Bitmap_cubical_complex/counter.h  13
-rw-r--r--  include/gudhi/Bitmap_cubical_complex_base.h  419
-rw-r--r--  include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h  200
-rw-r--r--  include/gudhi/Bottleneck.h  4
-rw-r--r--  include/gudhi/Debug_utils.h  4
-rw-r--r--  include/gudhi/Edge_contraction.h  4
-rw-r--r--  include/gudhi/GIC.h  1298
-rw-r--r--  include/gudhi/Kd_tree_search.h  3
-rw-r--r--  include/gudhi/Neighbors_finder.h  3
-rw-r--r--  include/gudhi/PSSK.h  168
-rw-r--r--  include/gudhi/Persistence_heat_maps.h  919
-rw-r--r--  include/gudhi/Persistence_intervals.h  570
-rw-r--r--  include/gudhi/Persistence_intervals_with_distances.h  63
-rw-r--r--  include/gudhi/Persistence_landscape.h  1376
-rw-r--r--  include/gudhi/Persistence_landscape_on_grid.h  1348
-rw-r--r--  include/gudhi/Persistence_vectors.h  640
-rw-r--r--  include/gudhi/Simplex_tree.h  362
-rw-r--r--  include/gudhi/Simplex_tree/Simplex_tree_iterators.h  32
-rw-r--r--  include/gudhi/Skeleton_blocker.h  3
-rw-r--r--  include/gudhi/Strong_witness_complex.h  7
-rw-r--r--  include/gudhi/Tangential_complex.h  2
-rw-r--r--  include/gudhi/Unitary_tests_utils.h  40
-rw-r--r--  include/gudhi/Witness_complex.h  1
-rw-r--r--  include/gudhi/choose_n_farthest_points.h  4
-rw-r--r--  include/gudhi/common_persistence_representations.h  127
-rw-r--r--  include/gudhi/distance_functions.h  32
-rw-r--r--  include/gudhi/graph_simplicial_complex.h  63
-rw-r--r--  include/gudhi/read_persistence_from_file.h  120
30 files changed, 7511 insertions, 448 deletions
diff --git a/include/gudhi/Alpha_complex.h b/include/gudhi/Alpha_complex.h
index 1ff95c3d..63c6675c 100644
--- a/include/gudhi/Alpha_complex.h
+++ b/include/gudhi/Alpha_complex.h
@@ -175,7 +175,7 @@ class Alpha_complex {
*
* @return The number of vertices.
*/
- const std::size_t number_of_vertices() const {
+ std::size_t number_of_vertices() const {
return vertex_handle_to_iterator_.size();
}
@@ -268,8 +268,6 @@ class Alpha_complex {
return false; // ----- >>
}
- complex.set_dimension(triangulation_->maximal_dimension());
-
// --------------------------------------------------------------------------------------------
// Simplex_tree construction from loop on triangulation finite full cells list
if (triangulation_->number_of_vertices() > 0) {
diff --git a/include/gudhi/Bitmap_cubical_complex.h b/include/gudhi/Bitmap_cubical_complex.h
index f395de65..969daba6 100644
--- a/include/gudhi/Bitmap_cubical_complex.h
+++ b/include/gudhi/Bitmap_cubical_complex.h
@@ -31,10 +31,11 @@
#endif
#include <limits>
-#include <utility> // for pair<>
+#include <utility> // for pair<>
#include <algorithm> // for sort
#include <vector>
#include <numeric> // for iota
+#include <cstddef>
namespace Gudhi {
@@ -43,7 +44,8 @@ namespace cubical_complex {
// global variable, was used just for debugging.
const bool globalDbg = false;
-template <typename T> class is_before_in_filtration;
+template <typename T>
+class is_before_in_filtration;
/**
* @brief Cubical complex represented as a bitmap.
@@ -60,11 +62,10 @@ class Bitmap_cubical_complex : public T {
//*********************************************//
// Typedefs and typenames
//*********************************************//
- typedef size_t Simplex_key;
+ typedef std::size_t Simplex_key;
typedef typename T::filtration_type Filtration_value;
typedef Simplex_key Simplex_handle;
-
//*********************************************//
// Constructors
//*********************************************//
@@ -77,12 +78,12 @@ class Bitmap_cubical_complex : public T {
/**
* Constructor form a Perseus-style file.
**/
- Bitmap_cubical_complex(const char* perseus_style_file) :
- T(perseus_style_file), key_associated_to_simplex(this->total_number_of_cells + 1) {
+ Bitmap_cubical_complex(const char* perseus_style_file)
+ : T(perseus_style_file), key_associated_to_simplex(this->total_number_of_cells + 1) {
if (globalDbg) {
std::cerr << "Bitmap_cubical_complex( const char* perseus_style_file )\n";
}
- for (size_t i = 0; i != this->total_number_of_cells; ++i) {
+ for (std::size_t i = 0; i != this->total_number_of_cells; ++i) {
this->key_associated_to_simplex[i] = i;
}
// we initialize this only once, in each constructor, when the bitmap is constructed.
@@ -97,10 +98,9 @@ class Bitmap_cubical_complex : public T {
* with filtration on top dimensional cells.
**/
Bitmap_cubical_complex(const std::vector<unsigned>& dimensions,
- const std::vector<Filtration_value>& top_dimensional_cells) :
- T(dimensions, top_dimensional_cells),
- key_associated_to_simplex(this->total_number_of_cells + 1) {
- for (size_t i = 0; i != this->total_number_of_cells; ++i) {
+ const std::vector<Filtration_value>& top_dimensional_cells)
+ : T(dimensions, top_dimensional_cells), key_associated_to_simplex(this->total_number_of_cells + 1) {
+ for (std::size_t i = 0; i != this->total_number_of_cells; ++i) {
this->key_associated_to_simplex[i] = i;
}
// we initialize this only once, in each constructor, when the bitmap is constructed.
@@ -118,10 +118,10 @@ class Bitmap_cubical_complex : public T {
**/
Bitmap_cubical_complex(const std::vector<unsigned>& dimensions,
const std::vector<Filtration_value>& top_dimensional_cells,
- std::vector< bool > directions_in_which_periodic_b_cond_are_to_be_imposed) :
- T(dimensions, top_dimensional_cells, directions_in_which_periodic_b_cond_are_to_be_imposed),
- key_associated_to_simplex(this->total_number_of_cells + 1) {
- for (size_t i = 0; i != this->total_number_of_cells; ++i) {
+ std::vector<bool> directions_in_which_periodic_b_cond_are_to_be_imposed)
+ : T(dimensions, top_dimensional_cells, directions_in_which_periodic_b_cond_are_to_be_imposed),
+ key_associated_to_simplex(this->total_number_of_cells + 1) {
+ for (std::size_t i = 0; i != this->total_number_of_cells; ++i) {
this->key_associated_to_simplex[i] = i;
}
// we initialize this only once, in each constructor, when the bitmap is constructed.
@@ -142,9 +142,7 @@ class Bitmap_cubical_complex : public T {
/**
* Returns number of all cubes in the complex.
**/
- size_t num_simplices()const {
- return this->total_number_of_cells;
- }
+ std::size_t num_simplices() const { return this->total_number_of_cells; }
/**
* Returns a Simplex_handle to a cube that do not exist in this complex.
@@ -159,14 +157,12 @@ class Bitmap_cubical_complex : public T {
/**
* Returns dimension of the complex.
**/
- inline size_t dimension()const {
- return this->sizes.size();
- }
+ inline std::size_t dimension() const { return this->sizes.size(); }
/**
* Return dimension of a cell pointed by the Simplex_handle.
**/
- inline unsigned dimension(Simplex_handle sh)const {
+ inline unsigned dimension(Simplex_handle sh) const {
if (globalDbg) {
std::cerr << "unsigned dimension(const Simplex_handle& sh)\n";
}
@@ -199,7 +195,7 @@ class Bitmap_cubical_complex : public T {
/**
* Return the key of a cube pointed by the Simplex_handle.
**/
- Simplex_key key(Simplex_handle sh)const {
+ Simplex_key key(Simplex_handle sh) const {
if (globalDbg) {
std::cerr << "Simplex_key key(const Simplex_handle& sh)\n";
}
@@ -217,7 +213,7 @@ class Bitmap_cubical_complex : public T {
std::cerr << "Simplex_handle simplex(Simplex_key key)\n";
}
if (key != null_key()) {
- return this->simplex_associated_to_key[ key ];
+ return this->simplex_associated_to_key[key];
}
return null_simplex();
}
@@ -246,8 +242,8 @@ class Bitmap_cubical_complex : public T {
/**
* Boundary_simplex_range class provides ranges for boundary iterators.
**/
- typedef typename std::vector< Simplex_handle >::iterator Boundary_simplex_iterator;
- typedef typename std::vector< Simplex_handle > Boundary_simplex_range;
+ typedef typename std::vector<Simplex_handle>::iterator Boundary_simplex_iterator;
+ typedef typename std::vector<Simplex_handle> Boundary_simplex_range;
/**
* Filtration_simplex_iterator class provides an iterator though the whole structure in the order of filtration.
@@ -257,13 +253,13 @@ class Bitmap_cubical_complex : public T {
**/
class Filtration_simplex_range;
- class Filtration_simplex_iterator : std::iterator< std::input_iterator_tag, Simplex_handle > {
+ class Filtration_simplex_iterator : std::iterator<std::input_iterator_tag, Simplex_handle> {
// Iterator over all simplices of the complex in the order of the indexing scheme.
// 'value_type' must be 'Simplex_handle'.
public:
- Filtration_simplex_iterator(Bitmap_cubical_complex* b) : b(b), position(0) { }
+ Filtration_simplex_iterator(Bitmap_cubical_complex* b) : b(b), position(0) {}
- Filtration_simplex_iterator() : b(NULL), position(0) { }
+ Filtration_simplex_iterator() : b(NULL), position(0) {}
Filtration_simplex_iterator operator++() {
if (globalDbg) {
@@ -288,14 +284,14 @@ class Bitmap_cubical_complex : public T {
return (*this);
}
- bool operator==(const Filtration_simplex_iterator& rhs)const {
+ bool operator==(const Filtration_simplex_iterator& rhs) const {
if (globalDbg) {
std::cerr << "bool operator == ( const Filtration_simplex_iterator& rhs )\n";
}
- return ( this->position == rhs.position);
+ return (this->position == rhs.position);
}
- bool operator!=(const Filtration_simplex_iterator& rhs)const {
+ bool operator!=(const Filtration_simplex_iterator& rhs) const {
if (globalDbg) {
std::cerr << "bool operator != ( const Filtration_simplex_iterator& rhs )\n";
}
@@ -306,14 +302,14 @@ class Bitmap_cubical_complex : public T {
if (globalDbg) {
std::cerr << "Simplex_handle operator*()\n";
}
- return this->b->simplex_associated_to_key[ this->position ];
+ return this->b->simplex_associated_to_key[this->position];
}
friend class Filtration_simplex_range;
private:
Bitmap_cubical_complex<T>* b;
- size_t position;
+ std::size_t position;
};
/**
@@ -326,7 +322,7 @@ class Bitmap_cubical_complex : public T {
typedef Filtration_simplex_iterator const_iterator;
typedef Filtration_simplex_iterator iterator;
- Filtration_simplex_range(Bitmap_cubical_complex<T>* b) : b(b) { }
+ Filtration_simplex_range(Bitmap_cubical_complex<T>* b) : b(b) {}
Filtration_simplex_iterator begin() {
if (globalDbg) {
@@ -348,8 +344,6 @@ class Bitmap_cubical_complex : public T {
Bitmap_cubical_complex<T>* b;
};
-
-
//*********************************************//
// Methods to access iterators from the container:
@@ -357,9 +351,7 @@ class Bitmap_cubical_complex : public T {
* boundary_simplex_range creates an object of a Boundary_simplex_range class
* that provides ranges for the Boundary_simplex_iterator.
**/
- Boundary_simplex_range boundary_simplex_range(Simplex_handle sh) {
- return this->get_boundary_of_a_cell(sh);
- }
+ Boundary_simplex_range boundary_simplex_range(Simplex_handle sh) { return this->get_boundary_of_a_cell(sh); }
/**
* filtration_simplex_range creates an object of a Filtration_simplex_range class
@@ -374,8 +366,6 @@ class Bitmap_cubical_complex : public T {
}
//*********************************************//
-
-
//*********************************************//
// Elements which are in Gudhi now, but I (and in all the cases I asked also Marc) do not understand why they are
// there.
@@ -390,43 +380,41 @@ class Bitmap_cubical_complex : public T {
* Function needed for compatibility with Gudhi. Not useful for other purposes.
**/
std::pair<Simplex_handle, Simplex_handle> endpoints(Simplex_handle sh) {
- std::vector< size_t > bdry = this->get_boundary_of_a_cell(sh);
+ std::vector<std::size_t> bdry = this->get_boundary_of_a_cell(sh);
if (globalDbg) {
std::cerr << "std::pair<Simplex_handle, Simplex_handle> endpoints( Simplex_handle sh )\n";
std::cerr << "bdry.size() : " << bdry.size() << std::endl;
}
// this method returns two first elements from the boundary of sh.
if (bdry.size() < 2)
- throw("Error in endpoints in Bitmap_cubical_complex class. The cell have less than two elements in the "
- "boundary.");
+ throw(
+ "Error in endpoints in Bitmap_cubical_complex class. The cell have less than two elements in the "
+ "boundary.");
return std::make_pair(bdry[0], bdry[1]);
}
-
/**
* Class needed for compatibility with Gudhi. Not useful for other purposes.
**/
class Skeleton_simplex_range;
- class Skeleton_simplex_iterator : std::iterator< std::input_iterator_tag, Simplex_handle > {
+ class Skeleton_simplex_iterator : std::iterator<std::input_iterator_tag, Simplex_handle> {
// Iterator over all simplices of the complex in the order of the indexing scheme.
// 'value_type' must be 'Simplex_handle'.
public:
- Skeleton_simplex_iterator(Bitmap_cubical_complex* b, size_t d) : b(b), dimension(d) {
+ Skeleton_simplex_iterator(Bitmap_cubical_complex* b, std::size_t d) : b(b), dimension(d) {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , size_t d )\n";
+ std::cerr << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n";
}
// find the position of the first simplex of a dimension d
this->position = 0;
- while (
- (this->position != b->data.size()) &&
- (this->b->get_dimension_of_a_cell(this->position) != this->dimension)
- ) {
+ while ((this->position != b->data.size()) &&
+ (this->b->get_dimension_of_a_cell(this->position) != this->dimension)) {
++this->position;
}
}
- Skeleton_simplex_iterator() : b(NULL), position(0), dimension(0) { }
+ Skeleton_simplex_iterator() : b(NULL), position(0), dimension(0) {}
Skeleton_simplex_iterator operator++() {
if (globalDbg) {
@@ -434,10 +422,8 @@ class Bitmap_cubical_complex : public T {
}
// increment the position as long as you did not get to the next element of the dimension dimension.
++this->position;
- while (
- (this->position != this->b->data.size()) &&
- (this->b->get_dimension_of_a_cell(this->position) != this->dimension)
- ) {
+ while ((this->position != this->b->data.size()) &&
+ (this->b->get_dimension_of_a_cell(this->position) != this->dimension)) {
++this->position;
}
return (*this);
@@ -459,14 +445,14 @@ class Bitmap_cubical_complex : public T {
return (*this);
}
- bool operator==(const Skeleton_simplex_iterator& rhs)const {
+ bool operator==(const Skeleton_simplex_iterator& rhs) const {
if (globalDbg) {
std::cerr << "bool operator ==\n";
}
- return ( this->position == rhs.position);
+ return (this->position == rhs.position);
}
- bool operator!=(const Skeleton_simplex_iterator& rhs)const {
+ bool operator!=(const Skeleton_simplex_iterator& rhs) const {
if (globalDbg) {
std::cerr << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n";
}
@@ -481,9 +467,10 @@ class Bitmap_cubical_complex : public T {
}
friend class Skeleton_simplex_range;
+
private:
Bitmap_cubical_complex<T>* b;
- size_t position;
+ std::size_t position;
unsigned dimension;
};
@@ -497,7 +484,7 @@ class Bitmap_cubical_complex : public T {
typedef Skeleton_simplex_iterator const_iterator;
typedef Skeleton_simplex_iterator iterator;
- Skeleton_simplex_range(Bitmap_cubical_complex<T>* b, unsigned dimension) : b(b), dimension(dimension) { }
+ Skeleton_simplex_range(Bitmap_cubical_complex<T>* b, unsigned dimension) : b(b), dimension(dimension) {}
Skeleton_simplex_iterator begin() {
if (globalDbg) {
@@ -533,8 +520,8 @@ class Bitmap_cubical_complex : public T {
friend class is_before_in_filtration<T>;
protected:
- std::vector< size_t > key_associated_to_simplex;
- std::vector< size_t > simplex_associated_to_key;
+ std::vector<std::size_t> key_associated_to_simplex;
+ std::vector<std::size_t> simplex_associated_to_key;
}; // Bitmap_cubical_complex
template <typename T>
@@ -542,7 +529,7 @@ void Bitmap_cubical_complex<T>::initialize_simplex_associated_to_key() {
if (globalDbg) {
std::cerr << "void Bitmap_cubical_complex<T>::initialize_elements_ordered_according_to_filtration() \n";
}
- this->simplex_associated_to_key = std::vector<size_t>(this->data.size());
+ this->simplex_associated_to_key = std::vector<std::size_t>(this->data.size());
std::iota(std::begin(simplex_associated_to_key), std::end(simplex_associated_to_key), 0);
#ifdef GUDHI_USE_TBB
tbb::parallel_sort(simplex_associated_to_key.begin(), simplex_associated_to_key.end(),
@@ -552,16 +539,15 @@ void Bitmap_cubical_complex<T>::initialize_simplex_associated_to_key() {
#endif
// we still need to deal here with a key_associated_to_simplex:
- for ( size_t i = 0 ; i != simplex_associated_to_key.size() ; ++i ) {
- this->key_associated_to_simplex[ simplex_associated_to_key[i] ] = i;
+ for (std::size_t i = 0; i != simplex_associated_to_key.size(); ++i) {
+ this->key_associated_to_simplex[simplex_associated_to_key[i]] = i;
}
}
template <typename T>
class is_before_in_filtration {
public:
- explicit is_before_in_filtration(Bitmap_cubical_complex<T> * CC)
- : CC_(CC) { }
+ explicit is_before_in_filtration(Bitmap_cubical_complex<T>* CC) : CC_(CC) {}
bool operator()(const typename Bitmap_cubical_complex<T>::Simplex_handle& sh1,
const typename Bitmap_cubical_complex<T>::Simplex_handle& sh2) const {
@@ -573,8 +559,8 @@ class is_before_in_filtration {
return fil1 < fil2;
}
// in this case they are on the same filtration level, so the dimension decide.
- size_t dim1 = CC_->get_dimension_of_a_cell(sh1);
- size_t dim2 = CC_->get_dimension_of_a_cell(sh2);
+ std::size_t dim1 = CC_->get_dimension_of_a_cell(sh1);
+ std::size_t dim2 = CC_->get_dimension_of_a_cell(sh2);
if (dim1 != dim2) {
return dim1 < dim2;
}
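
For reference, initialize_simplex_associated_to_key() above fills simplex_associated_to_key with 0..n-1, sorts it with is_before_in_filtration (filtration value first, cell dimension as tie-break), and then inverts that permutation into key_associated_to_simplex, so that key() and simplex() are both constant-time lookups. A minimal standalone sketch of that sort-then-invert pattern, with plain doubles standing in for the cells' filtration values and the tie-breaking rules omitted (illustration only, not GUDHI code):

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

int main() {
  std::vector<double> filtration = {0.7, 0.1, 0.4};
  // Key -> cell index, ordered by filtration value (0.1, 0.4, 0.7).
  std::vector<std::size_t> simplex_associated_to_key(filtration.size());
  std::iota(simplex_associated_to_key.begin(), simplex_associated_to_key.end(), 0);
  std::sort(simplex_associated_to_key.begin(), simplex_associated_to_key.end(),
            [&](std::size_t a, std::size_t b) { return filtration[a] < filtration[b]; });
  // simplex_associated_to_key == {1, 2, 0}
  // Invert the permutation: cell index -> its key.
  std::vector<std::size_t> key_associated_to_simplex(filtration.size());
  for (std::size_t key = 0; key != simplex_associated_to_key.size(); ++key)
    key_associated_to_simplex[simplex_associated_to_key[key]] = key;
  // key_associated_to_simplex == {2, 0, 1}
}
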
diff --git a/include/gudhi/Bitmap_cubical_complex/counter.h b/include/gudhi/Bitmap_cubical_complex/counter.h
index 4b072f10..705b68a0 100644
--- a/include/gudhi/Bitmap_cubical_complex/counter.h
+++ b/include/gudhi/Bitmap_cubical_complex/counter.h
@@ -25,6 +25,7 @@
#include <iostream>
#include <vector>
+#include <cstddef>
namespace Gudhi {
@@ -63,14 +64,14 @@ class counter {
* If the value of the function is false, that means, that the counter have reached its end-value.
**/
bool increment() {
- size_t i = 0;
+ std::size_t i = 0;
while ((i != this->end.size()) && (this->current[i] == this->end[i])) {
++i;
}
if (i == this->end.size())return false;
++this->current[i];
- for (size_t j = 0; j != i; ++j) {
+ for (std::size_t j = 0; j != i; ++j) {
this->current[j] = this->begin[j];
}
return true;
@@ -80,7 +81,7 @@ class counter {
* Function to check if we are at the end of counter.
**/
bool isFinal() {
- for (size_t i = 0; i != this->current.size(); ++i) {
+ for (std::size_t i = 0; i != this->current.size(); ++i) {
if (this->current[i] == this->end[i])return true;
}
return false;
@@ -93,7 +94,7 @@ class counter {
**/
std::vector< unsigned > find_opposite(const std::vector< bool >& directionsForPeriodicBCond) {
std::vector< unsigned > result;
- for (size_t i = 0; i != this->current.size(); ++i) {
+ for (std::size_t i = 0; i != this->current.size(); ++i) {
if ((this->current[i] == this->end[i]) && (directionsForPeriodicBCond[i] == true)) {
result.push_back(this->begin[i]);
} else {
@@ -108,7 +109,7 @@ class counter {
**/
std::vector< bool > directions_of_finals() {
std::vector< bool > result;
- for (size_t i = 0; i != this->current.size(); ++i) {
+ for (std::size_t i = 0; i != this->current.size(); ++i) {
if (this->current[i] == this->end[i]) {
result.push_back(true);
} else {
@@ -123,7 +124,7 @@ class counter {
**/
friend std::ostream& operator<<(std::ostream& out, const counter& c) {
// std::cerr << "c.current.size() : " << c.current.size() << endl;
- for (size_t i = 0; i != c.current.size(); ++i) {
+ for (std::size_t i = 0; i != c.current.size(); ++i) {
out << c.current[i] << " ";
}
return out;
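
counter::increment() above advances the multi-dimensional counter the way an odometer does: find the first coordinate that has not yet reached its end value, bump it, and reset every coordinate before it; once all coordinates are maximal it returns false. A self-contained sketch of the same pattern, using plain vectors rather than the counter class (whose constructor is not shown in this diff):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::size_t> begin = {0, 0}, end = {2, 1}, current = begin;
  while (true) {
    for (std::size_t c : current) std::cout << c << ' ';
    std::cout << '\n';
    // Find the first coordinate that can still be increased.
    std::size_t i = 0;
    while (i != end.size() && current[i] == end[i]) ++i;
    if (i == end.size()) break;                                  // every coordinate is maximal
    ++current[i];                                                // bump it...
    for (std::size_t j = 0; j != i; ++j) current[j] = begin[j];  // ...and reset the ones before it
  }
}

With begin = {0, 0} and end = {2, 1} this enumerates the six positions (0,0), (1,0), (2,0), (0,1), (1,1), (2,1), which is the order the same logic produces inside counter::increment().
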
diff --git a/include/gudhi/Bitmap_cubical_complex_base.h b/include/gudhi/Bitmap_cubical_complex_base.h
index 0442ac34..bf257be1 100644
--- a/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/include/gudhi/Bitmap_cubical_complex_base.h
@@ -32,7 +32,9 @@
#include <algorithm>
#include <iterator>
#include <limits>
-#include <utility> // for pair<>
+#include <utility>
+#include <stdexcept>
+#include <cstddef>
namespace Gudhi {
@@ -65,8 +67,7 @@ class Bitmap_cubical_complex_base {
/**
*Default constructor
**/
- Bitmap_cubical_complex_base() :
- total_number_of_cells(0) { }
+ Bitmap_cubical_complex_base() : total_number_of_cells(0) {}
/**
* There are a few constructors of a Bitmap_cubical_complex_base class.
* First one, that takes vector<unsigned>, creates an empty bitmap of a dimension equal
@@ -90,7 +91,7 @@ class Bitmap_cubical_complex_base {
/**
* Destructor of the Bitmap_cubical_complex_base class.
**/
- virtual ~Bitmap_cubical_complex_base() { }
+ virtual ~Bitmap_cubical_complex_base() {}
/**
* The functions get_boundary_of_a_cell, get_coboundary_of_a_cell, get_dimension_of_a_cell
@@ -100,8 +101,10 @@ class Bitmap_cubical_complex_base {
* non-negative integer, indicating a position of a cube in the data structure.
* In the case of functions that compute (co)boundary, the output is a vector if non-negative integers pointing to
* the positions of (co)boundary element of the input cell.
+ * The boundary elements are guaranteed to be returned so that the
+ * incidence coefficients of boundary elements are alternating.
*/
- virtual inline std::vector< size_t > get_boundary_of_a_cell(size_t cell)const;
+ virtual inline std::vector<std::size_t> get_boundary_of_a_cell(std::size_t cell) const;
/**
* The functions get_coboundary_of_a_cell, get_coboundary_of_a_cell,
* get_dimension_of_a_cell and get_cell_data are the basic
@@ -112,21 +115,81 @@ class Bitmap_cubical_complex_base {
* In the case of functions that compute (co)boundary, the output is a vector if
* non-negative integers pointing to the
* positions of (co)boundary element of the input cell.
+ * Note that unlike in the case of boundary, over here the elements are
+ * not guaranteed to be returned with alternating incidence numbers.
+ *
**/
- virtual inline std::vector< size_t > get_coboundary_of_a_cell(size_t cell)const;
+ virtual inline std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const;
+
/**
- * In the case of get_dimension_of_a_cell function, the output is a non-negative integer
- * indicating the dimension of a cell.
- **/
- inline unsigned get_dimension_of_a_cell(size_t cell)const;
+ * This procedure compute incidence numbers between cubes. For a cube \f$A\f$ of
+ * dimension n and a cube \f$B \subset A\f$ of dimension n-1, an incidence
+ * between \f$A\f$ and \f$B\f$ is the integer with which \f$B\f$ appears in the boundary of \f$A\f$.
+ * Note that first parameter is a cube of dimension n,
+ * and the second parameter is an adjusted cube in dimension n-1.
+ * Given \f$A = [b_1,e_1] \times \ldots \ [b_{j-1},e_{j-1}] \times [b_{j},e_{j}] \times [b_{j+1},e_{j+1}] \times \ldots
+ *\times [b_{n},e_{n}] \f$
+ * such that \f$ b_{j} \neq e_{j} \f$
+ * and \f$B = [b_1,e_1] \times \ldots \ [b_{j-1},e_{j-1}] \times [a,a] \times [b_{j+1},e_{j+1}] \times \ldots \times
+ *[b_{n},e_{n}] \f$
+ * where \f$ a = b_{j}\f$ or \f$ a = e_{j}\f$, the incidence between \f$A\f$ and \f$B\f$
+ * computed by this procedure is given by formula:
+ * \f$ c\ (-1)^{\sum_{i=1}^{j-1} dim [b_{i},e_{i}]} \f$
+ * Where \f$ dim [b_{i},e_{i}] = 0 \f$ if \f$ b_{i}=e_{i} \f$ and 1 in other case.
+ * c is -1 if \f$ a = b_{j}\f$ and 1 if \f$ a = e_{j}\f$.
+ * @exception std::logic_error In case when the cube \f$B\f$ is not n-1
+ * dimensional face of a cube \f$A\f$.
+ **/
+ virtual int compute_incidence_between_cells(std::size_t coface, std::size_t face) const {
+ // first get the counters for coface and face:
+ std::vector<unsigned> coface_counter = this->compute_counter_for_given_cell(coface);
+ std::vector<unsigned> face_counter = this->compute_counter_for_given_cell(face);
+
+ // coface_counter and face_counter should agree at all positions except from one:
+ int number_of_position_in_which_counters_do_not_agree = -1;
+ std::size_t number_of_full_faces_that_comes_before = 0;
+ for (std::size_t i = 0; i != coface_counter.size(); ++i) {
+ if ((coface_counter[i] % 2 == 1) && (number_of_position_in_which_counters_do_not_agree == -1)) {
+ ++number_of_full_faces_that_comes_before;
+ }
+ if (coface_counter[i] != face_counter[i]) {
+ if (number_of_position_in_which_counters_do_not_agree != -1) {
+ std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
+ throw std::logic_error(
+ "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.");
+ }
+ number_of_position_in_which_counters_do_not_agree = i;
+ }
+ }
+
+ int incidence = 1;
+ if (number_of_full_faces_that_comes_before % 2) incidence = -1;
+ // if the face cell is on the right from coface cell:
+ if (coface_counter[number_of_position_in_which_counters_do_not_agree] + 1 ==
+ face_counter[number_of_position_in_which_counters_do_not_agree]) {
+ incidence *= -1;
+ }
+
+ return incidence;
+ }
+
+ /**
+* In the case of get_dimension_of_a_cell function, the output is a non-negative integer
+* indicating the dimension of a cell.
+* Note that unlike in the case of boundary, over here the elements are
+* not guaranteed to be returned with alternating incidence numbers.
+* To compute incidence between cells use compute_incidence_between_cells
+* procedure
+**/
+ inline unsigned get_dimension_of_a_cell(std::size_t cell) const;
+
/**
* In the case of get_cell_data, the output parameter is a reference to the value of a cube in a given position.
* This allows reading and changing the value of filtration. Note that if the value of a filtration is changed, the
* code do not check if we have a filtration or not. i.e. it do not check if the value of a filtration of a cell is
* not smaller than the value of a filtration of its boundary and not greater than the value of its coboundary.
**/
- inline T& get_cell_data(size_t cell);
-
+ inline T& get_cell_data(std::size_t cell);
/**
* Typical input used to construct a baseBitmap class is a filtration given at the top dimensional cells.
@@ -141,33 +204,29 @@ class Bitmap_cubical_complex_base {
/**
* Returns dimension of a complex.
**/
- inline unsigned dimension()const {
- return sizes.size();
- }
+ inline unsigned dimension() const { return sizes.size(); }
/**
* Returns number of all cubes in the data structure.
**/
- inline unsigned size()const {
- return this->data.size();
- }
+ inline unsigned size() const { return this->data.size(); }
/**
* Writing to stream operator. By using it we get the values T of cells in order in which they are stored in the
* structure. This procedure is used for debugging purposes.
**/
template <typename K>
- friend std::ostream& operator<<(std::ostream & os, const Bitmap_cubical_complex_base<K>& b);
+ friend std::ostream& operator<<(std::ostream& os, const Bitmap_cubical_complex_base<K>& b);
/**
* Function that put the input data to bins. By putting data to bins we mean rounding them to a sequence of values
* equally distributed in the range of data.
* Sometimes if most of the cells have different birth-death times, the performance of the algorithms to compute
* persistence gets worst. When dealing with this type of data, one may want to put different values on cells to
- * some number of bins. The function put_data_to_bins( size_t number_of_bins ) is designed for that purpose.
+ * some number of bins. The function put_data_to_bins( std::size_t number_of_bins ) is designed for that purpose.
* The parameter of the function is the number of bins (distinct values) we want to have in the cubical complex.
**/
- void put_data_to_bins(size_t number_of_bins);
+ void put_data_to_bins(std::size_t number_of_bins);
/**
* Function that put the input data to bins. By putting data to bins we mean rounding them to a sequence of values
@@ -184,7 +243,7 @@ class Bitmap_cubical_complex_base {
/**
* Functions to find min and max values of filtration.
**/
- std::pair< T, T > min_max_filtration();
+ std::pair<T, T> min_max_filtration();
// ITERATORS
@@ -192,11 +251,9 @@ class Bitmap_cubical_complex_base {
* @brief Iterator through all cells in the complex (in order they appear in the structure -- i.e.
* in lexicographical order).
**/
- class All_cells_iterator : std::iterator< std::input_iterator_tag, T > {
+ class All_cells_iterator : std::iterator<std::input_iterator_tag, T> {
public:
- All_cells_iterator() {
- this->counter = 0;
- }
+ All_cells_iterator() { this->counter = 0; }
All_cells_iterator operator++() {
// first find first element of the counter that can be increased:
@@ -215,14 +272,12 @@ class Bitmap_cubical_complex_base {
return *this;
}
- bool operator==(const All_cells_iterator& rhs)const {
- if (this->counter != rhs.counter)return false;
+ bool operator==(const All_cells_iterator& rhs) const {
+ if (this->counter != rhs.counter) return false;
return true;
}
- bool operator!=(const All_cells_iterator& rhs)const {
- return !(*this == rhs);
- }
+ bool operator!=(const All_cells_iterator& rhs) const { return !(*this == rhs); }
/*
* The operator * returns position of a cube in the structure of cubical complex. This position can be then used as
@@ -231,12 +286,11 @@ class Bitmap_cubical_complex_base {
* boundary and coboundary and dimension
* and in function get_cell_data to get a filtration of a cell.
*/
- size_t operator*() {
- return this->counter;
- }
+ std::size_t operator*() { return this->counter; }
friend class Bitmap_cubical_complex_base;
+
protected:
- size_t counter;
+ std::size_t counter;
};
/**
@@ -261,71 +315,61 @@ class Bitmap_cubical_complex_base {
**/
class All_cells_range {
public:
- All_cells_range(Bitmap_cubical_complex_base* b) : b(b) { }
+ All_cells_range(Bitmap_cubical_complex_base* b) : b(b) {}
- All_cells_iterator begin() {
- return b->all_cells_iterator_begin();
- }
+ All_cells_iterator begin() { return b->all_cells_iterator_begin(); }
+
+ All_cells_iterator end() { return b->all_cells_iterator_end(); }
- All_cells_iterator end() {
- return b->all_cells_iterator_end();
- }
private:
Bitmap_cubical_complex_base<T>* b;
};
- All_cells_range all_cells_range() {
- return All_cells_range(this);
- }
-
+ All_cells_range all_cells_range() { return All_cells_range(this); }
/**
* Boundary_range class provides ranges for boundary iterators.
**/
- typedef typename std::vector< size_t >::const_iterator Boundary_iterator;
- typedef typename std::vector< size_t > Boundary_range;
+ typedef typename std::vector<std::size_t>::const_iterator Boundary_iterator;
+ typedef typename std::vector<std::size_t> Boundary_range;
/**
* boundary_simplex_range creates an object of a Boundary_simplex_range class
* that provides ranges for the Boundary_simplex_iterator.
**/
- Boundary_range boundary_range(size_t sh) {
- return this->get_boundary_of_a_cell(sh);
- }
+ Boundary_range boundary_range(std::size_t sh) { return this->get_boundary_of_a_cell(sh); }
/**
* Coboundary_range class provides ranges for boundary iterators.
**/
- typedef typename std::vector< size_t >::const_iterator Coboundary_iterator;
- typedef typename std::vector< size_t > Coboundary_range;
+ typedef typename std::vector<std::size_t>::const_iterator Coboundary_iterator;
+ typedef typename std::vector<std::size_t> Coboundary_range;
/**
* boundary_simplex_range creates an object of a Boundary_simplex_range class
* that provides ranges for the Boundary_simplex_iterator.
**/
- Coboundary_range coboundary_range(size_t sh) {
- return this->get_coboundary_of_a_cell(sh);
- }
+ Coboundary_range coboundary_range(std::size_t sh) { return this->get_coboundary_of_a_cell(sh); }
/**
* @brief Iterator through top dimensional cells of the complex. The cells appear in order they are stored
* in the structure (i.e. in lexicographical order)
**/
- class Top_dimensional_cells_iterator : std::iterator< std::input_iterator_tag, T > {
+ class Top_dimensional_cells_iterator : std::iterator<std::input_iterator_tag, T> {
public:
Top_dimensional_cells_iterator(Bitmap_cubical_complex_base& b) : b(b) {
- this->counter = std::vector<size_t>(b.dimension());
+ this->counter = std::vector<std::size_t>(b.dimension());
// std::fill( this->counter.begin() , this->counter.end() , 0 );
}
Top_dimensional_cells_iterator operator++() {
// first find first element of the counter that can be increased:
- size_t dim = 0;
- while ((dim != this->b.dimension()) && (this->counter[dim] == this->b.sizes[dim] - 1))++dim;
+ std::size_t dim = 0;
+ while ((dim != this->b.dimension()) && (this->counter[dim] == this->b.sizes[dim] - 1)) ++dim;
if (dim != this->b.dimension()) {
++this->counter[dim];
- for (size_t i = 0; i != dim; ++i) {
+ for (std::size_t i = 0; i != dim; ++i) {
this->counter[i] = 0;
}
} else {
@@ -346,18 +390,16 @@ class Bitmap_cubical_complex_base {
return *this;
}
- bool operator==(const Top_dimensional_cells_iterator& rhs)const {
- if (&this->b != &rhs.b)return false;
- if (this->counter.size() != rhs.counter.size())return false;
- for (size_t i = 0; i != this->counter.size(); ++i) {
- if (this->counter[i] != rhs.counter[i])return false;
+ bool operator==(const Top_dimensional_cells_iterator& rhs) const {
+ if (&this->b != &rhs.b) return false;
+ if (this->counter.size() != rhs.counter.size()) return false;
+ for (std::size_t i = 0; i != this->counter.size(); ++i) {
+ if (this->counter[i] != rhs.counter[i]) return false;
}
return true;
}
- bool operator!=(const Top_dimensional_cells_iterator& rhs)const {
- return !(*this == rhs);
- }
+ bool operator!=(const Top_dimensional_cells_iterator& rhs) const { return !(*this == rhs); }
/*
* The operator * returns position of a cube in the structure of cubical complex. This position can be then used as
@@ -366,26 +408,25 @@ class Bitmap_cubical_complex_base {
* boundary and coboundary and dimension
* and in function get_cell_data to get a filtration of a cell.
*/
- size_t operator*() {
- return this->compute_index_in_bitmap();
- }
+ std::size_t operator*() { return this->compute_index_in_bitmap(); }
- size_t compute_index_in_bitmap()const {
- size_t index = 0;
- for (size_t i = 0; i != this->counter.size(); ++i) {
+ std::size_t compute_index_in_bitmap() const {
+ std::size_t index = 0;
+ for (std::size_t i = 0; i != this->counter.size(); ++i) {
index += (2 * this->counter[i] + 1) * this->b.multipliers[i];
}
return index;
}
- void print_counter()const {
- for (size_t i = 0; i != this->counter.size(); ++i) {
+ void print_counter() const {
+ for (std::size_t i = 0; i != this->counter.size(); ++i) {
std::cout << this->counter[i] << " ";
}
}
friend class Bitmap_cubical_complex_base;
+
protected:
- std::vector< size_t > counter;
+ std::vector<std::size_t> counter;
Bitmap_cubical_complex_base& b;
};
@@ -402,7 +443,7 @@ class Bitmap_cubical_complex_base {
**/
Top_dimensional_cells_iterator top_dimensional_cells_iterator_end() {
Top_dimensional_cells_iterator a(*this);
- for (size_t i = 0; i != this->dimension(); ++i) {
+ for (std::size_t i = 0; i != this->dimension(); ++i) {
a.counter[i] = this->sizes[i] - 1;
}
a.counter[0]++;
@@ -414,32 +455,24 @@ class Bitmap_cubical_complex_base {
**/
class Top_dimensional_cells_range {
public:
- Top_dimensional_cells_range(Bitmap_cubical_complex_base* b) : b(b) { }
+ Top_dimensional_cells_range(Bitmap_cubical_complex_base* b) : b(b) {}
- Top_dimensional_cells_iterator begin() {
- return b->top_dimensional_cells_iterator_begin();
- }
+ Top_dimensional_cells_iterator begin() { return b->top_dimensional_cells_iterator_begin(); }
+
+ Top_dimensional_cells_iterator end() { return b->top_dimensional_cells_iterator_end(); }
- Top_dimensional_cells_iterator end() {
- return b->top_dimensional_cells_iterator_end();
- }
private:
Bitmap_cubical_complex_base<T>* b;
};
- Top_dimensional_cells_range top_dimensional_cells_range() {
- return Top_dimensional_cells_range(this);
- }
-
+ Top_dimensional_cells_range top_dimensional_cells_range() { return Top_dimensional_cells_range(this); }
//****************************************************************************************************************//
//****************************************************************************************************************//
//****************************************************************************************************************//
//****************************************************************************************************************//
- inline size_t number_cells()const {
- return this->total_number_of_cells;
- }
+ inline std::size_t number_cells() const { return this->total_number_of_cells; }
//****************************************************************************************************************//
//****************************************************************************************************************//
@@ -450,11 +483,11 @@ class Bitmap_cubical_complex_base {
std::vector<unsigned> sizes;
std::vector<unsigned> multipliers;
std::vector<T> data;
- size_t total_number_of_cells;
+ std::size_t total_number_of_cells;
void set_up_containers(const std::vector<unsigned>& sizes) {
unsigned multiplier = 1;
- for (size_t i = 0; i != sizes.size(); ++i) {
+ for (std::size_t i = 0; i != sizes.size(); ++i) {
this->sizes.push_back(sizes[i]);
this->multipliers.push_back(multiplier);
multiplier *= 2 * sizes[i] + 1;
@@ -463,18 +496,18 @@ class Bitmap_cubical_complex_base {
this->total_number_of_cells = multiplier;
}
- size_t compute_position_in_bitmap(const std::vector< unsigned >& counter) {
- size_t position = 0;
- for (size_t i = 0; i != this->multipliers.size(); ++i) {
+ std::size_t compute_position_in_bitmap(const std::vector<unsigned>& counter) {
+ std::size_t position = 0;
+ for (std::size_t i = 0; i != this->multipliers.size(); ++i) {
position += this->multipliers[i] * counter[i];
}
return position;
}
- std::vector<unsigned> compute_counter_for_given_cell(size_t cell)const {
+ std::vector<unsigned> compute_counter_for_given_cell(std::size_t cell) const {
std::vector<unsigned> counter;
counter.reserve(this->sizes.size());
- for (size_t dim = this->sizes.size(); dim != 0; --dim) {
+ for (std::size_t dim = this->sizes.size(); dim != 0; --dim) {
counter.push_back(cell / this->multipliers[dim - 1]);
cell = cell % this->multipliers[dim - 1];
}
@@ -486,96 +519,94 @@ class Bitmap_cubical_complex_base {
const std::vector<T>& top_dimensional_cells);
Bitmap_cubical_complex_base(const char* perseus_style_file, std::vector<bool> directions);
Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes, std::vector<bool> directions);
- Bitmap_cubical_complex_base(const std::vector<unsigned>& dimensions,
- const std::vector<T>& top_dimensional_cells,
+ Bitmap_cubical_complex_base(const std::vector<unsigned>& dimensions, const std::vector<T>& top_dimensional_cells,
std::vector<bool> directions);
};
template <typename T>
-void Bitmap_cubical_complex_base<T>::put_data_to_bins(size_t number_of_bins) {
- bool bdg = false;
+void Bitmap_cubical_complex_base<T>::put_data_to_bins(std::size_t number_of_bins) {
+ bool dbg = false;
- std::pair< T, T > min_max = this->min_max_filtration();
- T dx = (min_max.second - min_max.first) / (T) number_of_bins;
+ std::pair<T, T> min_max = this->min_max_filtration();
+ T dx = (min_max.second - min_max.first) / (T)number_of_bins;
// now put the data into the appropriate bins:
- for (size_t i = 0; i != this->data.size(); ++i) {
- if (bdg) {
+ for (std::size_t i = 0; i != this->data.size(); ++i) {
+ if (dbg) {
std::cerr << "Before binning : " << this->data[i] << std::endl;
}
this->data[i] = min_max.first + dx * (this->data[i] - min_max.first) / number_of_bins;
- if (bdg) {
+ if (dbg) {
std::cerr << "After binning : " << this->data[i] << std::endl;
- getchar();
}
}
}
template <typename T>
void Bitmap_cubical_complex_base<T>::put_data_to_bins(T diameter_of_bin) {
- bool bdg = false;
- std::pair< T, T > min_max = this->min_max_filtration();
+ bool dbg = false;
+ std::pair<T, T> min_max = this->min_max_filtration();
- size_t number_of_bins = (min_max.second - min_max.first) / diameter_of_bin;
+ std::size_t number_of_bins = (min_max.second - min_max.first) / diameter_of_bin;
// now put the data into the appropriate bins:
- for (size_t i = 0; i != this->data.size(); ++i) {
- if (bdg) {
+ for (std::size_t i = 0; i != this->data.size(); ++i) {
+ if (dbg) {
std::cerr << "Before binning : " << this->data[i] << std::endl;
}
this->data[i] = min_max.first + diameter_of_bin * (this->data[i] - min_max.first) / number_of_bins;
- if (bdg) {
+ if (dbg) {
std::cerr << "After binning : " << this->data[i] << std::endl;
- getchar();
}
}
}
template <typename T>
-std::pair< T, T > Bitmap_cubical_complex_base<T>::min_max_filtration() {
- std::pair< T, T > min_max(std::numeric_limits<T>::max(), std::numeric_limits<T>::min());
- for (size_t i = 0; i != this->data.size(); ++i) {
- if (this->data[i] < min_max.first)min_max.first = this->data[i];
- if (this->data[i] > min_max.second)min_max.second = this->data[i];
+std::pair<T, T> Bitmap_cubical_complex_base<T>::min_max_filtration() {
+ std::pair<T, T> min_max(std::numeric_limits<T>::max(), std::numeric_limits<T>::min());
+ for (std::size_t i = 0; i != this->data.size(); ++i) {
+ if (this->data[i] < min_max.first) min_max.first = this->data[i];
+ if (this->data[i] > min_max.second) min_max.second = this->data[i];
}
return min_max;
}
template <typename K>
-std::ostream& operator<<(std::ostream & out, const Bitmap_cubical_complex_base<K>& b) {
- for (typename Bitmap_cubical_complex_base<K>::all_cells_const_iterator
- it = b.all_cells_const_begin(); it != b.all_cells_const_end(); ++it) {
+std::ostream& operator<<(std::ostream& out, const Bitmap_cubical_complex_base<K>& b) {
+ for (typename Bitmap_cubical_complex_base<K>::all_cells_const_iterator it = b.all_cells_const_begin();
+ it != b.all_cells_const_end(); ++it) {
out << *it << " ";
}
return out;
}
template <typename T>
-Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base
-(const std::vector<unsigned>& sizes) {
+Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes) {
this->set_up_containers(sizes);
}
template <typename T>
-void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells_list(const std::vector<unsigned>& sizes_in_following_directions,
- const std::vector<T>& top_dimensional_cells) {
+void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells_list(
+ const std::vector<unsigned>& sizes_in_following_directions, const std::vector<T>& top_dimensional_cells) {
this->set_up_containers(sizes_in_following_directions);
- size_t number_of_top_dimensional_elements = 1;
- for (size_t i = 0; i != sizes_in_following_directions.size(); ++i) {
+ std::size_t number_of_top_dimensional_elements = 1;
+ for (std::size_t i = 0; i != sizes_in_following_directions.size(); ++i) {
number_of_top_dimensional_elements *= sizes_in_following_directions[i];
}
if (number_of_top_dimensional_elements != top_dimensional_cells.size()) {
- std::cerr << "Error in constructor Bitmap_cubical_complex_base ( std::vector<size_t> sizes_in_following_directions"
- << ", std::vector<T> top_dimensional_cells ). Number of top dimensional elements that follow from "
- << "sizes_in_following_directions vector is different than the size of top_dimensional_cells vector."
- << std::endl;
- throw("Error in constructor Bitmap_cubical_complex_base( std::vector<size_t> sizes_in_following_directions,"
- "std::vector<T> top_dimensional_cells ). Number of top dimensional elements that follow from "
- "sizes_in_following_directions vector is different than the size of top_dimensional_cells vector.");
+ std::cerr << "Error in constructor Bitmap_cubical_complex_base ( std::vector<std::size_t> "
+ << "sizes_in_following_directions, std::vector<T> top_dimensional_cells ). Number of top dimensional "
+ << "elements that follow from sizes_in_following_directions vector is different than the size of "
+ << "top_dimensional_cells vector."
+ << std::endl;
+ throw(
+ "Error in constructor Bitmap_cubical_complex_base( std::vector<std::size_t> sizes_in_following_directions,"
+ "std::vector<T> top_dimensional_cells ). Number of top dimensional elements that follow from "
+ "sizes_in_following_directions vector is different than the size of top_dimensional_cells vector.");
}
Bitmap_cubical_complex_base<T>::Top_dimensional_cells_iterator it(*this);
- size_t index = 0;
+ std::size_t index = 0;
for (it = this->top_dimensional_cells_iterator_begin(); it != this->top_dimensional_cells_iterator_end(); ++it) {
this->get_cell_data(*it) = top_dimensional_cells[index];
++index;
@@ -584,8 +615,8 @@ void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells
}
template <typename T>
-Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base
-(const std::vector<unsigned>& sizes_in_following_directions, const std::vector<T>& top_dimensional_cells) {
+Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes_in_following_directions,
+ const std::vector<T>& top_dimensional_cells) {
this->setup_bitmap_based_on_top_dimensional_cells_list(sizes_in_following_directions, top_dimensional_cells);
}
@@ -599,15 +630,17 @@ void Bitmap_cubical_complex_base<T>::read_perseus_style_file(const char* perseus
if (dbg) {
std::cerr << "dimensionOfData : " << dimensionOfData << std::endl;
- getchar();
}
std::vector<unsigned> sizes;
sizes.reserve(dimensionOfData);
- for (size_t i = 0; i != dimensionOfData; ++i) {
+ // all dimensions multiplied
+ std::size_t dimensions = 1;
+ for (std::size_t i = 0; i != dimensionOfData; ++i) {
unsigned size_in_this_dimension;
inFiltration >> size_in_this_dimension;
sizes.push_back(size_in_this_dimension);
+ dimensions *= size_in_this_dimension;
if (dbg) {
std::cerr << "size_in_this_dimension : " << size_in_this_dimension << std::endl;
}
@@ -617,19 +650,20 @@ void Bitmap_cubical_complex_base<T>::read_perseus_style_file(const char* perseus
Bitmap_cubical_complex_base<T>::Top_dimensional_cells_iterator it(*this);
it = this->top_dimensional_cells_iterator_begin();
- while (!inFiltration.eof()) {
- T filtrationLevel;
- inFiltration >> filtrationLevel;
+ T filtrationLevel;
+ for (std::size_t i = 0; i < dimensions; ++i) {
+ if (!(inFiltration >> filtrationLevel) || (inFiltration.eof())) {
+ throw std::ios_base::failure("Bad Perseus file format.");
+ }
if (dbg) {
- std::cerr << "Cell of an index : "
- << it.compute_index_in_bitmap()
- << " and dimension: "
- << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
- << " get the value : " << filtrationLevel << std::endl;
+ std::cerr << "Cell of an index : " << it.compute_index_in_bitmap()
+ << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
+ << " get the value : " << filtrationLevel << std::endl;
}
this->get_cell_data(*it) = filtrationLevel;
++it;
}
+
inFiltration.close();
this->impose_lower_star_filtration();
}
@@ -668,37 +702,44 @@ Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const char* perseus_
}
template <typename T>
-std::vector< size_t > Bitmap_cubical_complex_base<T>::get_boundary_of_a_cell(size_t cell)const {
- std::vector< size_t > boundary_elements;
+std::vector<std::size_t> Bitmap_cubical_complex_base<T>::get_boundary_of_a_cell(std::size_t cell) const {
+ std::vector<std::size_t> boundary_elements;
// Speed traded of for memory. Check if it is better in practice.
- boundary_elements.reserve(this->dimension()*2);
+ boundary_elements.reserve(this->dimension() * 2);
- size_t cell1 = cell;
- for (size_t i = this->multipliers.size(); i != 0; --i) {
+ std::size_t sum_of_dimensions = 0;
+ std::size_t cell1 = cell;
+ for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell1 / this->multipliers[i - 1];
if (position % 2 == 1) {
- boundary_elements.push_back(cell - this->multipliers[ i - 1 ]);
- boundary_elements.push_back(cell + this->multipliers[ i - 1 ]);
+ if (sum_of_dimensions % 2) {
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ } else {
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ }
+ ++sum_of_dimensions;
}
cell1 = cell1 % this->multipliers[i - 1];
}
+
return boundary_elements;
}
template <typename T>
-std::vector< size_t > Bitmap_cubical_complex_base<T>::get_coboundary_of_a_cell(size_t cell)const {
+std::vector<std::size_t> Bitmap_cubical_complex_base<T>::get_coboundary_of_a_cell(std::size_t cell) const {
std::vector<unsigned> counter = this->compute_counter_for_given_cell(cell);
- std::vector< size_t > coboundary_elements;
- size_t cell1 = cell;
- for (size_t i = this->multipliers.size(); i != 0; --i) {
+ std::vector<std::size_t> coboundary_elements;
+ std::size_t cell1 = cell;
+ for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell1 / this->multipliers[i - 1];
if (position % 2 == 0) {
if ((cell > this->multipliers[i - 1]) && (counter[i - 1] != 0)) {
coboundary_elements.push_back(cell - this->multipliers[i - 1]);
}
- if (
- (cell + this->multipliers[i - 1] < this->data.size()) && (counter[i - 1] != 2 * this->sizes[i - 1])) {
+ if ((cell + this->multipliers[i - 1] < this->data.size()) && (counter[i - 1] != 2 * this->sizes[i - 1])) {
coboundary_elements.push_back(cell + this->multipliers[i - 1]);
}
}
@@ -708,11 +749,11 @@ std::vector< size_t > Bitmap_cubical_complex_base<T>::get_coboundary_of_a_cell(s
}
template <typename T>
-unsigned Bitmap_cubical_complex_base<T>::get_dimension_of_a_cell(size_t cell)const {
+unsigned Bitmap_cubical_complex_base<T>::get_dimension_of_a_cell(std::size_t cell) const {
bool dbg = false;
if (dbg) std::cerr << "\n\n\n Computing position o a cell of an index : " << cell << std::endl;
unsigned dimension = 0;
- for (size_t i = this->multipliers.size(); i != 0; --i) {
+ for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell / this->multipliers[i - 1];
if (dbg) {
@@ -720,7 +761,6 @@ unsigned Bitmap_cubical_complex_base<T>::get_dimension_of_a_cell(size_t cell)con
std::cerr << "cell : " << cell << std::endl;
std::cerr << "position : " << position << std::endl;
std::cerr << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl;
- getchar();
}
if (position % 2 == 1) {
@@ -733,7 +773,7 @@ unsigned Bitmap_cubical_complex_base<T>::get_dimension_of_a_cell(size_t cell)con
}
template <typename T>
-inline T& Bitmap_cubical_complex_base<T>::get_cell_data(size_t cell) {
+inline T& Bitmap_cubical_complex_base<T>::get_cell_data(std::size_t cell) {
return this->data[cell];
}
@@ -744,12 +784,12 @@ void Bitmap_cubical_complex_base<T>::impose_lower_star_filtration() {
// this vector will be used to check which elements have already been taken care of in imposing lower star filtration
std::vector<bool> is_this_cell_considered(this->data.size(), false);
- size_t size_to_reserve = 1;
- for (size_t i = 0; i != this->multipliers.size(); ++i) {
- size_to_reserve *= (size_t) ((this->multipliers[i] - 1) / 2);
+ std::size_t size_to_reserve = 1;
+ for (std::size_t i = 0; i != this->multipliers.size(); ++i) {
+ size_to_reserve *= (std::size_t)((this->multipliers[i] - 1) / 2);
}
- std::vector<size_t> indices_to_consider;
+ std::vector<std::size_t> indices_to_consider;
indices_to_consider.reserve(size_to_reserve);
// we assume here that we already have a filtration on the top dimensional cells and
// we have to extend it to lower ones.
@@ -761,32 +801,29 @@ void Bitmap_cubical_complex_base<T>::impose_lower_star_filtration() {
while (indices_to_consider.size()) {
if (dbg) {
std::cerr << "indices_to_consider in this iteration \n";
- for (size_t i = 0; i != indices_to_consider.size(); ++i) {
+ for (std::size_t i = 0; i != indices_to_consider.size(); ++i) {
std::cout << indices_to_consider[i] << " ";
}
- getchar();
}
- std::vector<size_t> new_indices_to_consider;
- for (size_t i = 0; i != indices_to_consider.size(); ++i) {
- std::vector<size_t> bd = this->get_boundary_of_a_cell(indices_to_consider[i]);
- for (size_t boundaryIt = 0; boundaryIt != bd.size(); ++boundaryIt) {
+ std::vector<std::size_t> new_indices_to_consider;
+ for (std::size_t i = 0; i != indices_to_consider.size(); ++i) {
+ std::vector<std::size_t> bd = this->get_boundary_of_a_cell(indices_to_consider[i]);
+ for (std::size_t boundaryIt = 0; boundaryIt != bd.size(); ++boundaryIt) {
if (dbg) {
- std::cerr << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[ bd[boundaryIt] ]
- << " while of a cell: " << indices_to_consider[i] << " is: " << this->data[ indices_to_consider[i] ]
- << std::endl;
- getchar();
+ std::cerr << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]]
+ << " while of a cell: " << indices_to_consider[i] << " is: " << this->data[indices_to_consider[i]]
+ << std::endl;
}
- if (this->data[ bd[boundaryIt] ] > this->data[ indices_to_consider[i] ]) {
- this->data[ bd[boundaryIt] ] = this->data[ indices_to_consider[i] ];
+ if (this->data[bd[boundaryIt]] > this->data[indices_to_consider[i]]) {
+ this->data[bd[boundaryIt]] = this->data[indices_to_consider[i]];
if (dbg) {
- std::cerr << "Setting the value of a cell : " << bd[boundaryIt] << " to : "
- << this->data[ indices_to_consider[i] ] << std::endl;
- getchar();
+ std::cerr << "Setting the value of a cell : " << bd[boundaryIt]
+ << " to : " << this->data[indices_to_consider[i]] << std::endl;
}
}
- if (is_this_cell_considered[ bd[boundaryIt] ] == false) {
+ if (is_this_cell_considered[bd[boundaryIt]] == false) {
new_indices_to_consider.push_back(bd[boundaryIt]);
- is_this_cell_considered[ bd[boundaryIt] ] = true;
+ is_this_cell_considered[bd[boundaryIt]] = true;
}
}
}
@@ -795,8 +832,8 @@ void Bitmap_cubical_complex_base<T>::impose_lower_star_filtration() {
}
template <typename T>
-bool compareFirstElementsOfTuples(const std::pair< std::pair< T, size_t >, char >& first,
- const std::pair< std::pair< T, size_t >, char >& second) {
+bool compareFirstElementsOfTuples(const std::pair<std::pair<T, std::size_t>, char>& first,
+ const std::pair<std::pair<T, std::size_t>, char>& second) {
if (first.first.first < second.first.first) {
return true;
} else {
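
The new compute_incidence_between_cells() and the reordered get_boundary_of_a_cell() above can be exercised on the smallest interesting input: a single square, which the bitmap stores as a 3x3 grid with the unique top-dimensional cube at position 4. A minimal sketch, assuming the GUDHI 2.1.0 headers from this tree are on the include path and the Gudhi::cubical_complex namespace these files use; the exact signs depend on internal ordering, but over the returned boundary they alternate, as the new documentation promises:

#include <gudhi/Bitmap_cubical_complex_base.h>

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // One top-dimensional 2-cell (a single square) with filtration value 1.0.
  std::vector<unsigned> sizes = {1, 1};
  std::vector<double> top_cells = {1.0};
  Gudhi::cubical_complex::Bitmap_cubical_complex_base<double> cc(sizes, top_cells);
  // Position 4 is the square itself; its boundary consists of its four edges.
  for (std::size_t face : cc.get_boundary_of_a_cell(4)) {
    std::cout << "face " << face << " has incidence "
              << cc.compute_incidence_between_cells(4, face) << '\n';
  }
}
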
diff --git a/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h b/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
index c3cc93dd..4a0d1c74 100644
--- a/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
+++ b/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
@@ -28,6 +28,8 @@
#include <cmath>
#include <limits> // for numeric_limits<>
#include <vector>
+#include <stdexcept>
+#include <cstddef>
namespace Gudhi {
@@ -41,7 +43,8 @@ namespace cubical_complex {
/**
* @brief Cubical complex with periodic boundary conditions represented as a bitmap.
* @ingroup cubical_complex
- * @details This is a class implementing a bitmap data structure with periodic boundary conditions. Most of the functions are
+ * @details This is a class implementing a bitmap data structure with periodic boundary conditions. Most of the
+ * functions are
* identical to the functions from Bitmap_cubical_complex_base.
* The ones that needed to be updated are the constructors and get_boundary_of_a_cell and get_coboundary_of_a_cell.
*/
@@ -53,7 +56,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
/**
* Default constructor of Bitmap_cubical_complex_periodic_boundary_conditions_base class.
*/
- Bitmap_cubical_complex_periodic_boundary_conditions_base() { }
+ Bitmap_cubical_complex_periodic_boundary_conditions_base() {}
/**
* A constructor of Bitmap_cubical_complex_periodic_boundary_conditions_base class that takes the following
* parameters: (1) vector with numbers of top dimensional cells in all dimensions and (2) vector of booleans. If
@@ -61,8 +64,9 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
* imposed in this direction. In case of false, the periodic boundary conditions will not be imposed in the direction
* i.
*/
- Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& sizes,
- const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed);
+ Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& sizes,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed);
/**
* A constructor of Bitmap_cubical_complex_periodic_boundary_conditions_base class that takes the name of Perseus
* style file as an input. Please consult the documentation about the specification of the file.
@@ -75,9 +79,9 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
* value, that means that periodic boundary conditions are to be imposed in this direction. In case of false, the
* periodic boundary conditions will not be imposed in the direction i.
*/
- Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& dimensions,
- const std::vector<T>& topDimensionalCells,
- const std::vector< bool >& directions_in_which_periodic_b_cond_are_to_be_imposed);
+ Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& dimensions, const std::vector<T>& topDimensionalCells,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed);
/**
* Destructor of the Bitmap_cubical_complex_periodic_boundary_conditions_base class.
@@ -88,21 +92,81 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
/**
* A version of a function that returns the boundary of a given cell for an object of
* Bitmap_cubical_complex_periodic_boundary_conditions_base class.
+ * The boundary elements are guaranteed to be returned so that the
+ * incidence coefficients are alternating.
*/
- virtual std::vector< size_t > get_boundary_of_a_cell(size_t cell) const;
+ virtual std::vector<std::size_t> get_boundary_of_a_cell(std::size_t cell) const;
/**
* A version of a function that returns the coboundary of a given cell for an object of
 * Bitmap_cubical_complex_periodic_boundary_conditions_base class.
+   * Note that, unlike for the boundary, the elements are not guaranteed
+   * to be returned with alternating incidence numbers.
+   * To compute the incidence between cells, use the
+   * compute_incidence_between_cells procedure.
*/
- virtual std::vector< size_t > get_coboundary_of_a_cell(size_t cell) const;
+ virtual std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const;
+
+ /**
+   * This procedure computes incidence numbers between cubes. For a cube \f$A\f$ of
+   * dimension n and a cube \f$B \subset A\f$ of dimension n-1, the incidence
+   * between \f$A\f$ and \f$B\f$ is the integer with which \f$B\f$ appears in the boundary of \f$A\f$.
+   * Note that the first parameter is a cube of dimension n,
+   * and the second parameter is one of its faces, of dimension n-1.
+   * Given \f$A = [b_1,e_1] \times \ldots \times [b_{j-1},e_{j-1}] \times [b_{j},e_{j}] \times [b_{j+1},e_{j+1}]
+   * \times \ldots \times [b_{n},e_{n}] \f$
+   * such that \f$ b_{j} \neq e_{j} \f$,
+   * and \f$B = [b_1,e_1] \times \ldots \times [b_{j-1},e_{j-1}] \times [a,a] \times [b_{j+1},e_{j+1}] \times \ldots
+   * \times [b_{n},e_{n}] \f$,
+   * where \f$ a = b_{j}\f$ or \f$ a = e_{j}\f$, the incidence between \f$A\f$ and \f$B\f$
+   * computed by this procedure is given by the formula
+   * \f$ c\, (-1)^{\sum_{i=1}^{j-1} \dim [b_{i},e_{i}]} \f$,
+   * where \f$ \dim [b_{i},e_{i}] = 0 \f$ if \f$ b_{i}=e_{i} \f$ and 1 otherwise,
+   * and \f$ c = -1 \f$ if \f$ a = b_{j}\f$, \f$ c = 1 \f$ if \f$ a = e_{j}\f$.
+   * @exception std::logic_error In case the cube \f$B\f$ is not an n-1
+   * dimensional face of the cube \f$A\f$.
+ **/
+ virtual int compute_incidence_between_cells(std::size_t coface, std::size_t face) {
+ // first get the counters for coface and face:
+ std::vector<unsigned> coface_counter = this->compute_counter_for_given_cell(coface);
+ std::vector<unsigned> face_counter = this->compute_counter_for_given_cell(face);
+
+ // coface_counter and face_counter should agree at all positions except from one:
+ int number_of_position_in_which_counters_do_not_agree = -1;
+ std::size_t number_of_full_faces_that_comes_before = 0;
+ for (std::size_t i = 0; i != coface_counter.size(); ++i) {
+ if ((coface_counter[i] % 2 == 1) && (number_of_position_in_which_counters_do_not_agree == -1)) {
+ ++number_of_full_faces_that_comes_before;
+ }
+ if (coface_counter[i] != face_counter[i]) {
+ if (number_of_position_in_which_counters_do_not_agree != -1) {
+ std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
+ throw std::logic_error(
+ "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.");
+ }
+ number_of_position_in_which_counters_do_not_agree = i;
+ }
+ }
+
+ int incidence = 1;
+ if (number_of_full_faces_that_comes_before % 2) incidence = -1;
+ // if the face cell is on the right from coface cell:
+ if ((coface_counter[number_of_position_in_which_counters_do_not_agree] + 1 ==
+ face_counter[number_of_position_in_which_counters_do_not_agree]) ||
+ ((coface_counter[number_of_position_in_which_counters_do_not_agree] != 1) &&
+ (face_counter[number_of_position_in_which_counters_do_not_agree] == 0))) {
+ incidence *= -1;
+ }
+
+ return incidence;
+ }
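As an illustration of the sign rule documented above, here is a small self-contained sketch (not part of GUDHI; the helper name and the example counters are made up) that applies the same convention to explicit cubical counters, where even entries mark degenerate directions and odd entries mark full intervals:

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Incidence of an (n-1)-dimensional face inside an n-dimensional coface, both given
// by their cubical counters. Mirrors compute_incidence_between_cells above, including
// the periodic wrap-around case.
int incidence_from_counters(const std::vector<unsigned>& coface, const std::vector<unsigned>& face) {
  int mismatch = -1;
  std::size_t full_faces_before = 0;
  for (std::size_t i = 0; i != coface.size(); ++i) {
    if ((coface[i] % 2 == 1) && (mismatch == -1)) ++full_faces_before;
    if (coface[i] != face[i]) {
      if (mismatch != -1) throw std::logic_error("not a coface-face pair");
      mismatch = static_cast<int>(i);
    }
  }
  if (mismatch == -1) throw std::logic_error("not a coface-face pair");
  int incidence = (full_faces_before % 2) ? -1 : 1;
  // The face sits at the "right" end of the coface, or wraps around in the periodic case.
  if ((coface[mismatch] + 1 == face[mismatch]) || ((coface[mismatch] != 1) && (face[mismatch] == 0)))
    incidence *= -1;
  return incidence;
}

int main() {
  // The square [0,1] x [0,1] has counter (1,1); its bottom edge [0,1] x {0} has counter (1,0).
  std::cout << incidence_from_counters({1, 1}, {1, 0}) << "\n";  // prints 1
  // Its top edge [0,1] x {1} has counter (1,2).
  std::cout << incidence_from_counters({1, 1}, {1, 2}) << "\n";  // prints -1
}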
protected:
- std::vector< bool > directions_in_which_periodic_b_cond_are_to_be_imposed;
+ std::vector<bool> directions_in_which_periodic_b_cond_are_to_be_imposed;
void set_up_containers(const std::vector<unsigned>& sizes) {
unsigned multiplier = 1;
- for (size_t i = 0; i != sizes.size(); ++i) {
+ for (std::size_t i = 0; i != sizes.size(); ++i) {
this->sizes.push_back(sizes[i]);
this->multipliers.push_back(multiplier);
@@ -119,19 +183,23 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& sizes);
Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& dimensions,
const std::vector<T>& topDimensionalCells);
- void construct_complex_based_on_top_dimensional_cells(const std::vector<unsigned>& dimensions,
- const std::vector<T>& topDimensionalCells,
- const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed);
+
+ /**
+ * A procedure used to construct the data structures in the class.
+ **/
+ void construct_complex_based_on_top_dimensional_cells(
+ const std::vector<unsigned>& dimensions, const std::vector<T>& topDimensionalCells,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed);
};
template <typename T>
-void Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::construct_complex_based_on_top_dimensional_cells(const std::vector<unsigned>& dimensions,
- const std::vector<T>& topDimensionalCells,
- const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
+void Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::construct_complex_based_on_top_dimensional_cells(
+ const std::vector<unsigned>& dimensions, const std::vector<T>& topDimensionalCells,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
this->directions_in_which_periodic_b_cond_are_to_be_imposed = directions_in_which_periodic_b_cond_are_to_be_imposed;
this->set_up_containers(dimensions);
- size_t i = 0;
+ std::size_t i = 0;
for (auto it = this->top_dimensional_cells_iterator_begin(); it != this->top_dimensional_cells_iterator_end(); ++it) {
this->get_cell_data(*it) = topDimensionalCells[i];
++i;
@@ -140,14 +208,16 @@ void Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::construct_comp
}
template <typename T>
-Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& sizes,
- const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
+Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& sizes,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
  this->directions_in_which_periodic_b_cond_are_to_be_imposed = directions_in_which_periodic_b_cond_are_to_be_imposed;
this->set_up_containers(sizes);
}
template <typename T>
-Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(const char* perseus_style_file) {
+Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const char* perseus_style_file) {
// for Perseus style files:
bool dbg = false;
@@ -160,7 +230,7 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_comp
std::vector<unsigned> sizes;
sizes.reserve(dimensionOfData);
- for (size_t i = 0; i != dimensionOfData; ++i) {
+ for (std::size_t i = 0; i != dimensionOfData; ++i) {
int size_in_this_dimension;
inFiltration >> size_in_this_dimension;
if (size_in_this_dimension < 0) {
@@ -176,14 +246,12 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_comp
while (!inFiltration.eof()) {
double filtrationLevel;
inFiltration >> filtrationLevel;
- if (inFiltration.eof())break;
+ if (inFiltration.eof()) break;
if (dbg) {
- std::cerr << "Cell of an index : "
- << it.compute_index_in_bitmap()
- << " and dimension: "
- << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
- << " get the value : " << filtrationLevel << std::endl;
+ std::cerr << "Cell of an index : " << it.compute_index_in_bitmap()
+ << " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
+ << " get the value : " << filtrationLevel << std::endl;
}
this->get_cell_data(*it) = filtrationLevel;
++it;
@@ -193,24 +261,24 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_comp
}
template <typename T>
-Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& sizes) {
+Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& sizes) {
this->directions_in_which_periodic_b_cond_are_to_be_imposed = std::vector<bool>(sizes.size(), false);
this->set_up_containers(sizes);
}
template <typename T>
-Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& dimensions,
- const std::vector<T>& topDimensionalCells) {
+Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& dimensions, const std::vector<T>& topDimensionalCells) {
std::vector<bool> directions_in_which_periodic_b_cond_are_to_be_imposed = std::vector<bool>(dimensions.size(), false);
this->construct_complex_based_on_top_dimensional_cells(dimensions, topDimensionalCells,
directions_in_which_periodic_b_cond_are_to_be_imposed);
}
template <typename T>
-Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::
-Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsigned>& dimensions,
- const std::vector<T>& topDimensionalCells,
- const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
+Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_complex_periodic_boundary_conditions_base(
+ const std::vector<unsigned>& dimensions, const std::vector<T>& topDimensionalCells,
+ const std::vector<bool>& directions_in_which_periodic_b_cond_are_to_be_imposed) {
this->construct_complex_based_on_top_dimensional_cells(dimensions, topDimensionalCells,
directions_in_which_periodic_b_cond_are_to_be_imposed);
}
@@ -218,46 +286,65 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base(const std::vector<unsig
// ***********************Methods************************ //
template <typename T>
-std::vector< size_t > Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::get_boundary_of_a_cell(size_t cell) const {
+std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::get_boundary_of_a_cell(
+ std::size_t cell) const {
bool dbg = false;
if (dbg) {
std::cerr << "Computations of boundary of a cell : " << cell << std::endl;
}
- std::vector< size_t > boundary_elements;
- size_t cell1 = cell;
- for (size_t i = this->multipliers.size(); i != 0; --i) {
+ std::vector<std::size_t> boundary_elements;
+ boundary_elements.reserve(this->dimension() * 2);
+ std::size_t cell1 = cell;
+ std::size_t sum_of_dimensions = 0;
+
+ for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell1 / this->multipliers[i - 1];
// this cell has a nonzero length in this direction, therefore we can compute its boundary in this direction.
-
if (position % 2 == 1) {
// if there are no periodic boundary conditions in this direction, we do not have to do anything.
if (!directions_in_which_periodic_b_cond_are_to_be_imposed[i - 1]) {
// std::cerr << "A\n";
- boundary_elements.push_back(cell - this->multipliers[ i - 1 ]);
- boundary_elements.push_back(cell + this->multipliers[ i - 1 ]);
+ if (sum_of_dimensions % 2) {
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ } else {
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ }
if (dbg) {
- std::cerr << cell - this->multipliers[ i - 1 ] << " " << cell + this->multipliers[ i - 1 ] << " ";
+ std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
}
} else {
// periodic boundary conditions are imposed in this direction, therefore we need to check whether we are at the end.
- if (position != 2 * this->sizes[ i - 1 ] - 1) {
+ if (position != 2 * this->sizes[i - 1] - 1) {
// std::cerr << "B\n";
- boundary_elements.push_back(cell - this->multipliers[ i - 1 ]);
- boundary_elements.push_back(cell + this->multipliers[ i - 1 ]);
+ if (sum_of_dimensions % 2) {
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ } else {
+ boundary_elements.push_back(cell + this->multipliers[i - 1]);
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ }
if (dbg) {
- std::cerr << cell - this->multipliers[ i - 1 ] << " " << cell + this->multipliers[ i - 1 ] << " ";
+ std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
}
} else {
// std::cerr << "C\n";
- boundary_elements.push_back(cell - this->multipliers[ i - 1 ]);
- boundary_elements.push_back(cell - (2 * this->sizes[ i - 1 ] - 1) * this->multipliers[ i - 1 ]);
+ if (sum_of_dimensions % 2) {
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ boundary_elements.push_back(cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1]);
+ } else {
+ boundary_elements.push_back(cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1]);
+ boundary_elements.push_back(cell - this->multipliers[i - 1]);
+ }
if (dbg) {
- std::cerr << cell - this->multipliers[ i - 1 ] << " " <<
- cell - (2 * this->sizes[ i - 1 ] - 1) * this->multipliers[ i - 1 ] << " ";
+ std::cerr << cell - this->multipliers[i - 1] << " "
+ << cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1] << " ";
}
}
}
+ ++sum_of_dimensions;
}
cell1 = cell1 % this->multipliers[i - 1];
}
@@ -265,11 +352,12 @@ std::vector< size_t > Bitmap_cubical_complex_periodic_boundary_conditions_base<T
}
template <typename T>
-std::vector< size_t > Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::get_coboundary_of_a_cell(size_t cell) const {
+std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::get_coboundary_of_a_cell(
+ std::size_t cell) const {
std::vector<unsigned> counter = this->compute_counter_for_given_cell(cell);
- std::vector< size_t > coboundary_elements;
- size_t cell1 = cell;
- for (size_t i = this->multipliers.size(); i != 0; --i) {
+ std::vector<std::size_t> coboundary_elements;
+ std::size_t cell1 = cell;
+ for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell1 / this->multipliers[i - 1];
// if the cell has zero length in this direction, then it will have a coboundary in this direction.
if (position % 2 == 0) {
@@ -289,7 +377,7 @@ std::vector< size_t > Bitmap_cubical_complex_periodic_boundary_conditions_base<T
} else {
// in this case counter[i-1] == 0.
coboundary_elements.push_back(cell + this->multipliers[i - 1]);
- coboundary_elements.push_back(cell + (2 * this->sizes[ i - 1 ] - 1) * this->multipliers[i - 1]);
+ coboundary_elements.push_back(cell + (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1]);
}
}
}
diff --git a/include/gudhi/Bottleneck.h b/include/gudhi/Bottleneck.h
index 8c97dce9..7aee07bb 100644
--- a/include/gudhi/Bottleneck.h
+++ b/include/gudhi/Bottleneck.h
@@ -46,7 +46,7 @@ double bottleneck_distance_approx(Persistence_graph& g, double e) {
if (step <= b_lower_bound || step >= b_upper_bound) // Avoid precision problem
break;
m.set_r(step);
- while (m.multi_augment()) {}; // compute a maximum matching (in the graph corresponding to the current r)
+ while (m.multi_augment()) {} // compute a maximum matching (in the graph corresponding to the current r)
if (m.perfect()) {
m = biggest_unperfect;
b_upper_bound = step;
@@ -68,7 +68,7 @@ double bottleneck_distance_exact(Persistence_graph& g) {
while (lower_bound_i != upper_bound_i) {
long step = lower_bound_i + static_cast<long> ((upper_bound_i - lower_bound_i - 1) / alpha);
m.set_r(sd.at(step));
- while (m.multi_augment()) {}; // compute a maximum matching (in the graph corresponding to the current r)
+ while (m.multi_augment()) {} // compute a maximum matching (in the graph corresponding to the current r)
if (m.perfect()) {
m = biggest_unperfect;
upper_bound_i = step;
diff --git a/include/gudhi/Debug_utils.h b/include/gudhi/Debug_utils.h
index 8ed3b7b3..90d3cf47 100644
--- a/include/gudhi/Debug_utils.h
+++ b/include/gudhi/Debug_utils.h
@@ -4,7 +4,7 @@
*
* Author(s): David Salinas
*
- * Copyright (C) 2014 INRIA Sophia Antipolis-Mediterranee (France)
+ * Copyright (C) 2014 INRIA
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,7 +32,7 @@
// GUDHI_CHECK throw an exception if expression is false in debug mode, but does nothing in release mode
// Could assert in release mode, but cmake sets NDEBUG (for "NO DEBUG") in this mode, means assert does nothing.
#ifdef GUDHI_DEBUG
- #define GUDHI_CHECK(expression, excpt) if ((expression) == 0) throw excpt
+ #define GUDHI_CHECK(expression, excpt) ((expression) ? (void) 0 : (throw excpt))
#define GUDHI_CHECK_code(CODE) CODE
#else
#define GUDHI_CHECK(expression, excpt) (void) 0
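For reference, a minimal usage sketch of the macro (the function and message below are illustrative, not taken from GUDHI): the check throws in debug builds and compiles away in release builds.

#include <stdexcept>
#include <gudhi/Debug_utils.h>

// Validate an argument only when GUDHI_DEBUG is defined; otherwise this expands to (void) 0.
void set_radius(double r) {
  GUDHI_CHECK(r >= 0., std::invalid_argument("radius must be non-negative"));
  // ... use r ...
}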
diff --git a/include/gudhi/Edge_contraction.h b/include/gudhi/Edge_contraction.h
index 61f2d945..cf9a2c27 100644
--- a/include/gudhi/Edge_contraction.h
+++ b/include/gudhi/Edge_contraction.h
@@ -210,7 +210,6 @@ int main (int argc, char *argv[])
}
\endcode
-
\verbatim
./example/Contraction/RipsContraction ../../data/SO3_10000.off 0.3
[ 50%] [100%] Built target SkeletonBlockerIteration
@@ -223,9 +222,6 @@ Time to simplify and enumerate simplices:
3.166621s wall, 3.150000s user + 0.010000s system = 3.160000s CPU (99.8%)
\endverbatim
-
-
-\copyright GNU General Public License v3.
*/
/** @} */ // end defgroup
} // namespace contraction
diff --git a/include/gudhi/GIC.h b/include/gudhi/GIC.h
new file mode 100644
index 00000000..40ff7a4a
--- /dev/null
+++ b/include/gudhi/GIC.h
@@ -0,0 +1,1298 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author: Mathieu Carriere
+ *
+ * Copyright (C) 2017 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GIC_H_
+#define GIC_H_
+
+#include <gudhi/Debug_utils.h>
+#include <gudhi/graph_simplicial_complex.h>
+#include <gudhi/reader_utils.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Rips_complex.h>
+#include <gudhi/Points_off_io.h>
+#include <gudhi/distance_functions.h>
+#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/Bottleneck.h>
+
+#include <boost/config.hpp>
+#include <boost/graph/graph_traits.hpp>
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/connected_components.hpp>
+#include <boost/graph/dijkstra_shortest_paths.hpp>
+#include <boost/graph/subgraph.hpp>
+#include <boost/graph/graph_utility.hpp>
+
+#include <iostream>
+#include <fstream>    // for std::ifstream, std::ofstream
+#include <sstream>    // for std::stringstream
+#include <iterator>   // for std::istream_iterator
+#include <vector>
+#include <map>
+#include <string>
+#include <limits>     // for numeric_limits
+#include <utility>    // for std::pair<>
+#include <algorithm>  // for std::max
+#include <random>
+#include <cassert>
+#include <cmath>
+#include <cstring>    // for strcmp
+#include <cctype>     // for isspace
+
+namespace Gudhi {
+
+namespace cover_complex {
+
+using Simplex_tree = Gudhi::Simplex_tree<>;
+using Filtration_value = Simplex_tree::Filtration_value;
+using Rips_complex = Gudhi::rips_complex::Rips_complex<Filtration_value>;
+using Persistence_diagram = std::vector<std::pair<double, double> >;
+using Graph = boost::subgraph<
+ boost::adjacency_list<boost::setS, boost::vecS, boost::undirectedS, boost::no_property,
+ boost::property<boost::edge_index_t, int, boost::property<boost::edge_weight_t, double> > > >;
+using Vertex_t = boost::graph_traits<Graph>::vertex_descriptor;
+using Index_map = boost::property_map<Graph, boost::vertex_index_t>::type;
+using Weight_map = boost::property_map<Graph, boost::edge_weight_t>::type;
+
+/**
+ * \class Cover_complex
+ * \brief Cover complex data structure.
+ *
+ * \ingroup cover_complex
+ *
+ * \details
+ * The data structure is a simplicial complex, representing a
+ * Graph Induced simplicial Complex (GIC) or a Nerve,
+ * and whose simplices are computed with a cover C of a point
+ * cloud P, which often comes from the preimages of intervals
+ * covering the image of a function f defined on P.
+ * These intervals are parameterized by their resolution
+ * (either their length or their number)
+ * and their gain (percentage of overlap).
+ * To compute a GIC, one also needs a graph G built on top of P,
+ * whose cliques with vertices belonging to different elements of C
+ * correspond to the simplices of the GIC.
+ *
+ */
+template <typename Point>
+class Cover_complex {
+ private:
+ bool verbose = false; // whether to display information.
+ std::string type; // Nerve or GIC
+
+ std::vector<Point> point_cloud; // input point cloud.
+ std::vector<std::vector<double> > distances; // all pairwise distances.
+ int maximal_dim; // maximal dimension of output simplicial complex.
+ int data_dimension; // dimension of input data.
+ int n; // number of points.
+
+ std::map<int, double> func; // function used to compute the output simplicial complex.
+ std::map<int, double>
+ func_color; // function used to compute the colors of the nodes of the output simplicial complex.
+ bool functional_cover = false; // whether we use a cover with preimages of a function or not.
+
+ Graph one_skeleton_OFF; // one-skeleton given by the input OFF file (if it exists).
+ Graph one_skeleton; // one-skeleton used to compute the connected components.
+ std::vector<Vertex_t> vertices; // vertices of one_skeleton.
+
+ std::vector<std::vector<int> > simplices; // simplices of output simplicial complex.
+ std::vector<int> voronoi_subsamples; // Voronoi germs (in case of Voronoi cover).
+
+ Persistence_diagram PD;
+ std::vector<double> distribution;
+
+ std::map<int, std::vector<int> >
+ cover; // function associating to each data point its vectors of cover elements to which it belongs.
+ std::map<int, std::vector<int> >
+ cover_back; // inverse of cover, in order to get the data points associated to a specific cover element.
+ std::map<int, double> cover_std; // standard function (induced by func) used to compute the extended persistence
+ // diagram of the output simplicial complex.
+ std::map<int, int>
+ cover_fct; // integer-valued function that allows to state if two elements of the cover are consecutive or not.
+ std::map<int, std::pair<int, double> >
+ cover_color; // size and coloring (induced by func_color) of the vertices of the output simplicial complex.
+
+ int resolution_int = -1;
+ double resolution_double = -1;
+ double gain = -1;
+ double rate_constant = 10; // Constant in the subsampling.
+ double rate_power = 0.001; // Power in the subsampling.
+ int mask = 0; // Ignore nodes containing less than mask points.
+
+ std::map<int, int> name2id, name2idinv;
+
+ std::string cover_name;
+ std::string point_cloud_name;
+ std::string color_name;
+
+ // Point comparator
+ struct Less {
+ Less(std::map<int, double> func) { Fct = func; }
+ std::map<int, double> Fct;
+ bool operator()(int a, int b) {
+ if (Fct[a] == Fct[b])
+ return a < b;
+ else
+ return Fct[a] < Fct[b];
+ }
+ };
+
+ // Remove all edges of a graph.
+ void remove_edges(Graph& G) {
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(G); ei != ei_end; ++ei) boost::remove_edge(*ei, G);
+ }
+
+ // Find random number in [0,1].
+ double GetUniform() {
+ thread_local std::default_random_engine re;
+ thread_local std::uniform_real_distribution<double> Dist(0, 1);
+ return Dist(re);
+ }
+
+  // Subsample sampleSize indices from {0, ..., populationSize - 1} without replacement (selection sampling, Knuth's Algorithm S).
+ void SampleWithoutReplacement(int populationSize, int sampleSize, std::vector<int>& samples) {
+ int t = 0;
+ int m = 0;
+ double u;
+ while (m < sampleSize) {
+ u = GetUniform();
+ if ((populationSize - t) * u >= sampleSize - m) {
+ t++;
+ } else {
+ samples[m] = t;
+ t++;
+ m++;
+ }
+ }
+ }
+
+ // *******************************************************************************************************************
+ // Utils.
+ // *******************************************************************************************************************
+
+ public:
+  /** \brief Specifies the type of the output simplicial complex.
+ *
+ * @param[in] t std::string (either "GIC" or "Nerve").
+ *
+ */
+ void set_type(const std::string& t) { type = t; }
+
+ public:
+ /** \brief Specifies whether the program should display information or not.
+ *
+ * @param[in] verb boolean (true = display info, false = do not display info).
+ *
+ */
+ void set_verbose(bool verb = false) { verbose = verb; }
+
+ public:
+ /** \brief Sets the constants used to subsample the data set. These constants are
+ * explained in \cite Carriere17c.
+ *
+ * @param[in] constant double.
+ * @param[in] power double.
+ *
+ */
+ void set_subsampling(double constant, double power) {
+ rate_constant = constant;
+ rate_power = power;
+ }
+
+ public:
+  /** \brief Sets the mask, a threshold integer: nodes of the complex containing at most this number of data
+   * points are not displayed.
+ *
+ * @param[in] nodemask integer.
+ *
+ */
+ void set_mask(int nodemask) { mask = nodemask; }
+
+ public:
+ /** \brief Reads and stores the input point cloud.
+ *
+ * @param[in] off_file_name name of the input .OFF or .nOFF file.
+ *
+ */
+ bool read_point_cloud(const std::string& off_file_name) {
+ point_cloud_name = off_file_name;
+ std::ifstream input(off_file_name);
+ std::string line;
+
+ char comment = '#';
+ while (comment == '#') {
+ std::getline(input, line);
+ if (!line.empty() && !all_of(line.begin(), line.end(), (int (*)(int))isspace))
+ comment = line[line.find_first_not_of(' ')];
+ }
+ if (strcmp((char*)line.c_str(), "nOFF") == 0) {
+ comment = '#';
+ while (comment == '#') {
+ std::getline(input, line);
+ if (!line.empty() && !all_of(line.begin(), line.end(), (int (*)(int))isspace))
+ comment = line[line.find_first_not_of(' ')];
+ }
+ std::stringstream stream(line);
+ stream >> data_dimension;
+ } else {
+ data_dimension = 3;
+ }
+
+ comment = '#';
+ int numedges, numfaces, i, dim;
+ while (comment == '#') {
+ std::getline(input, line);
+ if (!line.empty() && !all_of(line.begin(), line.end(), (int (*)(int))isspace))
+ comment = line[line.find_first_not_of(' ')];
+ }
+ std::stringstream stream(line);
+ stream >> n;
+ stream >> numfaces;
+ stream >> numedges;
+
+ i = 0;
+ while (i < n) {
+ std::getline(input, line);
+ if (!line.empty() && line[line.find_first_not_of(' ')] != '#' &&
+ !all_of(line.begin(), line.end(), (int (*)(int))isspace)) {
+ std::stringstream iss(line);
+ std::vector<double> point;
+ point.assign(std::istream_iterator<double>(iss), std::istream_iterator<double>());
+ point_cloud.emplace_back(point.begin(), point.begin() + data_dimension);
+ boost::add_vertex(one_skeleton_OFF);
+ vertices.push_back(boost::add_vertex(one_skeleton));
+ i++;
+ }
+ }
+
+ i = 0;
+ while (i < numfaces) {
+ std::getline(input, line);
+ if (!line.empty() && line[line.find_first_not_of(' ')] != '#' &&
+ !all_of(line.begin(), line.end(), (int (*)(int))isspace)) {
+ std::vector<int> simplex;
+ std::stringstream iss(line);
+ simplex.assign(std::istream_iterator<int>(iss), std::istream_iterator<int>());
+ dim = simplex[0];
+ for (int j = 1; j <= dim; j++)
+ for (int k = j + 1; k <= dim; k++)
+ boost::add_edge(vertices[simplex[j]], vertices[simplex[k]], one_skeleton_OFF);
+ i++;
+ }
+ }
+
+ return input.is_open();
+ }
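For reference, a minimal plain OFF input accepted by this parser (three points of R^3 and one triangular face; the header line gives the numbers of vertices, faces and edges):

OFF
3 1 0
0 0 0
1 0 0
0 1 0
3 0 1 2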
+
+ // *******************************************************************************************************************
+ // Graphs.
+ // *******************************************************************************************************************
+
+ public: // Set graph from file.
+ /** \brief Creates a graph G from a file containing the edges.
+ *
+ * @param[in] graph_file_name name of the input graph file.
+ * The graph file contains one edge per line,
+ * each edge being represented by the IDs of its two nodes.
+ *
+ */
+ void set_graph_from_file(const std::string& graph_file_name) {
+ remove_edges(one_skeleton);
+ int neighb;
+ std::ifstream input(graph_file_name);
+ std::string line;
+ int source;
+ while (std::getline(input, line)) {
+ std::stringstream stream(line);
+ stream >> source;
+ while (stream >> neighb) boost::add_edge(vertices[source], vertices[neighb], one_skeleton);
+ }
+ }
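As an illustration, a small hypothetical graph file describing a 3-cycle on nodes 0, 1 and 2, one edge per line:

0 1
1 2
2 0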
+
+ public: // Set graph from OFF file.
+ /** \brief Creates a graph G from the triangulation given by the input .OFF file.
+ *
+ */
+ void set_graph_from_OFF() {
+ remove_edges(one_skeleton);
+ if (num_edges(one_skeleton_OFF))
+ one_skeleton = one_skeleton_OFF;
+ else
+ std::cout << "No triangulation read in OFF file!" << std::endl;
+ }
+
+ public: // Set graph from Rips complex.
+ /** \brief Creates a graph G from a Rips complex.
+ *
+ * @param[in] threshold threshold value for the Rips complex.
+ * @param[in] distance distance used to compute the Rips complex.
+ *
+ */
+ template <typename Distance>
+ void set_graph_from_rips(double threshold, Distance distance) {
+ remove_edges(one_skeleton);
+ if (distances.size() == 0) compute_pairwise_distances(distance);
+ for (int i = 0; i < n; i++) {
+ for (int j = i + 1; j < n; j++) {
+ if (distances[i][j] <= threshold) {
+ boost::add_edge(vertices[i], vertices[j], one_skeleton);
+ boost::put(boost::edge_weight, one_skeleton, boost::edge(vertices[i], vertices[j], one_skeleton).first,
+ distances[i][j]);
+ }
+ }
+ }
+ }
+
+ public:
+ void set_graph_weights() {
+ Index_map index = boost::get(boost::vertex_index, one_skeleton);
+ Weight_map weight = boost::get(boost::edge_weight, one_skeleton);
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei)
+ boost::put(weight, *ei,
+ distances[index[boost::source(*ei, one_skeleton)]][index[boost::target(*ei, one_skeleton)]]);
+ }
+
+ public: // Pairwise distances.
+ /** \private \brief Computes all pairwise distances.
+ */
+ template <typename Distance>
+ void compute_pairwise_distances(Distance ref_distance) {
+ double d;
+ std::vector<double> zeros(n);
+ for (int i = 0; i < n; i++) distances.push_back(zeros);
+ std::string distance = point_cloud_name + "_dist";
+ std::ifstream input(distance, std::ios::out | std::ios::binary);
+
+ if (input.good()) {
+ if (verbose) std::cout << "Reading distances..." << std::endl;
+ for (int i = 0; i < n; i++) {
+ for (int j = i; j < n; j++) {
+ input.read((char*)&d, 8);
+ distances[i][j] = d;
+ distances[j][i] = d;
+ }
+ }
+ input.close();
+ } else {
+ if (verbose) std::cout << "Computing distances..." << std::endl;
+ input.close();
+ std::ofstream output(distance, std::ios::out | std::ios::binary);
+ for (int i = 0; i < n; i++) {
+ int state = (int)floor(100 * (i * 1.0 + 1) / n) % 10;
+ if (state == 0 && verbose) std::cout << "\r" << state << "%" << std::flush;
+ for (int j = i; j < n; j++) {
+ double dis = ref_distance(point_cloud[i], point_cloud[j]);
+ distances[i][j] = dis;
+ distances[j][i] = dis;
+ output.write((char*)&dis, 8);
+ }
+ }
+ output.close();
+ if (verbose) std::cout << std::endl;
+ }
+ }
+
+ public: // Automatic tuning of Rips complex.
+ /** \brief Creates a graph G from a Rips complex whose threshold value is automatically tuned with subsampling---see
+ * \cite Carriere17c.
+ *
+ * @param[in] distance distance between data points.
+   * @param[in] N number of subsampling iterations (the default reasonable value is 100, but there is no guarantee on
+ * how to choose it).
+ * @result delta threshold used for computing the Rips complex.
+ *
+ */
+ template <typename Distance>
+ double set_graph_from_automatic_rips(Distance distance, int N = 100) {
+ int m = floor(n / std::exp((1 + rate_power) * std::log(std::log(n) / std::log(rate_constant))));
+ m = std::min(m, n - 1);
+ std::vector<int> samples(m);
+ double delta = 0;
+
+ if (verbose) std::cout << n << " points in R^" << data_dimension << std::endl;
+ if (verbose) std::cout << "Subsampling " << m << " points" << std::endl;
+
+ if (distances.size() == 0) compute_pairwise_distances(distance);
+
+ // #pragma omp parallel for
+ for (int i = 0; i < N; i++) {
+ SampleWithoutReplacement(n, m, samples);
+ double hausdorff_dist = 0;
+ for (int j = 0; j < n; j++) {
+ double mj = distances[j][samples[0]];
+ for (int k = 1; k < m; k++) mj = std::min(mj, distances[j][samples[k]]);
+ hausdorff_dist = std::max(hausdorff_dist, mj);
+ }
+ delta += hausdorff_dist / N;
+ }
+
+ if (verbose) std::cout << "delta = " << delta << std::endl;
+ set_graph_from_rips(delta, distance);
+ return delta;
+ }
+
+ // *******************************************************************************************************************
+ // Functions.
+ // *******************************************************************************************************************
+
+ public: // Set function from file.
+ /** \brief Creates the function f from a file containing the function values.
+ *
+ * @param[in] func_file_name name of the input function file.
+ *
+ */
+ void set_function_from_file(const std::string& func_file_name) {
+ int i = 0;
+ std::ifstream input(func_file_name);
+ std::string line;
+ double f;
+ while (std::getline(input, line)) {
+ std::stringstream stream(line);
+ stream >> f;
+ func.emplace(i, f);
+ i++;
+ }
+ functional_cover = true;
+ cover_name = func_file_name;
+ }
+
+ public: // Set function from kth coordinate
+ /** \brief Creates the function f from the k-th coordinate of the point cloud P.
+ *
+ * @param[in] k coordinate to use (start at 0).
+ *
+ */
+ void set_function_from_coordinate(int k) {
+ for (int i = 0; i < n; i++) func.emplace(i, point_cloud[i][k]);
+ functional_cover = true;
+ cover_name = "coordinate " + std::to_string(k);
+ }
+
+ public: // Set function from vector.
+ /** \brief Creates the function f from a vector stored in memory.
+ *
+ * @param[in] function input vector of values.
+ *
+ */
+ template <class InputRange>
+ void set_function_from_range(InputRange const& function) {
+ for (int i = 0; i < n; i++) func.emplace(i, function[i]);
+ functional_cover = true;
+ }
+
+ // *******************************************************************************************************************
+ // Covers.
+ // *******************************************************************************************************************
+
+ public: // Automatic tuning of resolution.
+ /** \brief Computes the optimal length of intervals
+ * (i.e. the smallest interval length avoiding discretization artifacts---see \cite Carriere17c) for a functional
+ * cover.
+ *
+ * @result reso interval length used to compute the cover.
+ *
+ */
+ double set_automatic_resolution() {
+ if (!functional_cover) {
+ std::cout << "Cover needs to come from the preimages of a function." << std::endl;
+ return 0;
+ }
+ if (type != "Nerve" && type != "GIC") {
+ std::cout << "Type of complex needs to be specified." << std::endl;
+ return 0;
+ }
+
+ double reso = 0;
+ Index_map index = boost::get(boost::vertex_index, one_skeleton);
+
+ if (type == "GIC") {
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei)
+ reso = std::max(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] -
+ func[index[boost::target(*ei, one_skeleton)]]));
+ if (verbose) std::cout << "resolution = " << reso << std::endl;
+ resolution_double = reso;
+ }
+
+ if (type == "Nerve") {
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei)
+ reso = std::max(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] -
+ func[index[boost::target(*ei, one_skeleton)]]) /
+ gain);
+ if (verbose) std::cout << "resolution = " << reso << std::endl;
+ resolution_double = reso;
+ }
+
+ return reso;
+ }
+
+ public:
+ /** \brief Sets a length of intervals from a value stored in memory.
+ *
+ * @param[in] reso length of intervals.
+ *
+ */
+ void set_resolution_with_interval_length(double reso) { resolution_double = reso; }
+ /** \brief Sets a number of intervals from a value stored in memory.
+ *
+ * @param[in] reso number of intervals.
+ *
+ */
+ void set_resolution_with_interval_number(int reso) { resolution_int = reso; }
+ /** \brief Sets a gain from a value stored in memory (default value 0.3).
+ *
+ * @param[in] g gain.
+ *
+ */
+ void set_gain(double g = 0.3) { gain = g; }
+
+ public: // Set cover with preimages of function.
+ /** \brief Creates a cover C from the preimages of the function f.
+ *
+ */
+ void set_cover_from_function() {
+ if (resolution_double == -1 && resolution_int == -1) {
+ std::cout << "Number and/or length of intervals not specified" << std::endl;
+ return;
+ }
+ if (gain == -1) {
+ std::cout << "Gain not specified" << std::endl;
+ return;
+ }
+
+ // Read function values and compute min and max
+ double minf = std::numeric_limits<float>::max();
+ double maxf = std::numeric_limits<float>::lowest();
+ for (int i = 0; i < n; i++) {
+ minf = std::min(minf, func[i]);
+ maxf = std::max(maxf, func[i]);
+ }
+ if (verbose) std::cout << "Min function value = " << minf << " and Max function value = " << maxf << std::endl;
+
+ // Compute cover of im(f)
+ std::vector<std::pair<double, double> > intervals;
+ int res;
+
+ if (resolution_double == -1) { // Case we use an integer for the number of intervals.
+ double incr = (maxf - minf) / resolution_int;
+ double x = minf;
+ double alpha = (incr * gain) / (2 - 2 * gain);
+ double y = minf + incr + alpha;
+ std::pair<double, double> interm(x, y);
+ intervals.push_back(interm);
+ for (int i = 1; i < resolution_int - 1; i++) {
+ x = minf + i * incr - alpha;
+ y = minf + (i + 1) * incr + alpha;
+ std::pair<double, double> inter(x, y);
+ intervals.push_back(inter);
+ }
+ x = minf + (resolution_int - 1) * incr - alpha;
+ y = maxf;
+ std::pair<double, double> interM(x, y);
+ intervals.push_back(interM);
+ res = intervals.size();
+ if (verbose) {
+ for (int i = 0; i < res; i++)
+ std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ << std::endl;
+ }
+ } else {
+ if (resolution_int == -1) { // Case we use a double for the length of the intervals.
+ double x = minf;
+ double y = x + resolution_double;
+ while (y <= maxf && maxf - (y - gain * resolution_double) >= resolution_double) {
+ std::pair<double, double> inter(x, y);
+ intervals.push_back(inter);
+ x = y - gain * resolution_double;
+ y = x + resolution_double;
+ }
+ std::pair<double, double> interM(x, maxf);
+ intervals.push_back(interM);
+ res = intervals.size();
+ if (verbose) {
+ for (int i = 0; i < res; i++)
+ std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ << std::endl;
+ }
+ } else { // Case we use an integer and a double for the length of the intervals.
+ double x = minf;
+ double y = x + resolution_double;
+ int count = 0;
+ while (count < resolution_int && y <= maxf && maxf - (y - gain * resolution_double) >= resolution_double) {
+ std::pair<double, double> inter(x, y);
+ intervals.push_back(inter);
+ count++;
+ x = y - gain * resolution_double;
+ y = x + resolution_double;
+ }
+ res = intervals.size();
+ if (verbose) {
+ for (int i = 0; i < res; i++)
+ std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ << std::endl;
+ }
+ }
+ }
+
+ // Sort points according to function values
+ std::vector<int> points(n);
+ for (int i = 0; i < n; i++) points[i] = i;
+ std::sort(points.begin(), points.end(), Less(this->func));
+
+ int id = 0;
+ int pos = 0;
+ Index_map index = boost::get(boost::vertex_index, one_skeleton); // int maxc = -1;
+ std::map<int, std::vector<int> > preimages;
+ std::map<int, double> funcstd;
+
+ if (verbose) std::cout << "Computing preimages..." << std::endl;
+ for (int i = 0; i < res; i++) {
+ // Find points in the preimage
+ std::pair<double, double> inter1 = intervals[i];
+ int tmp = pos;
+ double u, v;
+
+ if (i != res - 1) {
+ if (i != 0) {
+ std::pair<double, double> inter3 = intervals[i - 1];
+          while (tmp != n && func[points[tmp]] < inter3.second) {  // test the index before dereferencing
+ preimages[i].push_back(points[tmp]);
+ tmp++;
+ }
+ u = inter3.second;
+ } else {
+ u = inter1.first;
+ }
+
+ std::pair<double, double> inter2 = intervals[i + 1];
+        while (tmp != n && func[points[tmp]] < inter2.first) {
+ preimages[i].push_back(points[tmp]);
+ tmp++;
+ }
+ v = inter2.first;
+ pos = tmp;
+        while (tmp != n && func[points[tmp]] < inter1.second) {
+ preimages[i].push_back(points[tmp]);
+ tmp++;
+ }
+
+ } else {
+ std::pair<double, double> inter3 = intervals[i - 1];
+        while (tmp != n && func[points[tmp]] < inter3.second) {
+ preimages[i].push_back(points[tmp]);
+ tmp++;
+ }
+ while (tmp != n) {
+ preimages[i].push_back(points[tmp]);
+ tmp++;
+ }
+ u = inter3.second;
+ v = inter1.second;
+ }
+
+ funcstd[i] = 0.5 * (u + v);
+ }
+
+ if (verbose) std::cout << "Computing connected components..." << std::endl;
+ // #pragma omp parallel for
+ for (int i = 0; i < res; i++) {
+ // Compute connected components
+ Graph G = one_skeleton.create_subgraph();
+ int num = preimages[i].size();
+ std::vector<int> component(num);
+ for (int j = 0; j < num; j++) boost::add_vertex(index[vertices[preimages[i][j]]], G);
+ boost::connected_components(G, &component[0]);
+ int max = 0;
+
+ // For each point in preimage
+ for (int j = 0; j < num; j++) {
+ // Update number of components in preimage
+ if (component[j] > max) max = component[j];
+
+ // Identify component with Cantor polynomial N^2 -> N
+ int identifier = (std::pow(i + component[j], 2) + 3 * i + component[j]) / 2;
+
+ // Update covers
+ cover[preimages[i][j]].push_back(identifier);
+ cover_back[identifier].push_back(preimages[i][j]);
+ cover_fct[identifier] = i;
+ cover_std[identifier] = funcstd[i];
+ cover_color[identifier].second += func_color[preimages[i][j]];
+ cover_color[identifier].first += 1;
+ }
+
+ // Maximal dimension is total number of connected components
+ id += max + 1;
+ }
+
+ maximal_dim = id - 1;
+ for (std::map<int, std::pair<int, double> >::iterator iit = cover_color.begin(); iit != cover_color.end(); iit++)
+ iit->second.second /= iit->second.first;
+ }
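To make the resolution/gain parameterization concrete, here is a small standalone sketch (illustrative values only, assuming at least two intervals) of the branch above that builds a fixed number of equal-length intervals overlapping by a fraction g of their length:

#include <iostream>
#include <utility>
#include <vector>

// Same construction as the "number of intervals" case above: res intervals of equal
// length covering [minf, maxf], consecutive intervals overlapping by a fraction g of
// their length (the overlap 2 * alpha equals g times the interval length incr + 2 * alpha).
std::vector<std::pair<double, double> > interval_cover(double minf, double maxf, int res, double g) {
  std::vector<std::pair<double, double> > intervals;
  double incr = (maxf - minf) / res;
  double alpha = (incr * g) / (2 - 2 * g);
  intervals.emplace_back(minf, minf + incr + alpha);
  for (int i = 1; i < res - 1; i++)
    intervals.emplace_back(minf + i * incr - alpha, minf + (i + 1) * incr + alpha);
  intervals.emplace_back(minf + (res - 1) * incr - alpha, maxf);
  return intervals;
}

int main() {
  for (auto const& I : interval_cover(0., 1., 3, 0.3))
    std::cout << "[" << I.first << ", " << I.second << "]\n";
  // Prints approximately [0, 0.405], [0.262, 0.738], [0.595, 1].
}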
+
+ public: // Set cover from file.
+  /** \brief Creates the cover C from a file containing the cover elements of each point (the lines have to be in the
+   * same order as the points in the input point cloud file).
+ *
+ * @param[in] cover_file_name name of the input cover file.
+ *
+ */
+ void set_cover_from_file(const std::string& cover_file_name) {
+ int i = 0;
+ int cov;
+ std::vector<int> cov_elts, cov_number;
+ std::ifstream input(cover_file_name);
+ std::string line;
+ while (std::getline(input, line)) {
+ cov_elts.clear();
+ std::stringstream stream(line);
+ while (stream >> cov) {
+ cov_elts.push_back(cov);
+ cov_number.push_back(cov);
+ cover_fct[cov] = cov;
+ cover_color[cov].second += func_color[i];
+ cover_color[cov].first++;
+ cover_back[cov].push_back(i);
+ }
+ cover[i] = cov_elts;
+ i++;
+ }
+
+ std::sort(cov_number.begin(), cov_number.end());
+ std::vector<int>::iterator it = std::unique(cov_number.begin(), cov_number.end());
+ cov_number.resize(std::distance(cov_number.begin(), it));
+
+ maximal_dim = cov_number.size() - 1;
+ for (int i = 0; i <= maximal_dim; i++) cover_color[i].second /= cover_color[i].first;
+ cover_name = cover_file_name;
+ }
+
+ public: // Set cover from Voronoi
+ /** \brief Creates the cover C from the Voronoï cells of a subsampling of the point cloud.
+ *
+ * @param[in] distance distance between the points.
+ * @param[in] m number of points in the subsample.
+ *
+ */
+ template <typename Distance>
+ void set_cover_from_Voronoi(Distance distance, int m = 100) {
+ voronoi_subsamples.resize(m);
+ SampleWithoutReplacement(n, m, voronoi_subsamples);
+ if (distances.size() == 0) compute_pairwise_distances(distance);
+ set_graph_weights();
+ Weight_map weight = boost::get(boost::edge_weight, one_skeleton);
+ Index_map index = boost::get(boost::vertex_index, one_skeleton);
+ std::vector<double> mindist(n);
+ for (int j = 0; j < n; j++) mindist[j] = std::numeric_limits<double>::max();
+
+ // Compute the geodesic distances to subsamples with Dijkstra
+ // #pragma omp parallel for
+ for (int i = 0; i < m; i++) {
+ if (verbose) std::cout << "Computing geodesic distances to seed " << i << "..." << std::endl;
+ int seed = voronoi_subsamples[i];
+ std::vector<double> dmap(n);
+ boost::dijkstra_shortest_paths(
+ one_skeleton, vertices[seed],
+ boost::weight_map(weight).distance_map(boost::make_iterator_property_map(dmap.begin(), index)));
+
+ for (int j = 0; j < n; j++)
+ if (mindist[j] > dmap[j]) {
+ mindist[j] = dmap[j];
+ if (cover[j].size() == 0)
+ cover[j].push_back(i);
+ else
+ cover[j][0] = i;
+ }
+ }
+
+ for (int i = 0; i < n; i++) {
+ cover_back[cover[i][0]].push_back(i);
+ cover_color[cover[i][0]].second += func_color[i];
+ cover_color[cover[i][0]].first++;
+ }
+ for (int i = 0; i < m; i++) cover_color[i].second /= cover_color[i].first;
+ maximal_dim = m - 1;
+ cover_name = "Voronoi";
+ }
+
+ public: // return subset of data corresponding to a node
+ /** \brief Returns the data subset corresponding to a specific node of the created complex.
+ *
+ * @param[in] c ID of the node.
+ * @result cover_back(c) vector of IDs of data points.
+ *
+ */
+ const std::vector<int>& subpopulation(int c) { return cover_back[name2idinv[c]]; }
+
+ // *******************************************************************************************************************
+ // Visualization.
+ // *******************************************************************************************************************
+
+ public: // Set color from file.
+ /** \brief Computes the function used to color the nodes of the simplicial complex from a file containing the function
+ * values.
+ *
+ * @param[in] color_file_name name of the input color file.
+ *
+ */
+ void set_color_from_file(const std::string& color_file_name) {
+ int i = 0;
+ std::ifstream input(color_file_name);
+ std::string line;
+ double f;
+ while (std::getline(input, line)) {
+ std::stringstream stream(line);
+ stream >> f;
+ func_color.emplace(i, f);
+ i++;
+ }
+ color_name = color_file_name;
+ }
+
+ public: // Set color from kth coordinate
+ /** \brief Computes the function used to color the nodes of the simplicial complex from the k-th coordinate.
+ *
+ * @param[in] k coordinate to use (start at 0).
+ *
+ */
+ void set_color_from_coordinate(int k = 0) {
+ for (int i = 0; i < n; i++) func_color[i] = point_cloud[i][k];
+ color_name = "coordinate ";
+ color_name.append(std::to_string(k));
+ }
+
+ public: // Set color from vector.
+ /** \brief Computes the function used to color the nodes of the simplicial complex from a vector stored in memory.
+ *
+ * @param[in] color input vector of values.
+ *
+ */
+ void set_color_from_vector(std::vector<double> color) {
+ for (unsigned int i = 0; i < color.size(); i++) func_color[i] = color[i];
+ }
+
+ public: // Create a .dot file that can be compiled with neato to produce a .pdf file.
+  /** \brief Once the simplicial complex is computed, writes a .dot file (named after the point cloud, with the
+   * suffix _sc.dot) that can be processed with neato (part of the graphviz package)
+   * to produce a .pdf visualization of its 1-skeleton.
+ */
+ void plot_DOT() {
+ std::string mapp = point_cloud_name + "_sc.dot";
+ std::ofstream graphic(mapp);
+
+ double maxv = std::numeric_limits<double>::lowest();
+ double minv = std::numeric_limits<double>::max();
+ for (std::map<int, std::pair<int, double> >::iterator iit = cover_color.begin(); iit != cover_color.end(); iit++) {
+ maxv = std::max(maxv, iit->second.second);
+ minv = std::min(minv, iit->second.second);
+ }
+
+ int k = 0;
+ std::vector<int> nodes;
+ nodes.clear();
+
+ graphic << "graph GIC {" << std::endl;
+ int id = 0;
+ for (std::map<int, std::pair<int, double> >::iterator iit = cover_color.begin(); iit != cover_color.end(); iit++) {
+ if (iit->second.first > mask) {
+ nodes.push_back(iit->first);
+ name2id[iit->first] = id;
+ name2idinv[id] = iit->first;
+ id++;
+ graphic << name2id[iit->first] << "[shape=circle fontcolor=black color=black label=\"" << name2id[iit->first]
+ << ":" << iit->second.first << "\" style=filled fillcolor=\""
+ << (1 - (maxv - iit->second.second) / (maxv - minv)) * 0.6 << ", 1, 1\"]" << std::endl;
+ k++;
+ }
+ }
+ int ke = 0;
+ int num_simplices = simplices.size();
+ for (int i = 0; i < num_simplices; i++)
+ if (simplices[i].size() == 2) {
+ if (cover_color[simplices[i][0]].first > mask && cover_color[simplices[i][1]].first > mask) {
+ graphic << " " << name2id[simplices[i][0]] << " -- " << name2id[simplices[i][1]] << " [weight=15];"
+ << std::endl;
+ ke++;
+ }
+ }
+ graphic << "}";
+ graphic.close();
+ std::cout << mapp << " file generated. It can be visualized with e.g. neato." << std::endl;
+ }
+
+ public: // Create a .txt file that can be compiled with KeplerMapper.
+  /** \brief Writes a .txt file (named after the point cloud, with the suffix _sc.txt) describing the 1-skeleton,
+   * which can then be plotted with e.g. KeplerMapper.
+ */
+ void write_info() {
+ int num_simplices = simplices.size();
+ int num_edges = 0;
+ std::string mapp = point_cloud_name + "_sc.txt";
+ std::ofstream graphic(mapp);
+
+ for (int i = 0; i < num_simplices; i++)
+ if (simplices[i].size() == 2)
+ if (cover_color[simplices[i][0]].first > mask && cover_color[simplices[i][1]].first > mask) num_edges++;
+
+ graphic << point_cloud_name << std::endl;
+ graphic << cover_name << std::endl;
+ graphic << color_name << std::endl;
+ graphic << resolution_double << " " << gain << std::endl;
+ graphic << cover_color.size() << " " << num_edges << std::endl;
+
+ int id = 0;
+ for (std::map<int, std::pair<int, double> >::iterator iit = cover_color.begin(); iit != cover_color.end(); iit++) {
+ graphic << id << " " << iit->second.second << " " << iit->second.first << std::endl;
+ name2id[iit->first] = id;
+ name2idinv[id] = iit->first;
+ id++;
+ }
+
+ for (int i = 0; i < num_simplices; i++)
+ if (simplices[i].size() == 2)
+ if (cover_color[simplices[i][0]].first > mask && cover_color[simplices[i][1]].first > mask)
+ graphic << name2id[simplices[i][0]] << " " << name2id[simplices[i][1]] << std::endl;
+ graphic.close();
+ std::cout << mapp
+ << " generated. It can be visualized with e.g. python KeplerMapperVisuFromTxtFile.py and firefox."
+ << std::endl;
+ }
+
+ public: // Create a .off file that can be visualized (e.g. with Geomview).
+  /** \brief Writes a .off file (named after the point cloud, with the suffix _sc.off) for 3D visualization; it contains the 2-skeleton of the GIC.
+ * This function assumes that the cover has been computed with Voronoi. If data points are in 1D or 2D,
+ * the remaining coordinates of the points embedded in 3D are set to 0.
+ */
+ void plot_OFF() {
+ assert(cover_name == "Voronoi");
+
+ int m = voronoi_subsamples.size();
+ int numedges = 0;
+ int numfaces = 0;
+ std::vector<std::vector<int> > edges, faces;
+ int numsimplices = simplices.size();
+
+ std::string mapp = point_cloud_name + "_sc.off";
+ std::ofstream graphic(mapp);
+
+ graphic << "OFF" << std::endl;
+ for (int i = 0; i < numsimplices; i++) {
+ if (simplices[i].size() == 2) {
+ numedges++;
+ edges.push_back(simplices[i]);
+ }
+ if (simplices[i].size() == 3) {
+ numfaces++;
+ faces.push_back(simplices[i]);
+ }
+ }
+ graphic << m << " " << numedges + numfaces << std::endl;
+ for (int i = 0; i < m; i++) {
+ if (data_dimension <= 3) {
+ for (int j = 0; j < data_dimension; j++) graphic << point_cloud[voronoi_subsamples[i]][j] << " ";
+ for (int j = data_dimension; j < 3; j++) graphic << 0 << " ";
+ graphic << std::endl;
+ } else {
+ for (int j = 0; j < 3; j++) graphic << point_cloud[voronoi_subsamples[i]][j] << " ";
+ }
+ }
+ for (int i = 0; i < numedges; i++) graphic << 2 << " " << edges[i][0] << " " << edges[i][1] << std::endl;
+ for (int i = 0; i < numfaces; i++)
+ graphic << 3 << " " << faces[i][0] << " " << faces[i][1] << " " << faces[i][2] << std::endl;
+ graphic.close();
+ std::cout << mapp << " generated. It can be visualized with e.g. geomview." << std::endl;
+ }
+
+ // *******************************************************************************************************************
+ // Extended Persistence Diagrams.
+ // *******************************************************************************************************************
+
+ public:
+ /** \brief Computes the extended persistence diagram of the complex.
+ *
+ */
+ void compute_PD() {
+ Simplex_tree st;
+
+ // Compute max and min
+ double maxf = std::numeric_limits<double>::lowest();
+ double minf = std::numeric_limits<double>::max();
+ for (std::map<int, double>::iterator it = cover_std.begin(); it != cover_std.end(); it++) {
+ maxf = std::max(maxf, it->second);
+ minf = std::min(minf, it->second);
+ }
+
+ for (auto const& simplex : simplices) {
+ // Add a simplex and a cone on it
+ std::vector<int> splx = simplex;
+ splx.push_back(-2);
+ st.insert_simplex_and_subfaces(splx);
+ }
+
+ // Build filtration
+ for (auto simplex : st.complex_simplex_range()) {
+ double filta = std::numeric_limits<double>::lowest();
+ double filts = filta;
+ bool ascending = true;
+ for (auto vertex : st.simplex_vertex_range(simplex)) {
+ if (vertex == -2) {
+ ascending = false;
+ continue;
+ }
+ filta = std::max(-2 + (cover_std[vertex] - minf) / (maxf - minf), filta);
+ filts = std::max(2 - (cover_std[vertex] - minf) / (maxf - minf), filts);
+ }
+ if (ascending)
+ st.assign_filtration(simplex, filta);
+ else
+ st.assign_filtration(simplex, filts);
+ }
+ int magic[] = {-2};
+ st.assign_filtration(st.find(magic), -3);
+
+ // Compute PD
+ st.initialize_filtration();
+ Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Gudhi::persistent_cohomology::Field_Zp> pcoh(st);
+ pcoh.init_coefficients(2);
+ pcoh.compute_persistent_cohomology();
+
+ // Output PD
+ int max_dim = st.dimension();
+ for (int i = 0; i < max_dim; i++) {
+ std::vector<std::pair<double, double> > bars = pcoh.intervals_in_dimension(i);
+ int num_bars = bars.size();
+      if (verbose) std::cout << num_bars << " interval(s) in dimension " << i << ":" << std::endl;
+ for (int j = 0; j < num_bars; j++) {
+ double birth = bars[j].first;
+ double death = bars[j].second;
+ if (i == 0 && std::isinf(death)) continue;
+ if (birth < 0)
+ birth = minf + (birth + 2) * (maxf - minf);
+ else
+ birth = minf + (2 - birth) * (maxf - minf);
+ if (death < 0)
+ death = minf + (death + 2) * (maxf - minf);
+ else
+ death = minf + (2 - death) * (maxf - minf);
+ PD.push_back(std::pair<double, double>(birth, death));
+ if (verbose) std::cout << " [" << birth << ", " << death << "]" << std::endl;
+ }
+ }
+ }
+
+ public:
+ /** \brief Computes bootstrapped distances distribution.
+ *
+ * @param[in] N number of bootstrap iterations.
+ *
+ */
+ template <typename SimplicialComplex>
+ void compute_distribution(int N = 100) {
+ if (distribution.size() >= N) {
+ std::cout << "Already done!" << std::endl;
+ } else {
+ for (int i = 0; i < N - distribution.size(); i++) {
+ Cover_complex Cboot;
+ Cboot.n = this->n;
+ std::vector<int> boot(this->n);
+ for (int j = 0; j < this->n; j++) {
+ double u = GetUniform();
+ int id = std::floor(u * (this->n));
+ boot[j] = id;
+ Cboot.point_cloud[j] = this->point_cloud[id];
+ Cboot.func.emplace(j, this->func[id]);
+ }
+ for (int j = 0; j < n; j++) {
+ std::vector<double> dist(n);
+ for (int k = 0; k < n; k++) dist[k] = distances[boot[j]][boot[k]];
+ Cboot.distances.push_back(dist);
+ }
+
+ Cboot.set_graph_from_automatic_rips(Gudhi::Euclidean_distance());
+ Cboot.set_automatic_resolution();
+ Cboot.set_gain();
+ Cboot.set_cover_from_function();
+ Cboot.find_simplices();
+ Cboot.compute_PD();
+
+ distribution.push_back(Gudhi::persistence_diagram::bottleneck_distance(this->PD, Cboot.PD));
+ }
+
+ std::sort(distribution.begin(), distribution.end());
+ }
+ }
+
+ public:
+ /** \brief Computes the bottleneck distance threshold corresponding to a specific confidence level.
+ *
+ * @param[in] alpha Confidence level.
+ *
+ */
+ double compute_distance_from_confidence_level(double alpha) {
+ int N = distribution.size();
+ return distribution[std::floor(alpha * N)];
+ }
+
+ public:
+ /** \brief Computes the confidence level of a specific bottleneck distance threshold.
+ *
+ * @param[in] d Bottleneck distance.
+ *
+ */
+ double compute_confidence_level_from_distance(double d) {
+ int N = distribution.size();
+ for (int i = 0; i < N; i++)
+ if (distribution[i] > d) return i * 1.0 / N;
+ // d is at least as large as every bootstrapped distance
+ return 1;
+ }
+
+ public:
+ /** \brief Computes the p-value, i.e. one minus the confidence level of the largest bottleneck
+ * distance that preserves the points of the persistence diagram of the output simplicial complex.
+ *
+ */
+ double compute_p_value() {
+ double distancemin = std::numeric_limits<double>::max();
+ int N = PD.size();
+ for (int i = 0; i < N; i++) distancemin = std::min(distancemin, 0.5 * (PD[i].second - PD[i].first));
+ return 1 - compute_confidence_level_from_distance(distancemin);
+ }
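+
+ // A minimal sketch of the bootstrap pipeline above, assuming compute_PD() has already been called
+ // on the object "GIC". The explicit template argument mirrors the Simplex_tree type used
+ // internally (its exact name outside this header is an assumption), and the number of iterations
+ // (100) and the confidence level (0.9) are arbitrary example values.
+ //
+ // GIC.compute_distribution<Simplex_tree>(100); // bootstrap distribution of bottleneck distances
+ // double d = GIC.compute_distance_from_confidence_level(0.9); // distance threshold at 90% confidence
+ // double c = GIC.compute_confidence_level_from_distance(d); // gives back roughly 0.9
+ // double p = GIC.compute_p_value(); // p-value of the features of PD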
+
+ // *******************************************************************************************************************
+ // Computation of simplices.
+ // *******************************************************************************************************************
+
+ public:
+ /** \brief Creates the simplicial complex.
+ *
+ * @param[in] complex SimplicialComplex to be created.
+ *
+ */
+ template <typename SimplicialComplex>
+ void create_complex(SimplicialComplex& complex) {
+ unsigned int dimension = 0;
+ for (auto const& simplex : simplices) {
+ int numvert = simplex.size();
+ double filt = std::numeric_limits<double>::lowest();
+ for (int i = 0; i < numvert; i++) filt = std::max(cover_color[simplex[i]].second, filt);
+ complex.insert_simplex_and_subfaces(simplex, filt);
+ if (dimension < simplex.size() - 1) dimension = simplex.size() - 1;
+ }
+ }
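+
+ // A minimal sketch of exporting the computed complex, assuming find_simplices() has been called on
+ // "GIC". The target type only needs an insert_simplex_and_subfaces(simplex, filtration) member, as
+ // used above; a Gudhi::Simplex_tree is one such type, and the name "stree" is just an example.
+ //
+ // Simplex_tree stree;
+ // GIC.create_complex(stree);
+ // // stree now contains one maximal simplex per element of simplices, with filtration value equal
+ // // to the maximal cover_color value of its vertices.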
+
+ public:
+ /** \brief Computes the simplices of the simplicial complex.
+ */
+ void find_simplices() {
+ if (type != "Nerve" && type != "GIC") {
+ std::cout << "Type of complex needs to be specified." << std::endl;
+ return;
+ }
+
+ if (type == "Nerve") {
+ for (auto& simplex : cover)
+ simplices.push_back(simplex.second);
+ std::sort(simplices.begin(), simplices.end());
+ std::vector<std::vector<int> >::iterator it = std::unique(simplices.begin(), simplices.end());
+ simplices.resize(std::distance(simplices.begin(), it));
+ }
+
+ if (type == "GIC") {
+ Index_map index = boost::get(boost::vertex_index, one_skeleton);
+
+ if (functional_cover) {
+ // Computes the simplices in the GIC by looking at all the edges of the graph and adding the
+ // corresponding edges in the GIC if the images of the endpoints belong to consecutive intervals.
+
+ if (gain >= 0.5)
+ throw std::invalid_argument(
+ "the output of this function is correct ONLY if the cover is minimal, i.e. the gain is less than 0.5.");
+
+ // Loop on all edges.
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei) {
+ int nums = cover[index[boost::source(*ei, one_skeleton)]].size();
+ for (int i = 0; i < nums; i++) {
+ int vs = cover[index[boost::source(*ei, one_skeleton)]][i];
+ int numt = cover[index[boost::target(*ei, one_skeleton)]].size();
+ for (int j = 0; j < numt; j++) {
+ int vt = cover[index[boost::target(*ei, one_skeleton)]][j];
+ if (cover_fct[vs] == cover_fct[vt] + 1 || cover_fct[vt] == cover_fct[vs] + 1) {
+ std::vector<int> edge(2);
+ edge[0] = std::min(vs, vt);
+ edge[1] = std::max(vs, vt);
+ simplices.push_back(edge);
+ goto afterLoop;
+ }
+ }
+ }
+ afterLoop:;
+ }
+ std::sort(simplices.begin(), simplices.end());
+ std::vector<std::vector<int> >::iterator it = std::unique(simplices.begin(), simplices.end());
+ simplices.resize(std::distance(simplices.begin(), it));
+
+ } else {
+ // Find edges to keep
+ Simplex_tree st;
+ boost::graph_traits<Graph>::edge_iterator ei, ei_end;
+ for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei)
+ if (!(cover[index[boost::target(*ei, one_skeleton)]].size() == 1 &&
+ cover[index[boost::target(*ei, one_skeleton)]] == cover[index[boost::source(*ei, one_skeleton)]])) {
+ std::vector<int> edge(2);
+ edge[0] = index[boost::source(*ei, one_skeleton)];
+ edge[1] = index[boost::target(*ei, one_skeleton)];
+ st.insert_simplex_and_subfaces(edge);
+ }
+
+ // st.insert_graph(one_skeleton);
+
+ // Build the Simplex Tree corresponding to the graph
+ st.expansion(maximal_dim);
+
+ // Find simplices of GIC
+ simplices.clear();
+ for (auto simplex : st.complex_simplex_range()) {
+ if (!st.has_children(simplex)) {
+ std::vector<int> simplx;
+ for (auto vertex : st.simplex_vertex_range(simplex)) {
+ unsigned int sz = cover[vertex].size();
+ for (unsigned int i = 0; i < sz; i++) {
+ simplx.push_back(cover[vertex][i]);
+ }
+ }
+ std::sort(simplx.begin(), simplx.end());
+ std::vector<int>::iterator it = std::unique(simplx.begin(), simplx.end());
+ simplx.resize(std::distance(simplx.begin(), it));
+ simplices.push_back(simplx);
+ }
+ }
+ std::sort(simplices.begin(), simplices.end());
+ std::vector<std::vector<int> >::iterator it = std::unique(simplices.begin(), simplices.end());
+ simplices.resize(std::distance(simplices.begin(), it));
+ }
+ }
+ }
+};
+
+} // namespace cover_complex
+
+} // namespace Gudhi
+
+#endif // GIC_H_
diff --git a/include/gudhi/Kd_tree_search.h b/include/gudhi/Kd_tree_search.h
index ef428002..96bbeb36 100644
--- a/include/gudhi/Kd_tree_search.h
+++ b/include/gudhi/Kd_tree_search.h
@@ -271,8 +271,7 @@ class Kd_tree_search {
m_tree.search(it, Fuzzy_sphere(p, radius, eps, m_tree.traits()));
}
- int tree_depth() const
- {
+ int tree_depth() const {
return m_tree.root()->depth();
}
diff --git a/include/gudhi/Neighbors_finder.h b/include/gudhi/Neighbors_finder.h
index a6b9b021..87c7cee5 100644
--- a/include/gudhi/Neighbors_finder.h
+++ b/include/gudhi/Neighbors_finder.h
@@ -32,6 +32,7 @@
#include <unordered_set>
#include <vector>
+#include <algorithm> // for std::max
namespace Gudhi {
@@ -44,7 +45,7 @@ struct Square_query {
typedef Internal_point Point_d;
typedef double FT;
bool contains(Point_d p) const {
- return std::abs(p.x()-c.x()) <= size && std::abs(p.y()-c.y()) <= size;
+ return std::max(std::abs(p.x()-c.x()), std::abs(p.y()-c.y())) <= size;
}
bool inner_range_intersects(CGAL::Kd_tree_rectangle<FT, D> const&r) const {
return
diff --git a/include/gudhi/PSSK.h b/include/gudhi/PSSK.h
new file mode 100644
index 00000000..630f5623
--- /dev/null
+++ b/include/gudhi/PSSK.h
@@ -0,0 +1,168 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PSSK_H_
+#define PSSK_H_
+
+// gudhi include
+#include <gudhi/Persistence_heat_maps.h>
+
+#include <limits>
+#include <utility>
+#include <vector>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+/**
+* This is a version of the representation presented in https://arxiv.org/abs/1412.6821
+* In that paper the authors use the representation only to compute a kernel; here we extend its usability considerably.
+* Note that the version presented here is not exact, since we discretize the kernel.
+* The only difference with respect to the original class is the method of construction: we use the full (square) image
+* and, for every point (p,q), we add a kernel at (p,q) and the negated kernel at (q,p).
+**/
+
+class PSSK : public Persistence_heat_maps<constant_scaling_function> {
+ public:
+ PSSK() : Persistence_heat_maps() {}
+
+ PSSK(const std::vector<std::pair<double, double> >& interval,
+ std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1), size_t number_of_pixels = 1000,
+ double min_ = -1, double max_ = -1)
+ : Persistence_heat_maps() {
+ this->construct(interval, filter, number_of_pixels, min_, max_);
+ }
+
+ PSSK(const char* filename, std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1),
+ size_t number_of_pixels = 1000, double min_ = -1, double max_ = -1,
+ unsigned dimension = std::numeric_limits<unsigned>::max())
+ : Persistence_heat_maps() {
+ std::vector<std::pair<double, double> > intervals_;
+ if (dimension == std::numeric_limits<unsigned>::max()) {
+ intervals_ = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ intervals_ = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->construct(intervals_, filter, number_of_pixels, min_, max_);
+ }
+
+ protected:
+ void construct(const std::vector<std::pair<double, double> >& intervals_,
+ std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1),
+ size_t number_of_pixels = 1000, double min_ = -1, double max_ = -1);
+};
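+
+// A minimal usage sketch of the class above (the interval values and the grid size are arbitrary
+// example values; plot() and print_to_file() are inherited from Persistence_heat_maps):
+//
+// std::vector<std::pair<double, double> > intervals = {{0.1, 0.7}, {0.2, 0.5}};
+// PSSK pssk(intervals, create_Gaussian_filter(5, 1), 256);
+// pssk.plot("pssk_plot"); // generates a gnuplot script
+// pssk.print_to_file("pssk.txt"); // text dump of the (antisymmetric) heat map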
+
+// if min_ == max_, then the program is requested to set up the values itself based on persistence intervals
+void PSSK::construct(const std::vector<std::pair<double, double> >& intervals_,
+ std::vector<std::vector<double> > filter, size_t number_of_pixels, double min_, double max_) {
+ bool dbg = false;
+ if (dbg) {
+ std::cerr << "Entering construct procedure \n";
+ getchar();
+ }
+
+ if (min_ == max_) {
+ // in this case, we want the program to set up the min_ and max_ values by itself.
+ min_ = std::numeric_limits<int>::max();
+ max_ = -std::numeric_limits<int>::max();
+
+ for (size_t i = 0; i != intervals_.size(); ++i) {
+ if (intervals_[i].first < min_) min_ = intervals_[i].first;
+ if (intervals_[i].second > max_) max_ = intervals_[i].second;
+ }
+ // now we have the structure filled in, and moreover we know min_ and max_ values of the interval, so we know the
+ // range.
+
+ // add some more space:
+ min_ -= fabs(max_ - min_) / 100;
+ max_ += fabs(max_ - min_) / 100;
+ }
+
+ if (dbg) {
+ std::cerr << "min_ : " << min_ << std::endl;
+ std::cerr << "max_ : " << max_ << std::endl;
+ std::cerr << "number_of_pixels : " << number_of_pixels << std::endl;
+ getchar();
+ }
+
+ this->min_ = min_;
+ this->max_ = max_;
+
+ // initialization of the structure heat_map
+ std::vector<std::vector<double> > heat_map_;
+ for (size_t i = 0; i != number_of_pixels; ++i) {
+ std::vector<double> v(number_of_pixels, 0);
+ heat_map_.push_back(v);
+ }
+ this->heat_map = heat_map_;
+
+ if (dbg) std::cerr << "Done creating of the heat map, now we will fill in the structure \n";
+
+ for (size_t pt_nr = 0; pt_nr != intervals_.size(); ++pt_nr) {
+ // compute the value of intervals_[pt_nr] in the grid:
+ int x_grid =
+ static_cast<int>((intervals_[pt_nr].first - this->min_) / (this->max_ - this->min_) * number_of_pixels);
+ int y_grid =
+ static_cast<int>((intervals_[pt_nr].second - this->min_) / (this->max_ - this->min_) * number_of_pixels);
+
+ if (dbg) {
+ std::cerr << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl;
+ std::cerr << "x_grid : " << x_grid << std::endl;
+ std::cerr << "y_grid : " << y_grid << std::endl;
+ }
+
+ // x_grid and y_grid give the center of the kernel. We want its lower left corner. To get this, we
+ // shift x_grid and y_grid by half of the filter size (the kernel radius).
+ x_grid -= filter.size() / 2;
+ y_grid -= filter.size() / 2;
+ // note that the numbers x_grid and y_grid may be negative.
+
+ if (dbg) {
+ std::cerr << "After shift : \n";
+ std::cerr << "x_grid : " << x_grid << std::endl;
+ std::cerr << "y_grid : " << y_grid << std::endl;
+ std::cerr << "filter.size() : " << filter.size() << std::endl;
+ getchar();
+ }
+
+ for (size_t i = 0; i != filter.size(); ++i) {
+ for (size_t j = 0; j != filter.size(); ++j) {
+ // if the point (x_grid+i,y_grid+j) is the correct point in the grid.
+ if (((x_grid + i) >= 0) && (x_grid + i < this->heat_map.size()) && ((y_grid + j) >= 0) &&
+ (y_grid + j < this->heat_map.size())) {
+ if (dbg) {
+ std::cerr << y_grid + j << " " << x_grid + i << std::endl;
+ }
+ this->heat_map[y_grid + j][x_grid + i] += filter[i][j];
+ this->heat_map[x_grid + i][y_grid + j] += -filter[i][j];
+ }
+ }
+ }
+ }
+} // construct
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PSSK_H_
diff --git a/include/gudhi/Persistence_heat_maps.h b/include/gudhi/Persistence_heat_maps.h
new file mode 100644
index 00000000..a80c3c40
--- /dev/null
+++ b/include/gudhi/Persistence_heat_maps.h
@@ -0,0 +1,919 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_HEAT_MAPS_H_
+#define PERSISTENCE_HEAT_MAPS_H_
+
+// gudhi include
+#include <gudhi/read_persistence_from_file.h>
+#include <gudhi/common_persistence_representations.h>
+
+// standard include
+#include <vector>
+#include <sstream>
+#include <iostream>
+#include <cmath>
+#include <limits>
+#include <algorithm>
+#include <utility>
+#include <string>
+#include <functional>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+/**
+ * This is a simple procedure that creates a (2*pixel_radius+1) by (2*pixel_radius+1) discrete approximation of a
+ * Gaussian kernel, normalized so that its entries sum to 1.
+**/
+std::vector<std::vector<double> > create_Gaussian_filter(size_t pixel_radius, double sigma) {
+ bool dbg = false;
+ // we are computing the kernel mask up to 2 standard deviations away from the center. We discretize it on a grid of
+ // size (2*pixel_radius+1) times (2*pixel_radius+1).
+
+ double r = 0;
+ double sigma_sqr = sigma * sigma;
+
+ // sum is for normalization
+ double sum = 0;
+
+ // initialization of a kernel:
+ std::vector<std::vector<double> > kernel(2 * pixel_radius + 1);
+ for (size_t i = 0; i != kernel.size(); ++i) {
+ std::vector<double> v(2 * pixel_radius + 1, 0);
+ kernel[i] = v;
+ }
+
+ if (dbg) {
+ std::cerr << "Kernel initialize \n";
+ std::cerr << "pixel_radius : " << pixel_radius << std::endl;
+ std::cerr << "kernel.size() : " << kernel.size() << std::endl;
+ getchar();
+ }
+
+ for (int x = -static_cast<int>(pixel_radius); x <= static_cast<int>(pixel_radius); x++) {
+ for (int y = -static_cast<int>(pixel_radius); y <= static_cast<int>(pixel_radius); y++) {
+ double real_x = 2 * sigma * x / pixel_radius;
+ double real_y = 2 * sigma * y / pixel_radius;
+ r = sqrt(real_x * real_x + real_y * real_y);
+ kernel[x + pixel_radius][y + pixel_radius] = (exp(-(r * r) / sigma_sqr)) / (3.141592 * sigma_sqr);
+ sum += kernel[x + pixel_radius][y + pixel_radius];
+ }
+ }
+
+ // normalize the kernel
+ for (size_t i = 0; i != kernel.size(); ++i) {
+ for (size_t j = 0; j != kernel[i].size(); ++j) {
+ kernel[i][j] /= sum;
+ }
+ }
+
+ if (dbg) {
+ std::cerr << "Here is the kernel : \n";
+ for (size_t i = 0; i != kernel.size(); ++i) {
+ for (size_t j = 0; j != kernel[i].size(); ++j) {
+ std::cerr << kernel[i][j] << " ";
+ }
+ std::cerr << std::endl;
+ }
+ }
+ return kernel;
+}
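+
+// A short sketch of what the procedure above returns: a (2*pixel_radius+1) by (2*pixel_radius+1)
+// matrix whose entries sum (approximately) to 1 after normalization. For example:
+//
+// std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1);
+// // filter.size() == 11 and filter[i].size() == 11
+// double sum = 0;
+// for (auto const& row : filter)
+// for (double v : row) sum += v;
+// // sum is approximately 1, up to floating point error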
+
+/*
+* There are various options to scale the points depending on their location. One can for instance:
+* (1) do nothing (scale all of them with the weight 1), as in the class constant_scaling_function,
+* (2) scale them by their distance to the diagonal, as in the class distance_from_diagonal_scaling,
+* (3) scale them by the square of their distance to the diagonal, as in the class squared_distance_from_diagonal_scaling,
+* (4) scale them by the arc tangent of their persistence, as in the class arc_tan_of_persistence_of_point,
+* (5) scale them so that the longest interval gets weight 1, as in the class weight_by_setting_maximal_interval_to_have_length_one.
+*/
+
+/**
+ * This is one of the scaling functions used to weight points depending on their persistence and/or location in the
+ * diagram. This particular one always assigns the value 1 to every point in the diagram.
+**/
+class constant_scaling_function {
+ public:
+ double operator()(const std::pair<double, double>& point_in_diagram) { return 1; }
+};
+
+/**
+ * This is one of the scaling functions used to weight points depending on their persistence and/or location in the
+ * diagram. The scaling given by this function to a point (b,d) is the Euclidean distance of (b,d) from the diagonal.
+**/
+class distance_from_diagonal_scaling {
+ public:
+ double operator()(const std::pair<double, double>& point_in_diagram) {
+ // (point_in_diagram.first+point_in_diagram.second)/2.0
+ return sqrt(pow((point_in_diagram.first - (point_in_diagram.first + point_in_diagram.second) / 2.0), 2) +
+ pow((point_in_diagram.second - (point_in_diagram.first + point_in_diagram.second) / 2.0), 2));
+ }
+};
+
+/**
+ * This is one of the scaling functions used to weight points depending on their persistence and/or location in the
+ * diagram. The scaling given by this function to a point (b,d) is the squared Euclidean distance of (b,d) from the
+ * diagonal.
+**/
+class squared_distance_from_diagonal_scaling {
+ public:
+ double operator()(const std::pair<double, double>& point_in_diagram) {
+ return pow((point_in_diagram.first - (point_in_diagram.first + point_in_diagram.second) / 2.0), 2) +
+ pow((point_in_diagram.second - (point_in_diagram.first + point_in_diagram.second) / 2.0), 2);
+ }
+};
+
+/**
+ * This is one of the scaling functions used to weight points depending on their persistence and/or location in the
+ * diagram. The scaling given by this function to a point (b,d) is the arc tangent of its persistence, i.e. atan(d-b).
+**/
+class arc_tan_of_persistence_of_point {
+ public:
+ double operator()(const std::pair<double, double>& point_in_diagram) {
+ return atan(point_in_diagram.second - point_in_diagram.first);
+ }
+};
+
+/**
+ * This is one of the scaling functions used to weight points depending on their persistence and/or location in the
+ * diagram. This scaling function depends not only on a point (b,d) in the diagram, but on the whole diagram:
+ * the pair with the longest persistence gets weight 1, and any other pair gets a weight in [0,1]
+ * proportional to its persistence.
+**/
+class weight_by_setting_maximal_interval_to_have_length_one {
+ public:
+ weight_by_setting_maximal_interval_to_have_length_one(double len) : length_of_maximal_interval(len) {}
+ double operator()(const std::pair<double, double>& point_in_diagram) {
+ return (point_in_diagram.second - point_in_diagram.first) / this->length_of_maximal_interval;
+ }
+
+ private:
+ double length_of_maximal_interval;
+};
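+
+// Any of the default-constructible classes above can be used as the Scalling_of_kernels template
+// parameter of the Persistence_heat_maps class declared below. A minimal sketch (the intervals are
+// arbitrary example values):
+//
+// std::vector<std::pair<double, double> > intervals = {{0.0, 1.0}, {0.3, 0.4}};
+// Persistence_heat_maps<constant_scaling_function> unweighted_map(intervals);
+// Persistence_heat_maps<arc_tan_of_persistence_of_point> weighted_map(intervals);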
+
+/**
+ * \class Persistence_heat_maps Persistence_heat_maps.h gudhi/Persistence_heat_maps.h
+ * \brief A class implementing persistence heat maps.
+ *
+ * \ingroup Persistence_representations
+**/
+
+// This class implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+// Real_valued_topological_data, Topological_data_with_averages, Topological_data_with_scalar_product
+template <typename Scalling_of_kernels = constant_scaling_function>
+class Persistence_heat_maps {
+ public:
+ /**
+ * The default constructor. The scaling function is set to the constant function and the image is not
+ * erased below the diagonal. (The other constructors use a Gaussian filter of pixel radius 5 by default.)
+ **/
+ Persistence_heat_maps() {
+ Scalling_of_kernels f;
+ this->f = f;
+ this->erase_below_diagonal = false;
+ this->min_ = this->max_ = 0;
+ this->set_up_parameters_for_basic_classes();
+ }
+
+ /**
+ * Constructor that takes the following parameters:
+ * (1) a vector of pairs of doubles (representing persistence intervals). All other parameters are optional:
+ * (2) a Gaussian filter generated by create_Gaussian_filter (by default a Gaussian filter of pixel radius 5),
+ * (3) a boolean value which determines whether the area of the image below the diagonal should be erased
+ * (it is not erased by default),
+ * (4) the number of pixels in each direction (set to 1000 by default),
+ * (5) the minimal x and y value of points that are to be taken into account. By default it is set to
+ * std::numeric_limits<double>::max(), in which case the program computes the value based on the data,
+ * (6) the maximal x and y value of points that are to be taken into account. By default it is set to
+ * std::numeric_limits<double>::max(), in which case the program computes the value based on the data.
+ **/
+ Persistence_heat_maps(const std::vector<std::pair<double, double> >& interval,
+ std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1),
+ bool erase_below_diagonal = false, size_t number_of_pixels = 1000,
+ double min_ = std::numeric_limits<double>::max(),
+ double max_ = std::numeric_limits<double>::max());
+
+ /**
+ * Constructor that takes the following parameters:
+ * (1) the name of a file with persistence intervals. The file should be readable by the function
+ * read_persistence_intervals_in_one_dimension_from_file. All other parameters are optional:
+ * (2) a Gaussian filter generated by create_Gaussian_filter (by default a Gaussian filter of pixel radius 5),
+ * (3) a boolean value which determines whether the area of the image below the diagonal should be erased
+ * (it is not erased by default),
+ * (4) the number of pixels in each direction (set to 1000 by default),
+ * (5) the minimal x and y value of points that are to be taken into account. By default it is set to
+ * std::numeric_limits<double>::max(), in which case the program computes the value based on the data,
+ * (6) the maximal x and y value of points that are to be taken into account. By default it is set to
+ * std::numeric_limits<double>::max(), in which case the program computes the value based on the data,
+ * (7) the dimension of the intervals to be read from the file; if the file contains only birth-death
+ * pairs, use the default value.
+ **/
+ Persistence_heat_maps(const char* filename, std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1),
+ bool erase_below_diagonal = false, size_t number_of_pixels = 1000,
+ double min_ = std::numeric_limits<double>::max(),
+ double max_ = std::numeric_limits<double>::max(),
+ unsigned dimension = std::numeric_limits<unsigned>::max());
+
+ /**
+ * Compute the mean of a collection of heat maps and store it in the current object. Note that all the persistence
+ * maps sent in a vector to this procedure need to have the same parameters.
+ * If this is not the case, the program will throw an exception.
+ **/
+ void compute_mean(const std::vector<Persistence_heat_maps*>& maps);
+
+ /**
+ * Compute the (pixel-wise) median of a collection of heat maps and store it in the current object. Note that all the
+ * persistence maps sent in a vector to this procedure need to have the same parameters.
+ * If this is not the case, the program will throw an exception.
+ **/
+ void compute_median(const std::vector<Persistence_heat_maps*>& maps);
+
+ /**
+ * For each pixel, compute the number of heat maps in the collection in which this pixel is active (i.e. nonzero);
+ * pixels that are active in at most cutoff of the maps are set to 0.
+ **/
+ void compute_percentage_of_active(const std::vector<Persistence_heat_maps*>& maps, size_t cutoff = 1);
+
+ // put to file subroutine
+ /**
+ * The function outputs the persistence image to a text file. The format is as follows:
+ * the first line stores the min and max values of the image;
+ * the following lines contain the persistence image in the form of a bitmap.
+ **/
+ void print_to_file(const char* filename) const;
+
+ /**
+ * A function that loads a heat map from a file into the current object (and erases whatever was stored in the
+ * current object before).
+ **/
+ void load_from_file(const char* filename);
+
+ /**
+ * The procedure checks whether min_, max_ and the size of heat_map are the same in both objects.
+ **/
+ inline bool check_if_the_same(const Persistence_heat_maps& second) const {
+ bool dbg = false;
+ if (this->heat_map.size() != second.heat_map.size()) {
+ if (dbg)
+ std::cerr << "this->heat_map.size() : " << this->heat_map.size()
+ << " \n second.heat_map.size() : " << second.heat_map.size() << std::endl;
+ return false;
+ }
+ if (this->min_ != second.min_) {
+ if (dbg) std::cerr << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl;
+ return false;
+ }
+ if (this->max_ != second.max_) {
+ if (dbg) std::cerr << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl;
+ return false;
+ }
+ // in the other case we may assume that the persistence images are defined on the same domain.
+ return true;
+ }
+
+ /**
+ * Return the minimal range value of the persistence image.
+ **/
+ inline double get_min() const { return this->min_; }
+
+ /**
+ * Return the maximal range value of the persistence image.
+ **/
+ inline double get_max() const { return this->max_; }
+
+ /**
+ * Operator == to check if two persistence heat maps are the same.
+ **/
+ bool operator==(const Persistence_heat_maps& rhs) const {
+ bool dbg = false;
+ if (!this->check_if_the_same(rhs)) {
+ if (dbg) std::cerr << "The domains are not the same \n";
+ return false; // in this case, the domains are not the same, so the maps cannot be the same.
+ }
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ if (!almost_equal(this->heat_map[i][j], rhs.heat_map[i][j])) {
+ if (dbg) {
+ std::cerr << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl;
+ std::cerr << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl;
+ }
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Operator != to check if two persistence heat maps are different.
+ **/
+ bool operator!=(const Persistence_heat_maps& rhs) const { return !((*this) == rhs); }
+
+ /**
+ * A function to generate a gnuplot script to visualize the persistence image.
+ **/
+ void plot(const char* filename) const;
+
+ template <typename Operation_type>
+ friend Persistence_heat_maps operation_on_pair_of_heat_maps(const Persistence_heat_maps& first,
+ const Persistence_heat_maps& second,
+ Operation_type operation) {
+ // first check if the heat maps are compatible
+ if (!first.check_if_the_same(second)) {
+ std::cerr << "Sizes of the heat maps are not compatible. The program will now terminate \n";
+ throw "Sizes of the heat maps are not compatible. The program will now terminate \n";
+ }
+ Persistence_heat_maps result;
+ result.min_ = first.min_;
+ result.max_ = first.max_;
+ result.heat_map.reserve(first.heat_map.size());
+ for (size_t i = 0; i != first.heat_map.size(); ++i) {
+ std::vector<double> v;
+ v.reserve(first.heat_map[i].size());
+ for (size_t j = 0; j != first.heat_map[i].size(); ++j) {
+ v.push_back(operation(first.heat_map[i][j], second.heat_map[i][j]));
+ }
+ result.heat_map.push_back(v);
+ }
+ return result;
+ } // operation_on_pair_of_heat_maps
+
+ /**
+ * Multiplication of Persistence_heat_maps by a scalar (so that all values of the heat map get multiplied by that
+ * scalar).
+ **/
+ Persistence_heat_maps multiply_by_scalar(double scalar) const {
+ Persistence_heat_maps result;
+ result.min_ = this->min_;
+ result.max_ = this->max_;
+ result.heat_map.reserve(this->heat_map.size());
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ std::vector<double> v;
+ v.reserve(this->heat_map[i].size());
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ v.push_back(this->heat_map[i][j] * scalar);
+ }
+ result.heat_map.push_back(v);
+ }
+ return result;
+ }
+
+ /**
+ * This function computes a sum of two objects of a type Persistence_heat_maps.
+ **/
+ friend Persistence_heat_maps operator+(const Persistence_heat_maps& first, const Persistence_heat_maps& second) {
+ return operation_on_pair_of_heat_maps(first, second, std::plus<double>());
+ }
+ /**
+* This function computes a difference of two objects of a type Persistence_heat_maps.
+**/
+ friend Persistence_heat_maps operator-(const Persistence_heat_maps& first, const Persistence_heat_maps& second) {
+ return operation_on_pair_of_heat_maps(first, second, std::minus<double>());
+ }
+ /**
+* This function computes a product of an object of a type Persistence_heat_maps with real number.
+**/
+ friend Persistence_heat_maps operator*(double scalar, const Persistence_heat_maps& A) {
+ return A.multiply_by_scalar(scalar);
+ }
+ /**
+* This function computes a product of an object of a type Persistence_heat_maps with real number.
+**/
+ friend Persistence_heat_maps operator*(const Persistence_heat_maps& A, double scalar) {
+ return A.multiply_by_scalar(scalar);
+ }
+ /**
+* This function computes a product of an object of a type Persistence_heat_maps with real number.
+**/
+ Persistence_heat_maps operator*(double scalar) { return this->multiply_by_scalar(scalar); }
+ /**
+ * += operator for Persistence_heat_maps.
+ **/
+ Persistence_heat_maps operator+=(const Persistence_heat_maps& rhs) {
+ *this = *this + rhs;
+ return *this;
+ }
+ /**
+ * -= operator for Persistence_heat_maps.
+ **/
+ Persistence_heat_maps operator-=(const Persistence_heat_maps& rhs) {
+ *this = *this - rhs;
+ return *this;
+ }
+ /**
+ * *= operator for Persistence_heat_maps.
+ **/
+ Persistence_heat_maps operator*=(double x) {
+ *this = *this * x;
+ return *this;
+ }
+ /**
+ * /= operator for Persistence_heat_maps.
+ **/
+ Persistence_heat_maps operator/=(double x) {
+ if (x == 0) throw("In operator /=, division by 0. Program terminated.");
+ *this = *this * (1 / x);
+ return *this;
+ }
+
+ // Implementations of functions for various concepts.
+
+ /**
+ * This function produces a vector of doubles based on the persistence heat map. It is required by the concept
+ * Vectorized_topological_data.
+ */
+ std::vector<double> vectorize(int number_of_function) const;
+ /**
+ * This function returns the number of functions that allow vectorization of the persistence heat map. It is required
+ * by the concept Vectorized_topological_data.
+ **/
+ size_t number_of_vectorize_functions() const { return this->number_of_functions_for_vectorization; }
+
+ /**
+ * This function is required by the Real_valued_topological_data concept. It returns various projections of the
+ * persistence heat map to the real line.
+ * At the moment this function is not tested, since it is quite likely to change in the future. Keep this in mind
+ * when using it.
+ **/
+ double project_to_R(int number_of_function) const;
+ /**
+ * The function gives the number of possible projections to R. This function is required by the
+ *Real_valued_topological_data concept.
+ **/
+ size_t number_of_projections_to_R() const { return this->number_of_functions_for_projections_to_reals; }
+
+ /**
+ * A function to compute distance between persistence heat maps.
+ * The parameter of this function is a const reference to an object of a class Persistence_heat_maps.
+ * This function is required in Topological_data_with_distances concept.
+* For max norm distance, set power to std::numeric_limits<double>::max()
+ **/
+ double distance(const Persistence_heat_maps& second_, double power = 1) const;
+
+ /**
+ * A function to compute averaged persistence heat map, based on vector of persistence heat maps.
+ * This function is required by Topological_data_with_averages concept.
+ **/
+ void compute_average(const std::vector<Persistence_heat_maps*>& to_average);
+
+ /**
+ * A function to compute scalar product of persistence heat maps.
+ * The parameter of this function is a const reference to an object of a class Persistence_heat_maps.
+ * This function is required in Topological_data_with_scalar_product concept.
+ **/
+ double compute_scalar_product(const Persistence_heat_maps& second_) const;
+
+ // end of implementation of functions needed for concepts.
+
+ /**
+ * The x-range of the persistence heat map.
+ **/
+ std::pair<double, double> get_x_range() const { return std::make_pair(this->min_, this->max_); }
+
+ /**
+ * The y-range of the persistence heat map.
+ **/
+ std::pair<double, double> get_y_range() const { return this->get_x_range(); }
+
+ protected:
+ // private methods
+ std::vector<std::vector<double> > check_and_initialize_maps(const std::vector<Persistence_heat_maps*>& maps);
+ size_t number_of_functions_for_vectorization;
+ size_t number_of_functions_for_projections_to_reals;
+ void construct(const std::vector<std::pair<double, double> >& intervals_,
+ std::vector<std::vector<double> > filter = create_Gaussian_filter(5, 1),
+ bool erase_below_diagonal = false, size_t number_of_pixels = 1000,
+ double min_ = std::numeric_limits<double>::max(), double max_ = std::numeric_limits<double>::max());
+
+ void set_up_parameters_for_basic_classes() {
+ this->number_of_functions_for_vectorization = 1;
+ this->number_of_functions_for_projections_to_reals = 1;
+ }
+
+ // data
+ Scalling_of_kernels f;
+ bool erase_below_diagonal;
+ double min_;
+ double max_;
+ std::vector<std::vector<double> > heat_map;
+};
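+
+// A minimal usage sketch of the class above. The file names are arbitrary examples and are assumed
+// to be readable by read_persistence_intervals_in_one_dimension_from_file; a common [0,1] range is
+// fixed explicitly so that both images live on the same domain and can be compared.
+//
+// Persistence_heat_maps<constant_scaling_function> hm1("diagram_1", create_Gaussian_filter(5, 1), false, 256, 0., 1.);
+// Persistence_heat_maps<constant_scaling_function> hm2("diagram_2", create_Gaussian_filter(5, 1), false, 256, 0., 1.);
+// double d1 = hm1.distance(hm2, 1); // L^1-type distance
+// double dinf = hm1.distance(hm2, std::numeric_limits<double>::max()); // max-norm distance
+// double sp = hm1.compute_scalar_product(hm2);
+// std::vector<Persistence_heat_maps<constant_scaling_function>*> maps = {&hm1, &hm2};
+// Persistence_heat_maps<constant_scaling_function> average;
+// average.compute_average(maps);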
+
+// if min_ == max_, then the program is requested to set up the values itself based on persistence intervals
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std::pair<double, double> >& intervals_,
+ std::vector<std::vector<double> > filter,
+ bool erase_below_diagonal, size_t number_of_pixels,
+ double min_, double max_) {
+ bool dbg = false;
+ if (dbg) std::cerr << "Entering construct procedure \n";
+ Scalling_of_kernels f;
+ this->f = f;
+
+ if (dbg) std::cerr << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl;
+
+ if (min_ == max_) {
+ if (dbg) std::cerr << "min and max parameters will be determined based on intervals \n";
+ // in this case, we want the program to set up the min_ and max_ values by itself.
+ min_ = std::numeric_limits<int>::max();
+ max_ = -std::numeric_limits<int>::max();
+
+ for (size_t i = 0; i != intervals_.size(); ++i) {
+ if (intervals_[i].first < min_) min_ = intervals_[i].first;
+ if (intervals_[i].second > max_) max_ = intervals_[i].second;
+ }
+ // now we have the structure filled in, and moreover we know min_ and max_ values of the interval, so we know the
+ // range.
+
+ // add some more space:
+ min_ -= fabs(max_ - min_) / 100;
+ max_ += fabs(max_ - min_) / 100;
+ }
+
+ if (dbg) {
+ std::cerr << "min_ : " << min_ << std::endl;
+ std::cerr << "max_ : " << max_ << std::endl;
+ std::cerr << "number_of_pixels : " << number_of_pixels << std::endl;
+ getchar();
+ }
+
+ this->min_ = min_;
+ this->max_ = max_;
+
+ // initialization of the structure heat_map
+ std::vector<std::vector<double> > heat_map_;
+ for (size_t i = 0; i != number_of_pixels; ++i) {
+ std::vector<double> v(number_of_pixels, 0);
+ heat_map_.push_back(v);
+ }
+ this->heat_map = heat_map_;
+
+ if (dbg) std::cerr << "Done creating of the heat map, now we will fill in the structure \n";
+
+ for (size_t pt_nr = 0; pt_nr != intervals_.size(); ++pt_nr) {
+ // compute the value of intervals_[pt_nr] in the grid:
+ int x_grid =
+ static_cast<int>((intervals_[pt_nr].first - this->min_) / (this->max_ - this->min_) * number_of_pixels);
+ int y_grid =
+ static_cast<int>((intervals_[pt_nr].second - this->min_) / (this->max_ - this->min_) * number_of_pixels);
+
+ if (dbg) {
+ std::cerr << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl;
+ std::cerr << "x_grid : " << x_grid << std::endl;
+ std::cerr << "y_grid : " << y_grid << std::endl;
+ }
+
+ // x_grid and y_grid give the center of the kernel. We want its lower left corner. To get this, we
+ // shift x_grid and y_grid by half of the filter size (the kernel radius).
+ x_grid -= filter.size() / 2;
+ y_grid -= filter.size() / 2;
+ // note that the numbers x_grid and y_grid may be negative.
+
+ if (dbg) {
+ std::cerr << "After shift : \n";
+ std::cerr << "x_grid : " << x_grid << std::endl;
+ std::cerr << "y_grid : " << y_grid << std::endl;
+ }
+
+ double scaling_value = this->f(intervals_[pt_nr]);
+
+ for (size_t i = 0; i != filter.size(); ++i) {
+ for (size_t j = 0; j != filter.size(); ++j) {
+ // if the point (x_grid+i,y_grid+j) is the correct point in the grid.
+ if (((x_grid + i) >= 0) && (x_grid + i < this->heat_map.size()) && ((y_grid + j) >= 0) &&
+ (y_grid + j < this->heat_map.size())) {
+ if (dbg) {
+ std::cerr << y_grid + j << " " << x_grid + i << std::endl;
+ }
+ this->heat_map[y_grid + j][x_grid + i] += scaling_value * filter[i][j];
+ if (dbg) {
+ std::cerr << "Position : (" << x_grid + i << "," << y_grid + j
+ << ") got increased by the value : " << filter[i][j] << std::endl;
+ }
+ }
+ }
+ }
+ }
+
+ // now it remains to cut everything below diagonal if the user wants us to.
+ if (erase_below_diagonal) {
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = i; j != this->heat_map.size(); ++j) {
+ this->heat_map[i][j] = 0;
+ }
+ }
+ }
+} // construct
+
+template <typename Scalling_of_kernels>
+Persistence_heat_maps<Scalling_of_kernels>::Persistence_heat_maps(
+ const std::vector<std::pair<double, double> >& interval, std::vector<std::vector<double> > filter,
+ bool erase_below_diagonal, size_t number_of_pixels, double min_, double max_) {
+ this->construct(interval, filter, erase_below_diagonal, number_of_pixels, min_, max_);
+ this->set_up_parameters_for_basic_classes();
+}
+
+template <typename Scalling_of_kernels>
+Persistence_heat_maps<Scalling_of_kernels>::Persistence_heat_maps(const char* filename,
+ std::vector<std::vector<double> > filter,
+ bool erase_below_diagonal, size_t number_of_pixels,
+ double min_, double max_, unsigned dimension) {
+ std::vector<std::pair<double, double> > intervals_;
+ if (dimension == std::numeric_limits<unsigned>::max()) {
+ intervals_ = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ intervals_ = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->construct(intervals_, filter, erase_below_diagonal, number_of_pixels, min_, max_);
+ this->set_up_parameters_for_basic_classes();
+}
+
+template <typename Scalling_of_kernels>
+std::vector<std::vector<double> > Persistence_heat_maps<Scalling_of_kernels>::check_and_initialize_maps(
+ const std::vector<Persistence_heat_maps*>& maps) {
+ // checking if all the heat maps are of the same size:
+ for (size_t i = 0; i != maps.size(); ++i) {
+ if (maps[i]->heat_map.size() != maps[0]->heat_map.size()) {
+ std::cerr << "Sizes of Persistence_heat_maps are not compatible. The program will terminate now \n";
+ throw "Sizes of Persistence_heat_maps are not compatible. The program will terminate now \n";
+ }
+ if (maps[i]->heat_map[0].size() != maps[0]->heat_map[0].size()) {
+ std::cerr << "Sizes of Persistence_heat_maps are not compatible. The program will terminate now \n";
+ throw "Sizes of Persistence_heat_maps are not compatible. The program will terminate now \n";
+ }
+ }
+ std::vector<std::vector<double> > heat_maps(maps[0]->heat_map.size());
+ for (size_t i = 0; i != maps[0]->heat_map.size(); ++i) {
+ std::vector<double> v(maps[0]->heat_map[0].size(), 0);
+ heat_maps[i] = v;
+ }
+ return heat_maps;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::compute_median(const std::vector<Persistence_heat_maps*>& maps) {
+ std::vector<std::vector<double> > heat_maps = this->check_and_initialize_maps(maps);
+
+ std::vector<double> to_compute_median(maps.size());
+ for (size_t i = 0; i != heat_maps.size(); ++i) {
+ for (size_t j = 0; j != heat_maps[i].size(); ++j) {
+ for (size_t map_no = 0; map_no != maps.size(); ++map_no) {
+ to_compute_median[map_no] = maps[map_no]->heat_map[i][j];
+ }
+ std::nth_element(to_compute_median.begin(), to_compute_median.begin() + to_compute_median.size() / 2,
+ to_compute_median.end());
+ heat_maps[i][j] = to_compute_median[to_compute_median.size() / 2];
+ }
+ }
+ this->heat_map = heat_maps;
+ this->min_ = maps[0]->min_;
+ this->max_ = maps[0]->max_;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::compute_mean(const std::vector<Persistence_heat_maps*>& maps) {
+ std::vector<std::vector<double> > heat_maps = this->check_and_initialize_maps(maps);
+ for (size_t i = 0; i != heat_maps.size(); ++i) {
+ for (size_t j = 0; j != heat_maps[i].size(); ++j) {
+ double mean = 0;
+ for (size_t map_no = 0; map_no != maps.size(); ++map_no) {
+ mean += maps[map_no]->heat_map[i][j];
+ }
+ heat_maps[i][j] = mean / static_cast<double>(maps.size());
+ }
+ }
+ this->heat_map = heat_maps;
+ this->min_ = maps[0]->min_;
+ this->max_ = maps[0]->max_;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::compute_percentage_of_active(
+ const std::vector<Persistence_heat_maps*>& maps, size_t cutoff) {
+ std::vector<std::vector<double> > heat_maps = this->check_and_initialize_maps(maps);
+
+ for (size_t i = 0; i != heat_maps.size(); ++i) {
+ for (size_t j = 0; j != heat_maps[i].size(); ++j) {
+ size_t number_of_active_levels = 0;
+ for (size_t map_no = 0; map_no != maps.size(); ++map_no) {
+ if (maps[map_no]->heat_map[i][j]) number_of_active_levels++;
+ }
+ if (number_of_active_levels > cutoff) {
+ heat_maps[i][j] = number_of_active_levels;
+ } else {
+ heat_maps[i][j] = 0;
+ }
+ }
+ }
+ this->heat_map = heat_maps;
+ this->min_ = maps[0]->min_;
+ this->max_ = maps[0]->max_;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::plot(const char* filename) const {
+ std::ofstream out;
+ std::stringstream gnuplot_script;
+ gnuplot_script << filename << "_GnuplotScript";
+
+ out.open(gnuplot_script.str().c_str());
+ out << "plot '-' matrix with image" << std::endl;
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ out << this->heat_map[i][j] << " ";
+ }
+ out << std::endl;
+ }
+ out.close();
+ std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ << gnuplot_script.str().c_str() << "\'\"" << std::endl;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::print_to_file(const char* filename) const {
+ std::ofstream out;
+ out.open(filename);
+
+ // First we store this->min_ and this->max_ values:
+ out << this->min_ << " " << this->max_ << std::endl;
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ out << this->heat_map[i][j] << " ";
+ }
+ out << std::endl;
+ }
+ out.close();
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::load_from_file(const char* filename) {
+ bool dbg = false;
+
+ std::ifstream in;
+ in.open(filename);
+
+ // checking if the file exist / if it was open.
+ if (!in.good()) {
+ std::cerr << "The file : " << filename << " does not exist. The program will now terminate \n";
+ throw "The persistence heat map file does not exist. The program will now terminate \n";
+ }
+
+ // now we read the file one by one.
+
+ in >> this->min_ >> this->max_;
+ if (dbg) {
+ std::cerr << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl;
+ }
+
+ std::string temp;
+ std::getline(in, temp);
+ while (in.good()) {
+ std::getline(in, temp);
+ std::stringstream lineSS;
+ lineSS << temp;
+
+ std::vector<double> line_of_heat_map;
+ while (lineSS.good()) {
+ double point;
+
+ lineSS >> point;
+ line_of_heat_map.push_back(point);
+ if (dbg) {
+ std::cout << point << " ";
+ }
+ }
+ if (dbg) {
+ std::cout << std::endl;
+ getchar();
+ }
+
+ if (in.good()) this->heat_map.push_back(line_of_heat_map);
+ }
+ in.close();
+ if (dbg) std::cout << "Done \n";
+}
+
+// Concretizations of virtual methods:
+template <typename Scalling_of_kernels>
+std::vector<double> Persistence_heat_maps<Scalling_of_kernels>::vectorize(int number_of_function) const {
+ // convert this->heat_map into one large vector:
+ size_t size_of_result = 0;
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ size_of_result += this->heat_map[i].size();
+ }
+
+ std::vector<double> result;
+ result.reserve(size_of_result);
+
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ result.push_back(this->heat_map[i][j]);
+ }
+ }
+
+ return result;
+}
+
+template <typename Scalling_of_kernels>
+double Persistence_heat_maps<Scalling_of_kernels>::distance(const Persistence_heat_maps& second, double power) const {
+ // first we need to check if (*this) and second are defined on the same domain and have the same dimensions:
+ if (!this->check_if_the_same(second)) {
+ std::cerr << "The persistence images are of incompatible sizes. We therefore cannot compute the distance between "
+ "them. The program will now terminate";
+ throw "The persistence images are of non compatible sizes. The program will now terminate";
+ }
+
+ // if we are here, we know that the two persistence images are defined on the same domain, so we can start computing
+ // their distances:
+
+ double distance = 0;
+ if (power < std::numeric_limits<double>::max()) {
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ distance += pow(fabs(this->heat_map[i][j] - second.heat_map[i][j]), power);
+ }
+ }
+ } else {
+ // in this case, we compute max norm distance
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ if (distance < fabs(this->heat_map[i][j] - second.heat_map[i][j])) {
+ distance = fabs(this->heat_map[i][j] - second.heat_map[i][j]);
+ }
+ }
+ }
+ }
+ return distance;
+}
+
+template <typename Scalling_of_kernels>
+double Persistence_heat_maps<Scalling_of_kernels>::project_to_R(int number_of_function) const {
+ double result = 0;
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ result += this->heat_map[i][j];
+ }
+ }
+ return result;
+}
+
+template <typename Scalling_of_kernels>
+void Persistence_heat_maps<Scalling_of_kernels>::compute_average(
+ const std::vector<Persistence_heat_maps*>& to_average) {
+ this->compute_mean(to_average);
+}
+
+template <typename Scalling_of_kernels>
+double Persistence_heat_maps<Scalling_of_kernels>::compute_scalar_product(const Persistence_heat_maps& second) const {
+ // first we need to check if (*this) and second are defined on the same domain and have the same dimensions:
+ if (!this->check_if_the_same(second)) {
+ std::cerr << "The persistence images are of incompatible sizes. We therefore cannot compute their scalar "
+ "product. The program will now terminate";
+ throw "The persistence images are of non compatible sizes. The program will now terminate";
+ }
+
+ // if we are here, we know that the two persistence images are defined on the same domain, so we can start computing
+ // their scalar product:
+ double scalar_prod = 0;
+ for (size_t i = 0; i != this->heat_map.size(); ++i) {
+ for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
+ scalar_prod += this->heat_map[i][j] * second.heat_map[i][j];
+ }
+ }
+ return scalar_prod;
+}
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_HEAT_MAPS_H_
diff --git a/include/gudhi/Persistence_intervals.h b/include/gudhi/Persistence_intervals.h
new file mode 100644
index 00000000..3d04d8b7
--- /dev/null
+++ b/include/gudhi/Persistence_intervals.h
@@ -0,0 +1,570 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_INTERVALS_H_
+#define PERSISTENCE_INTERVALS_H_
+
+// gudhi include
+#include <gudhi/read_persistence_from_file.h>
+
+// standard include
+#include <limits>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <utility>
+#include <string>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+/**
+ * This class implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+ *Real_valued_topological_data
+**/
+class Persistence_intervals {
+ public:
+ /**
+ * This is a constructor of the class Persistence_intervals from a text file. Each line of the input file is supposed
+ * to contain two numbers of a type double (or convertible to double)
+ * representing the birth and the death of a persistence interval. If a pair is not sorted so that birth <= death,
+ * the constructor will sort it that way.
+ * The second parameter of the constructor is the dimension of the intervals to be read from the file. If your file
+ * contains only birth-death pairs, use the default value.
+ **/
+ Persistence_intervals(const char* filename, unsigned dimension = std::numeric_limits<unsigned>::max());
+
+ /**
+ * This is a constructor of the class Persistence_intervals from a vector of pairs. Each pair is assumed to represent
+ * a persistence interval. We assume that the first element of each pair is smaller than or equal to the second.
+ **/
+ Persistence_intervals(const std::vector<std::pair<double, double> >& intervals);
+
+ /**
+ * This procedure returns x-range of a given persistence diagram.
+ **/
+ std::pair<double, double> get_x_range() const {
+ double min_ = std::numeric_limits<int>::max();
+ double max_ = -std::numeric_limits<int>::max();
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ if (this->intervals[i].first < min_) min_ = this->intervals[i].first;
+ if (this->intervals[i].second > max_) max_ = this->intervals[i].second;
+ }
+ return std::make_pair(min_, max_);
+ }
+
+ /**
+ * This procedure returns y-range of a given persistence diagram.
+ **/
+ std::pair<double, double> get_y_range() const {
+ double min_ = std::numeric_limits<int>::max();
+ double max_ = -std::numeric_limits<int>::max();
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ if (this->intervals[i].second < min_) min_ = this->intervals[i].second;
+ if (this->intervals[i].second > max_) max_ = this->intervals[i].second;
+ }
+ return std::make_pair(min_, max_);
+ }
+
+ /**
+ * Procedure that computes the vector of lengths of the dominant (i.e. the longest) persistence intervals. The list
+ * is truncated to where_to_cut elements (set by default to 100).
+ **/
+ std::vector<double> length_of_dominant_intervals(size_t where_to_cut = 100) const;
+
+ /**
+ * Procedure that computes the vector of the dominant (i.e. the longest) persistence intervals. The parameter of
+ * the procedure (set by default to 100) is the number of dominant intervals returned.
+ **/
+ std::vector<std::pair<double, double> > dominant_intervals(size_t where_to_cut = 100) const;
+
+ /**
+ * Procedure to compute a histogram of the lengths of the intervals. A histogram is a block plot. The number of
+ * blocks is determined by the first parameter of the function (set by default to 10).
+ * For the sake of argument let us assume that the length of the longest interval is 1 and the number of bins is
+ * 10. In this case the i-th block corresponds to the range [(i-1)/10, i/10].
+ * The value of a block supported on a range [x_0, x_1] is the number of persistence intervals of length between
+ * x_0 and x_1.
+ **/
+ std::vector<size_t> histogram_of_lengths(size_t number_of_bins = 10) const;
+
+ /**
+ * Based on a histogram of intervals lengths computed by the function histogram_of_lengths H the procedure below
+ *computes the cumulative histogram. The i-th position of the resulting histogram
+ * is the sum of values of H for the positions from 0 to i.
+ **/
+ std::vector<size_t> cumulative_histogram_of_lengths(size_t number_of_bins = 10) const;
+
+ /**
+ * In this procedure we assume that each bar is a characteristic function of height equal to its length. The
+ * persistence diagram is a sum of such functions. The procedure below constructs a function which is the
+ * sum of the characteristic functions of the persistence intervals. The first two parameters are the range in which
+ * the function is to be computed and the last parameter is the number of bins in
+ * the discretization of the interval [x_min, x_max].
+ **/
+ std::vector<double> characteristic_function_of_diagram(double x_min, double x_max, size_t number_of_bins = 10) const;
+
+ /**
+ * Cumulative version of the function characteristic_function_of_diagram
+ **/
+ std::vector<double> cumulative_characteristic_function_of_diagram(double x_min, double x_max,
+ size_t number_of_bins = 10) const;
+
+ /**
+ * Compute the function of persistent Betti numbers. The returned value is a vector of pairs. The first element of
+ * each pair is a point where the persistent Betti numbers change;
+ * the second element of each pair is the value of the persistent Betti numbers at that point.
+ **/
+ std::vector<std::pair<double, size_t> > compute_persistent_betti_numbers() const;
+
+ /**
+ * This is a non-optimal procedure that computes the vector of distances from each point of the diagram to its k-th
+ * nearest neighbor (k is a parameter of the procedure). The resulting vector is by default truncated to 10
+ * elements (this value can be changed by using the second parameter of the procedure). The points are returned in
+ * order, starting from the ones which are farthest away from their k-th nearest neighbors.
+ **/
+ std::vector<double> k_n_n(size_t k, size_t where_to_cut = 10) const;
+
+ /**
+* Operator that sends the diagram to a stream.
+**/
+ friend std::ostream& operator<<(std::ostream& out, const Persistence_intervals& intervals) {
+ for (size_t i = 0; i != intervals.intervals.size(); ++i) {
+ out << intervals.intervals[i].first << " " << intervals.intervals[i].second << std::endl;
+ }
+ return out;
+ }
+
+ /**
+ * Generating gnuplot script to plot the interval.
+ **/
+ void plot(const char* filename, double min_x = std::numeric_limits<double>::max(),
+ double max_x = std::numeric_limits<double>::max(), double min_y = std::numeric_limits<double>::max(),
+ double max_y = std::numeric_limits<double>::max()) const {
+    // this function creates a gnuplot script file that allows plotting the persistence diagram.
+ std::ofstream out;
+
+ std::stringstream gnuplot_script;
+ gnuplot_script << filename << "_GnuplotScript";
+
+ out.open(gnuplot_script.str().c_str());
+
+ std::pair<double, double> min_max_values = this->get_x_range();
+ if (min_x == max_x) {
+ out << "set xrange [" << min_max_values.first - 0.1 * (min_max_values.second - min_max_values.first) << " : "
+ << min_max_values.second + 0.1 * (min_max_values.second - min_max_values.first) << " ]" << std::endl;
+ out << "set yrange [" << min_max_values.first - 0.1 * (min_max_values.second - min_max_values.first) << " : "
+ << min_max_values.second + 0.1 * (min_max_values.second - min_max_values.first) << " ]" << std::endl;
+ } else {
+ out << "set xrange [" << min_x << " : " << max_x << " ]" << std::endl;
+ out << "set yrange [" << min_y << " : " << max_y << " ]" << std::endl;
+ }
+ out << "plot '-' using 1:2 notitle \"" << filename << "\", \\" << std::endl;
+ out << " '-' using 1:2 notitle with lp" << std::endl;
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ out << this->intervals[i].first << " " << this->intervals[i].second << std::endl;
+ }
+ out << "EOF" << std::endl;
+ out << min_max_values.first - 0.1 * (min_max_values.second - min_max_values.first) << " "
+ << min_max_values.first - 0.1 * (min_max_values.second - min_max_values.first) << std::endl;
+ out << min_max_values.second + 0.1 * (min_max_values.second - min_max_values.first) << " "
+ << min_max_values.second + 0.1 * (min_max_values.second - min_max_values.first) << std::endl;
+
+ out.close();
+
+ std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ << gnuplot_script.str().c_str() << "\'\"" << std::endl;
+ }
+
+ /**
+ * Return the number of points in the diagram.
+ **/
+ size_t size() const { return this->intervals.size(); }
+
+ /**
+ * Return the persistence interval at the given position. Note that intervals are not sorted with respect to their
+ *lengths.
+ **/
+ inline std::pair<double, double> operator[](size_t i) const {
+    if (i >= this->intervals.size()) throw("Index out of range! Operator [], Persistence_intervals class\n");
+ return this->intervals[i];
+ }
+
+ // Implementations of functions for various concepts.
+ /**
+ * This is a simple function projecting the persistence intervals to a real number. The function we use here is the
+ * sum of squared lengths of intervals. It can be naturally interpreted as
+ * a sum of step functions, where the step height is equal to the length of the interval.
+ * At the moment this function is not tested, since it is quite likely to be changed in the future. Given this, when
+ * using it, keep in mind that it
+ * will most likely be changed in the next versions.
+ **/
+ double project_to_R(int number_of_function) const;
+ /**
+ * The function gives the number of possible projections to R. This function is required by the
+ *Real_valued_topological_data concept.
+ **/
+ size_t number_of_projections_to_R() const { return this->number_of_functions_for_projections_to_reals; }
+
+ /**
+ * Return a family of vectors obtained from the persistence diagram. The i-th vector consists of the lengths of the
+ * i dominant persistence intervals.
+ **/
+ std::vector<double> vectorize(int number_of_function) const {
+ return this->length_of_dominant_intervals(number_of_function);
+ }
+ /**
+ * This function returns the number of functions that allow vectorization of a persistence diagram. It is required
+ * by the Vectorized_topological_data concept.
+ **/
+ size_t number_of_vectorize_functions() const { return this->number_of_functions_for_vectorization; }
+
+ // end of implementation of functions needed for concepts.
+
+ // For visualization use output from vectorize and build histograms.
+ std::vector<std::pair<double, double> > output_for_visualization() { return this->intervals; }
+
+ protected:
+ void set_up_numbers_of_functions_for_vectorization_and_projections_to_reals() {
+ // warning, this function can be only called after filling in the intervals vector.
+ this->number_of_functions_for_vectorization = this->intervals.size();
+ this->number_of_functions_for_projections_to_reals = 1;
+ }
+
+ std::vector<std::pair<double, double> > intervals;
+ size_t number_of_functions_for_vectorization;
+ size_t number_of_functions_for_projections_to_reals;
+};
+
+Persistence_intervals::Persistence_intervals(const char* filename, unsigned dimension) {
+ if (dimension == std::numeric_limits<unsigned>::max()) {
+ this->intervals = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ this->intervals = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+} // Persistence_intervals
+
+Persistence_intervals::Persistence_intervals(const std::vector<std::pair<double, double> >& intervals_)
+ : intervals(intervals_) {
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+}
+
+std::vector<double> Persistence_intervals::length_of_dominant_intervals(size_t where_to_cut) const {
+ std::vector<double> result(this->intervals.size());
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ result[i] = this->intervals[i].second - this->intervals[i].first;
+ }
+ std::sort(result.begin(), result.end(), std::greater<double>());
+
+ result.resize(std::min(where_to_cut, result.size()));
+ return result;
+} // length_of_dominant_intervals
+
+bool compare(const std::pair<size_t, double>& first, const std::pair<size_t, double>& second) {
+ return first.second > second.second;
+}
+
+std::vector<std::pair<double, double> > Persistence_intervals::dominant_intervals(size_t where_to_cut) const {
+ bool dbg = false;
+ std::vector<std::pair<size_t, double> > position_length_vector(this->intervals.size());
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ position_length_vector[i] = std::make_pair(i, this->intervals[i].second - this->intervals[i].first);
+ }
+
+ std::sort(position_length_vector.begin(), position_length_vector.end(), compare);
+
+ std::vector<std::pair<double, double> > result;
+ result.reserve(std::min(where_to_cut, position_length_vector.size()));
+
+ for (size_t i = 0; i != std::min(where_to_cut, position_length_vector.size()); ++i) {
+ result.push_back(this->intervals[position_length_vector[i].first]);
+ if (dbg)
+ std::cerr << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second
+ << std::endl;
+ }
+
+ return result;
+} // dominant_intervals
+
+std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of_bins) const {
+ bool dbg = false;
+
+ if (dbg) std::cerr << "this->intervals.size() : " << this->intervals.size() << std::endl;
+ // first find the length of the longest interval:
+ double lengthOfLongest = 0;
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ if ((this->intervals[i].second - this->intervals[i].first) > lengthOfLongest) {
+ lengthOfLongest = this->intervals[i].second - this->intervals[i].first;
+ }
+ }
+
+ if (dbg) {
+ std::cerr << "lengthOfLongest : " << lengthOfLongest << std::endl;
+ }
+
+ // this is a container we will use to store the resulting histogram
+ std::vector<size_t> result(number_of_bins + 1, 0);
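+  // Note: the vector has number_of_bins + 1 entries, because the longest interval has relative length exactly 1
+  // and therefore falls into the extra bin at index number_of_bins.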
+
+ // for every persistence interval in our collection.
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ // compute its length relative to the length of the dominant interval:
+ double relative_length_of_this_interval = (this->intervals[i].second - this->intervals[i].first) / lengthOfLongest;
+
+ // given the relative length (between 0 and 1) compute to which bin should it contribute.
+ size_t position = (size_t)(relative_length_of_this_interval * number_of_bins);
+
+ ++result[position];
+
+ if (dbg) {
+ std::cerr << "i : " << i << std::endl;
+ std::cerr << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n";
+ std::cerr << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl;
+ std::cerr << "position : " << position << std::endl;
+ getchar();
+ }
+ }
+
+ if (dbg) {
+ for (size_t i = 0; i != result.size(); ++i) std::cerr << result[i] << std::endl;
+ }
+ return result;
+}
+
+std::vector<size_t> Persistence_intervals::cumulative_histogram_of_lengths(size_t number_of_bins) const {
+ std::vector<size_t> histogram = this->histogram_of_lengths(number_of_bins);
+ std::vector<size_t> result(histogram.size());
+
+ size_t sum = 0;
+ for (size_t i = 0; i != histogram.size(); ++i) {
+ sum += histogram[i];
+ result[i] = sum;
+ }
+ return result;
+}
+
+std::vector<double> Persistence_intervals::characteristic_function_of_diagram(double x_min, double x_max,
+ size_t number_of_bins) const {
+ bool dbg = false;
+
+ std::vector<double> result(number_of_bins);
+ std::fill(result.begin(), result.end(), 0);
+
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ if (dbg) {
+ std::cerr << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
+ }
+
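+    // Map the birth (beginIt) and the death (endIt) of this interval to bin indices, clamping them to the
+    // discretization of [x_min, x_max]: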
+ size_t beginIt = 0;
+ if (this->intervals[i].first < x_min) beginIt = 0;
+ if (this->intervals[i].first >= x_max) beginIt = result.size();
+ if ((this->intervals[i].first > x_min) && (this->intervals[i].first < x_max)) {
+ beginIt = number_of_bins * (this->intervals[i].first - x_min) / (x_max - x_min);
+ }
+
+ size_t endIt = 0;
+ if (this->intervals[i].second < x_min) endIt = 0;
+ if (this->intervals[i].second >= x_max) endIt = result.size();
+ if ((this->intervals[i].second > x_min) && (this->intervals[i].second < x_max)) {
+ endIt = number_of_bins * (this->intervals[i].second - x_min) / (x_max - x_min);
+ }
+
+ if (beginIt > endIt) {
+ beginIt = endIt;
+ }
+
+ if (dbg) {
+ std::cerr << "beginIt : " << beginIt << std::endl;
+ std::cerr << "endIt : " << endIt << std::endl;
+ }
+
+ for (size_t pos = beginIt; pos != endIt; ++pos) {
+ result[pos] += ((x_max - x_min) / static_cast<double>(number_of_bins)) *
+ (this->intervals[i].second - this->intervals[i].first);
+ }
+ if (dbg) {
+ std::cerr << "Result at this stage \n";
+ for (size_t aa = 0; aa != result.size(); ++aa) {
+ std::cerr << result[aa] << " ";
+ }
+ std::cerr << std::endl;
+ }
+ }
+ return result;
+} // characteristic_function_of_diagram
+
+std::vector<double> Persistence_intervals::cumulative_characteristic_function_of_diagram(double x_min, double x_max,
+ size_t number_of_bins) const {
+ std::vector<double> intsOfBars = this->characteristic_function_of_diagram(x_min, x_max, number_of_bins);
+ std::vector<double> result(intsOfBars.size());
+ double sum = 0;
+ for (size_t i = 0; i != intsOfBars.size(); ++i) {
+ sum += intsOfBars[i];
+ result[i] = sum;
+ }
+ return result;
+} // cumulative_characteristic_function_of_diagram
+
+template <typename T>
+bool compare_first_element_of_pair(const std::pair<T, bool>& f, const std::pair<T, bool>& s) {
+ return (f.first < s.first);
+}
+
+std::vector<std::pair<double, size_t> > Persistence_intervals::compute_persistent_betti_numbers() const {
+ std::vector<std::pair<double, bool> > places_where_pbs_change(2 * this->intervals.size());
+
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ places_where_pbs_change[2 * i] = std::make_pair(this->intervals[i].first, true);
+ places_where_pbs_change[2 * i + 1] = std::make_pair(this->intervals[i].second, false);
+ }
+
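+  // Sort the birth/death events by position and sweep them from left to right: every birth event (true) increments
+  // the persistent Betti number, every death event (false) decrements it.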
+ std::sort(places_where_pbs_change.begin(), places_where_pbs_change.end(), compare_first_element_of_pair<double>);
+ size_t pbn = 0;
+ std::vector<std::pair<double, size_t> > pbns(places_where_pbs_change.size());
+ for (size_t i = 0; i != places_where_pbs_change.size(); ++i) {
+ if (places_where_pbs_change[i].second == true) {
+ ++pbn;
+ } else {
+ --pbn;
+ }
+ pbns[i] = std::make_pair(places_where_pbs_change[i].first, pbn);
+ }
+ return pbns;
+}
+
+inline double compute_euclidean_distance(const std::pair<double, double>& f, const std::pair<double, double>& s) {
+ return sqrt((f.first - s.first) * (f.first - s.first) + (f.second - s.second) * (f.second - s.second));
+}
+
+std::vector<double> Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) const {
+ bool dbg = false;
+ if (dbg) {
+ std::cerr << "Here are the intervals : \n";
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ std::cerr << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n";
+ }
+ getchar();
+ }
+
+ std::vector<double> result;
+  // compute all-to-all distances between the points of the diagram. Also, consider points on the diagonal with
+  // infinite multiplicity.
+ std::vector<std::vector<double> > distances(this->intervals.size());
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ std::vector<double> aa(this->intervals.size());
+ std::fill(aa.begin(), aa.end(), 0);
+ distances[i] = aa;
+ }
+ std::vector<double> distances_from_diagonal(this->intervals.size());
+ std::fill(distances_from_diagonal.begin(), distances_from_diagonal.end(), 0);
+
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ std::vector<double> distancesFromI;
+ for (size_t j = i + 1; j != this->intervals.size(); ++j) {
+ distancesFromI.push_back(compute_euclidean_distance(this->intervals[i], this->intervals[j]));
+ }
+    // also add the distance from this point to the diagonal:
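+    // (the closest point of the diagonal to a point (b, d) is its orthogonal projection ((b + d) / 2, (b + d) / 2))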
+ double distanceToDiagonal = compute_euclidean_distance(
+ this->intervals[i], std::make_pair(0.5 * (this->intervals[i].first + this->intervals[i].second),
+ 0.5 * (this->intervals[i].first + this->intervals[i].second)));
+ distances_from_diagonal[i] = distanceToDiagonal;
+
+ if (dbg) {
+ std::cerr << "Here are the distances form the point : [" << this->intervals[i].first << " , "
+ << this->intervals[i].second << "] in the diagram \n";
+ for (size_t aa = 0; aa != distancesFromI.size(); ++aa) {
+ std::cerr << "To : " << i + aa << " : " << distancesFromI[aa] << " ";
+ }
+ std::cerr << std::endl;
+ getchar();
+ }
+
+ // filling in the distances matrix:
+ for (size_t j = i + 1; j != this->intervals.size(); ++j) {
+ distances[i][j] = distancesFromI[j - i - 1];
+ distances[j][i] = distancesFromI[j - i - 1];
+ }
+ }
+ if (dbg) {
+ std::cerr << "Here is the distance matrix : \n";
+ for (size_t i = 0; i != distances.size(); ++i) {
+ for (size_t j = 0; j != distances.size(); ++j) {
+ std::cerr << distances[i][j] << " ";
+ }
+ std::cerr << std::endl;
+ }
+ std::cerr << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl;
+ for (size_t i = 0; i != distances_from_diagonal.size(); ++i) {
+ std::cerr << distances_from_diagonal[i] << " ";
+ }
+ std::cerr << std::endl << std::endl;
+ getchar();
+ }
+
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ std::vector<double> distancesFromI = distances[i];
+ distancesFromI.push_back(distances_from_diagonal[i]);
+
+ // sort it:
+ std::sort(distancesFromI.begin(), distancesFromI.end(), std::greater<double>());
+
+ if (k > distancesFromI.size()) {
+ if (dbg) {
+ std::cerr << "There are not enough neighbors in your set. We set the result to plus infty \n";
+ }
+ result.push_back(std::numeric_limits<double>::max());
+ } else {
+ if (distances_from_diagonal[i] > distancesFromI[k]) {
+ if (dbg) {
+ std::cerr << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n";
+ }
+ result.push_back(distances_from_diagonal[i]);
+ } else {
+ result.push_back(distancesFromI[k]);
+ }
+ }
+ }
+ std::sort(result.begin(), result.end(), std::greater<double>());
+ result.resize(std::min(result.size(), where_to_cut));
+
+ return result;
+}
+
+double Persistence_intervals::project_to_R(int number_of_function) const {
+ double result = 0;
+
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ result +=
+ (this->intervals[i].second - this->intervals[i].first) * (this->intervals[i].second - this->intervals[i].first);
+ }
+
+ return result;
+}
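+
+// A minimal usage sketch of the class above (illustration only, not part of the library). It assumes a hypothetical
+// input file "diagram.txt" containing one birth-death pair per line:
+//
+//   Persistence_intervals p("diagram.txt");
+//   std::vector<std::pair<double, double> > dominant = p.dominant_intervals(20);
+//   std::vector<size_t> histogram = p.histogram_of_lengths(10);
+//   std::vector<std::pair<double, size_t> > pbns = p.compute_persistent_betti_numbers();
+//   p.plot("diagram");  // writes the gnuplot script "diagram_GnuplotScript"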
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_INTERVALS_H_
diff --git a/include/gudhi/Persistence_intervals_with_distances.h b/include/gudhi/Persistence_intervals_with_distances.h
new file mode 100644
index 00000000..79908883
--- /dev/null
+++ b/include/gudhi/Persistence_intervals_with_distances.h
@@ -0,0 +1,63 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_INTERVALS_WITH_DISTANCES_H_
+#define PERSISTENCE_INTERVALS_WITH_DISTANCES_H_
+
+#include <gudhi/Persistence_intervals.h>
+#include <gudhi/Bottleneck.h>
+
+#include <limits>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+class Persistence_intervals_with_distances : public Persistence_intervals {
+ public:
+ using Persistence_intervals::Persistence_intervals;
+
+ /**
+ * Computation of the distance from the current persistence diagram to the persistence diagram given as a parameter
+ * of this function.
+ * The second-to-last parameter, power, is here in case we would like to compute the p-th Wasserstein distance. At
+ * the moment, this method only implements the bottleneck distance,
+ * which is the infinity Wasserstein distance. Therefore any power other than the default
+ * std::numeric_limits<double>::max() will cause an
+ * exception to be thrown.
+ * The last parameter, tolerance, is an additive error of the approximation, set by default to (almost) zero.
+ **/
+ double distance(const Persistence_intervals_with_distances& second, double power = std::numeric_limits<double>::max(),
+ double tolerance = (std::numeric_limits<double>::min)()) const {
+ if (power >= std::numeric_limits<double>::max()) {
+ return Gudhi::persistence_diagram::bottleneck_distance(this->intervals, second.intervals, tolerance);
+ } else {
+ std::cerr << "At the moment Gudhi do not support Wasserstein distances. We only support Bottleneck distance."
+ << std::endl;
+ throw "At the moment Gudhi do not support Wasserstein distances. We only support Bottleneck distance.";
+ }
+ }
+};
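+
+// A minimal usage sketch (illustration only, not part of the library). It assumes two hypothetical input files
+// "diagram_A.txt" and "diagram_B.txt", each containing one birth-death pair per line:
+//
+//   Persistence_intervals_with_distances a("diagram_A.txt");
+//   Persistence_intervals_with_distances b("diagram_B.txt");
+//   double d = a.distance(b);  // default power, i.e. the bottleneck distance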
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_INTERVALS_WITH_DISTANCES_H_
diff --git a/include/gudhi/Persistence_landscape.h b/include/gudhi/Persistence_landscape.h
new file mode 100644
index 00000000..c5aa7867
--- /dev/null
+++ b/include/gudhi/Persistence_landscape.h
@@ -0,0 +1,1376 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_LANDSCAPE_H_
+#define PERSISTENCE_LANDSCAPE_H_
+
+// gudhi include
+#include <gudhi/read_persistence_from_file.h>
+#include <gudhi/common_persistence_representations.h>
+
+// standard include
+#include <cmath>
+#include <iostream>
+#include <vector>
+#include <limits>
+#include <fstream>
+#include <sstream>
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <functional>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+// pre declaration
+class Persistence_landscape;
+template <typename operation>
+Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscape& land1,
+ const Persistence_landscape& land2);
+
+/**
+ * \class Persistence_landscape Persistence_landscape.h gudhi/Persistence_landscape.h
+ * \brief A class implementing persistence landscapes data structures.
+ *
+ * \ingroup Persistence_representations
+ *
+ * \details
+ * For theoretical description, please consult <i>Statistical topological data analysis using persistence
+ * landscapes</i>\cite bubenik_landscapes_2015 , and for details of algorithms,
+ * <i>A persistence landscapes toolbox for topological statistics</i>\cite bubenik_dlotko_landscapes_2016.
+ *
+ * Persistence landscapes allow vectorization, computations of distances, computations of projections to Real,
+ * computations of averages and scalar products. Therefore they implement suitable interfaces.
+ * It implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+ * Real_valued_topological_data, Topological_data_with_averages, Topological_data_with_scalar_product
+ *
+ * Note that at the moment, due to rounding errors during the construction of persistence landscapes, elements which
+ * differ by less than 0.000005 are considered the same. If the scale of your persistence diagrams is comparable to
+ * this value, please rescale them before using this code.
+ *
+**/
+class Persistence_landscape {
+ public:
+ /**
+ * Default constructor.
+ **/
+ Persistence_landscape() { this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals(); }
+
+ /**
+ * Constructor that takes as an input a vector of birth-death pairs.
+ **/
+ Persistence_landscape(const std::vector<std::pair<double, double> >& p,
+ size_t number_of_levels = std::numeric_limits<size_t>::max());
+
+ /**
+ * Constructor that reads persistence intervals from a file and creates a persistence landscape. The format of the
+ * input file is the following: each line contains one birth-death pair. The last line is assumed
+ * to be empty. Even if the points within a line are not ordered, they will be ordered when the input is read.
+ **/
+ Persistence_landscape(const char* filename, size_t dimension = std::numeric_limits<unsigned>::max(),
+ size_t number_of_levels = std::numeric_limits<size_t>::max());
+
+ /**
+ * This procedure loads a landscape from a file. It erases all the data that was previously stored in this landscape.
+ **/
+ void load_landscape_from_file(const char* filename);
+
+ /**
+ * The procedure stores a landscape in a file. The file can later be used by the procedure load_landscape_from_file.
+ **/
+ void print_to_file(const char* filename) const;
+
+ /**
+ * This function computes the integral of the landscape (defined formally as the sum of the integrals over R of all
+ * landscape functions).
+ **/
+ double compute_integral_of_landscape() const;
+
+ /**
+ * This function computes the integral of the 'level'-th level of the landscape.
+ **/
+ double compute_integral_of_a_level_of_a_landscape(size_t level) const;
+
+ /**
+ * This function computes the integral of the p-th power of the landscape (defined formally as the sum of the
+ * integrals over R of the p-th powers of all landscape functions).
+ **/
+ double compute_integral_of_landscape(double p) const; // this function compute integral of p-th power of landscape.
+
+ /**
+ * A function that computes the value of a landscape at a given point. The parameters of the function are: unsigned
+ * level and double x.
+ * The procedure will compute the value of the level-th landscape function at the point x.
+ **/
+ double compute_value_at_a_given_point(unsigned level, double x) const;
+
+ /**
+ * Writes the landscape to a stream. The i-th level landscape starts with the string "lambda_i". Then the points
+ * defining the landscape follow.
+ * If those points are joined with line segments, we obtain the i-th landscape function.
+ **/
+ friend std::ostream& operator<<(std::ostream& out, Persistence_landscape& land);
+
+ template <typename operation>
+ friend Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscape& land1,
+ const Persistence_landscape& land2);
+
+ /**
+ *\private A function that computes the sum of two landscapes.
+ **/
+ friend Persistence_landscape add_two_landscapes(const Persistence_landscape& land1,
+ const Persistence_landscape& land2) {
+ return operation_on_pair_of_landscapes<std::plus<double> >(land1, land2);
+ }
+
+ /**
+ *\private A function that computes the difference of two landscapes.
+ **/
+ friend Persistence_landscape subtract_two_landscapes(const Persistence_landscape& land1,
+ const Persistence_landscape& land2) {
+ return operation_on_pair_of_landscapes<std::minus<double> >(land1, land2);
+ }
+
+ /**
+ * An operator + that computes the sum of two landscapes.
+ **/
+ friend Persistence_landscape operator+(const Persistence_landscape& first, const Persistence_landscape& second) {
+ return add_two_landscapes(first, second);
+ }
+
+ /**
+ * An operator - that computes the difference of two landscapes.
+ **/
+ friend Persistence_landscape operator-(const Persistence_landscape& first, const Persistence_landscape& second) {
+ return subtract_two_landscapes(first, second);
+ }
+
+ /**
+ * An operator * that allows multiplication of a landscape by a real number.
+ **/
+ friend Persistence_landscape operator*(const Persistence_landscape& first, double con) {
+ return first.multiply_lanscape_by_real_number_not_overwrite(con);
+ }
+
+ /**
+ * An operator * that allows multiplication of a landscape by a real number (order of parameters swapped).
+ **/
+ friend Persistence_landscape operator*(double con, const Persistence_landscape& first) {
+ return first.multiply_lanscape_by_real_number_not_overwrite(con);
+ }
+
+ /**
+ * Operator +=. The second parameter is a persistence landscape.
+ **/
+ Persistence_landscape operator+=(const Persistence_landscape& rhs) {
+ *this = *this + rhs;
+ return *this;
+ }
+
+ /**
+ * Operator -=. The second parameter is a persistence landscape.
+ **/
+ Persistence_landscape operator-=(const Persistence_landscape& rhs) {
+ *this = *this - rhs;
+ return *this;
+ }
+
+ /**
+ * Operator *=. The second parameter is a real number by which the y values of all landscape functions are multiplied.
+ *The x-values remain unchanged.
+ **/
+ Persistence_landscape operator*=(double x) {
+ *this = *this * x;
+ return *this;
+ }
+
+ /**
+ * Operator /=. The second parameter is a real number.
+ **/
+ Persistence_landscape operator/=(double x) {
+ if (x == 0) throw("In operator /=, division by 0. Program terminated.");
+ *this = *this * (1 / x);
+ return *this;
+ }
+
+ /**
+ * An operator to compare two persistence landscapes.
+ **/
+ bool operator==(const Persistence_landscape& rhs) const;
+
+ /**
+ * An operator to compare two persistence landscapes.
+ **/
+ bool operator!=(const Persistence_landscape& rhs) const { return !((*this) == rhs); }
+
+ /**
+ * Computations of maximum (y) value of landscape.
+ **/
+ double compute_maximum() const {
+ double maxValue = 0;
+ if (this->land.size()) {
+ maxValue = -std::numeric_limits<int>::max();
+ for (size_t i = 0; i != this->land[0].size(); ++i) {
+ if (this->land[0][i].second > maxValue) maxValue = this->land[0][i].second;
+ }
+ }
+ return maxValue;
+ }
+
+ /**
+ *\private Computations of minimum (y) value of landscape.
+ **/
+ double compute_minimum() const {
+ double minValue = 0;
+ if (this->land.size()) {
+ minValue = std::numeric_limits<int>::max();
+ for (size_t i = 0; i != this->land[0].size(); ++i) {
+ if (this->land[0][i].second < minValue) minValue = this->land[0][i].second;
+ }
+ }
+ return minValue;
+ }
+
+ /**
+ *\private Computations of a \f$L^i\f$ norm of landscape, where i is the input parameter.
+ **/
+ double compute_norm_of_landscape(double i) {
+ Persistence_landscape l;
+ if (i < std::numeric_limits<double>::max()) {
+ return compute_distance_of_landscapes(*this, l, i);
+ } else {
+ return compute_max_norm_distance_of_landscapes(*this, l);
+ }
+ }
+
+ /**
+ * An operator to compute the value of a landscape in the level 'level' at the argument 'x'.
+ **/
+ double operator()(unsigned level, double x) const { return this->compute_value_at_a_given_point(level, x); }
+
+ /**
+ *\private Computations of \f$L^{\infty}\f$ distance between two landscapes.
+ **/
+ friend double compute_max_norm_distance_of_landscapes(const Persistence_landscape& first,
+ const Persistence_landscape& second);
+
+ /**
+ *\private Computations of \f$L^{p}\f$ distance between two landscapes. p is the parameter of the procedure.
+ **/
+ friend double compute_distance_of_landscapes(const Persistence_landscape& first, const Persistence_landscape& second,
+ double p);
+
+ /**
+ * Function to compute the absolute value of a PL function. The representation of persistence landscapes allows
+ * storing general PL functions. When computing the distance between two landscapes, we compute the difference
+ * between them. In this case, a general PL function with negative values can appear as a result. Then, in order to
+ * compute the distance, we need to take its absolute value. This is the purpose of this procedure.
+ **/
+ Persistence_landscape abs();
+
+ Persistence_landscape* new_abs();
+
+ /**
+ * Computes the number of landscape functions.
+ **/
+ size_t size() const { return this->land.size(); }
+
+ /**
+ * Compute the maximal value of the lambda-level landscape.
+ **/
+ double find_max(unsigned lambda) const;
+
+ /**
+ *\private Function to compute inner (scalar) product of two landscapes.
+ **/
+ friend double compute_inner_product(const Persistence_landscape& l1, const Persistence_landscape& l2);
+
+ // Implementations of functions for various concepts.
+
+ /**
+ * The number of projections to R is defined to be the number of nonzero landscape functions. The i-th projection is
+ * the integral of the i-th landscape function over the whole of R.
+ * This function is required by the Real_valued_topological_data concept.
+ * At the moment this function is not tested, since it is quite likely to be changed in the future. Given this, when
+ * using it, keep in mind that it
+ * will most likely be changed in the next versions.
+ **/
+ double project_to_R(int number_of_function) const {
+ return this->compute_integral_of_a_level_of_a_landscape((size_t)number_of_function);
+ }
+
+ /**
+ * The function gives the number of possible projections to R. This function is required by the
+ *Real_valued_topological_data concept.
+ **/
+ size_t number_of_projections_to_R() const { return this->number_of_functions_for_projections_to_reals; }
+
+ /**
+ * This function produces a vector of doubles based on the landscape. It is required by the
+ * Vectorized_topological_data concept.
+ */
+ std::vector<double> vectorize(int number_of_function) const {
+ // TODO(PD) think of something smarter over here
+ std::vector<double> v;
+    if ((size_t)number_of_function >= this->land.size()) {
+ return v;
+ }
+ v.reserve(this->land[number_of_function].size());
+ for (size_t i = 0; i != this->land[number_of_function].size(); ++i) {
+ v.push_back(this->land[number_of_function][i].second);
+ }
+ return v;
+ }
+ /**
+ * This function returns the number of functions that allow vectorization of the persistence landscape. It is
+ * required by the Vectorized_topological_data concept.
+ **/
+ size_t number_of_vectorize_functions() const { return this->number_of_functions_for_vectorization; }
+
+ /**
+ * A function to compute the average persistence landscape, based on a vector of persistence landscapes.
+ * This function is required by the Topological_data_with_averages concept.
+ **/
+ void compute_average(const std::vector<Persistence_landscape*>& to_average) {
+ bool dbg = false;
+
+ if (dbg) {
+ std::cerr << "to_average.size() : " << to_average.size() << std::endl;
+ }
+
+ std::vector<Persistence_landscape*> nextLevelMerge(to_average.size());
+ for (size_t i = 0; i != to_average.size(); ++i) {
+ nextLevelMerge[i] = to_average[i];
+ }
+    bool is_this_first_level = true;  // in the loop we will dynamically create a number of intermediate landscapes.
+    // We have to clean them up, but we cannot delete the initial landscapes we are averaging. We simply check,
+    // using this extra variable, whether nextLevelMerge contains the input landscapes or the ones created in the
+    // loop.
+
+ while (nextLevelMerge.size() != 1) {
+ if (dbg) {
+ std::cerr << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl;
+ }
+ std::vector<Persistence_landscape*> nextNextLevelMerge;
+ nextNextLevelMerge.reserve(to_average.size());
+ for (size_t i = 0; i < nextLevelMerge.size(); i = i + 2) {
+ if (dbg) {
+ std::cerr << "i : " << i << std::endl;
+ }
+ Persistence_landscape* l = new Persistence_landscape;
+ if (i + 1 != nextLevelMerge.size()) {
+ (*l) = (*nextLevelMerge[i]) + (*nextLevelMerge[i + 1]);
+ } else {
+ (*l) = *nextLevelMerge[i];
+ }
+ nextNextLevelMerge.push_back(l);
+ }
+ if (dbg) {
+ std::cerr << "After this iteration \n";
+ getchar();
+ }
+
+ if (!is_this_first_level) {
+        // deallocate the memory if the vector nextLevelMerge does not consist of the initial landscapes
+ for (size_t i = 0; i != nextLevelMerge.size(); ++i) {
+ delete nextLevelMerge[i];
+ }
+ }
+ is_this_first_level = false;
+ nextLevelMerge.swap(nextNextLevelMerge);
+ }
+ (*this) = (*nextLevelMerge[0]);
+ (*this) *= 1 / static_cast<double>(to_average.size());
+ }
+
+ /**
+ * A function to compute the distance between persistence landscapes.
+ * The parameter of this function is a Persistence_landscape.
+ * This function is required by the Topological_data_with_distances concept.
+ * For the max-norm distance, set power to std::numeric_limits<double>::max().
+ **/
+ double distance(const Persistence_landscape& second, double power = 1) const {
+ if (power < std::numeric_limits<double>::max()) {
+ return compute_distance_of_landscapes(*this, second, power);
+ } else {
+ return compute_max_norm_distance_of_landscapes(*this, second);
+ }
+ }
+
+ /**
+ * A function to compute the scalar product of persistence landscapes.
+ * The parameter of this function is a Persistence_landscape.
+ * This function is required by the Topological_data_with_scalar_product concept.
+ **/
+ double compute_scalar_product(const Persistence_landscape& second) const {
+ return compute_inner_product((*this), second);
+ }
+ // end of implementation of functions needed for concepts.
+
+ /**
+ * This procedure returns the y-range of the persistence landscape at the given level. If the default value is used,
+ * the y-range of the 0th level landscape is returned (this range contains the ranges of all other levels).
+ **/
+ std::pair<double, double> get_y_range(size_t level = 0) const {
+ std::pair<double, double> result;
+ if (level < this->land.size()) {
+ double maxx = this->compute_maximum();
+ double minn = this->compute_minimum();
+ result = std::make_pair(minn, maxx);
+ } else {
+ result = std::make_pair(0, 0);
+ }
+ return result;
+ }
+
+ // a function used to create a gnuplot script for visualization of landscapes
+ void plot(const char* filename, double xRangeBegin = std::numeric_limits<double>::max(),
+ double xRangeEnd = std::numeric_limits<double>::max(),
+ double yRangeBegin = std::numeric_limits<double>::max(),
+ double yRangeEnd = std::numeric_limits<double>::max(), int from = std::numeric_limits<int>::max(),
+ int to = std::numeric_limits<int>::max());
+
+ protected:
+ std::vector<std::vector<std::pair<double, double> > > land;
+ size_t number_of_functions_for_vectorization;
+ size_t number_of_functions_for_projections_to_reals;
+
+ void construct_persistence_landscape_from_barcode(const std::vector<std::pair<double, double> >& p,
+ size_t number_of_levels = std::numeric_limits<size_t>::max());
+ Persistence_landscape multiply_lanscape_by_real_number_not_overwrite(double x) const;
+ void multiply_lanscape_by_real_number_overwrite(double x);
+ friend double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1,
+ const Persistence_landscape& pl2);
+
+ void set_up_numbers_of_functions_for_vectorization_and_projections_to_reals() {
+ // warning, this function can be only called after filling in the intervals vector.
+ this->number_of_functions_for_vectorization = this->land.size();
+ this->number_of_functions_for_projections_to_reals = this->land.size();
+ }
+};
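+
+// A minimal usage sketch of the class above (illustration only, not part of the library). It assumes two
+// hypothetical vectors of birth-death pairs, diag1 and diag2:
+//
+//   Persistence_landscape l1(diag1), l2(diag2);
+//   Persistence_landscape average = 0.5 * (l1 + l2);
+//   double d_2 = l1.distance(l2, 2.);                                    // L^2 distance
+//   double d_inf = l1.distance(l2, std::numeric_limits<double>::max());  // sup-norm distance
+//   double sp = l1.compute_scalar_product(l2);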
+
+Persistence_landscape::Persistence_landscape(const char* filename, size_t dimension, size_t number_of_levels) {
+ std::vector<std::pair<double, double> > barcode;
+  if (dimension != std::numeric_limits<unsigned>::max()) {
+ barcode = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ } else {
+ barcode = read_persistence_intervals_in_one_dimension_from_file(filename);
+ }
+ this->construct_persistence_landscape_from_barcode(barcode, number_of_levels);
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+}
+
+bool operatorEqualDbg = false;
+bool Persistence_landscape::operator==(const Persistence_landscape& rhs) const {
+ if (this->land.size() != rhs.land.size()) {
+ if (operatorEqualDbg) std::cerr << "1\n";
+ return false;
+ }
+ for (size_t level = 0; level != this->land.size(); ++level) {
+ if (this->land[level].size() != rhs.land[level].size()) {
+ if (operatorEqualDbg) std::cerr << "this->land[level].size() : " << this->land[level].size() << "\n";
+ if (operatorEqualDbg) std::cerr << "rhs.land[level].size() : " << rhs.land[level].size() << "\n";
+ if (operatorEqualDbg) std::cerr << "2\n";
+ return false;
+ }
+ for (size_t i = 0; i != this->land[level].size(); ++i) {
+ if (!(almost_equal(this->land[level][i].first, rhs.land[level][i].first) &&
+ almost_equal(this->land[level][i].second, rhs.land[level][i].second))) {
+ if (operatorEqualDbg)
+ std::cerr << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second
+ << "\n";
+ if (operatorEqualDbg)
+ std::cerr << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n";
+ if (operatorEqualDbg) std::cerr << "3\n";
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+Persistence_landscape::Persistence_landscape(const std::vector<std::pair<double, double> >& p,
+ size_t number_of_levels) {
+ this->construct_persistence_landscape_from_barcode(p, number_of_levels);
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+}
+
+void Persistence_landscape::construct_persistence_landscape_from_barcode(
+ const std::vector<std::pair<double, double> >& p, size_t number_of_levels) {
+ bool dbg = false;
+ if (dbg) {
+ std::cerr << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )"
+ << std::endl;
+ }
+
+ // this is a general algorithm to construct persistence landscapes.
+ std::vector<std::pair<double, double> > bars;
+ bars.insert(bars.begin(), p.begin(), p.end());
+ std::sort(bars.begin(), bars.end(), compare_points_sorting);
+
+ if (dbg) {
+ std::cerr << "Bars : \n";
+ for (size_t i = 0; i != bars.size(); ++i) {
+ std::cerr << bars[i].first << " " << bars[i].second << "\n";
+ }
+ getchar();
+ }
+
+ std::vector<std::pair<double, double> > characteristicPoints(p.size());
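+  // Each bar (b, d) is encoded by the tip of its tent function, i.e. the point ((b + d) / 2, (d - b) / 2).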
+ for (size_t i = 0; i != bars.size(); ++i) {
+ characteristicPoints[i] =
+ std::make_pair((bars[i].first + bars[i].second) / 2.0, (bars[i].second - bars[i].first) / 2.0);
+ }
+ std::vector<std::vector<std::pair<double, double> > > Persistence_landscape;
+ size_t number_of_levels_in_the_landscape = 0;
+ while (!characteristicPoints.empty()) {
+ if (dbg) {
+ for (size_t i = 0; i != characteristicPoints.size(); ++i) {
+ std::cout << "(" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n";
+ }
+ std::cin.ignore();
+ }
+
+ std::vector<std::pair<double, double> > lambda_n;
+ lambda_n.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
+ lambda_n.push_back(std::make_pair(minus_length(characteristicPoints[0]), 0));
+ lambda_n.push_back(characteristicPoints[0]);
+
+ if (dbg) {
+ std::cerr << "1 Adding to lambda_n : (" << -std::numeric_limits<int>::max() << " " << 0 << ") , ("
+ << minus_length(characteristicPoints[0]) << " " << 0 << ") , (" << characteristicPoints[0].first << " "
+ << characteristicPoints[0].second << ") \n";
+ }
+
+ size_t i = 1;
+ std::vector<std::pair<double, double> > newCharacteristicPoints;
+ while (i < characteristicPoints.size()) {
+ size_t p = 1;
+ if ((minus_length(characteristicPoints[i]) >= minus_length(lambda_n[lambda_n.size() - 1])) &&
+ (birth_plus_deaths(characteristicPoints[i]) > birth_plus_deaths(lambda_n[lambda_n.size() - 1]))) {
+ if (minus_length(characteristicPoints[i]) < birth_plus_deaths(lambda_n[lambda_n.size() - 1])) {
+ std::pair<double, double> point = std::make_pair(
+ (minus_length(characteristicPoints[i]) + birth_plus_deaths(lambda_n[lambda_n.size() - 1])) / 2,
+ (birth_plus_deaths(lambda_n[lambda_n.size() - 1]) - minus_length(characteristicPoints[i])) / 2);
+ lambda_n.push_back(point);
+ if (dbg) {
+ std::cerr << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n";
+ }
+
+ if (dbg) {
+ std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
+ << characteristicPoints[i + p].second << "\n";
+ std::cerr << "point : " << point.first << " " << point.second << "\n";
+ getchar();
+ }
+
+ while ((i + p < characteristicPoints.size()) &&
+ (almost_equal(minus_length(point), minus_length(characteristicPoints[i + p]))) &&
+ (birth_plus_deaths(point) <= birth_plus_deaths(characteristicPoints[i + p]))) {
+ newCharacteristicPoints.push_back(characteristicPoints[i + p]);
+ if (dbg) {
+ std::cerr << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
+ << characteristicPoints[i + p].second << ")\n";
+ getchar();
+ }
+ ++p;
+ }
+
+ newCharacteristicPoints.push_back(point);
+ if (dbg) {
+ std::cerr << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n";
+ }
+
+ while ((i + p < characteristicPoints.size()) &&
+ (minus_length(point) <= minus_length(characteristicPoints[i + p])) &&
+ (birth_plus_deaths(point) >= birth_plus_deaths(characteristicPoints[i + p]))) {
+ newCharacteristicPoints.push_back(characteristicPoints[i + p]);
+ if (dbg) {
+ std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
+ << characteristicPoints[i + p].second << "\n";
+ std::cerr << "point : " << point.first << " " << point.second << "\n";
+ std::cerr << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p])
+ << " , " << birth_plus_deaths(characteristicPoints[i + p]) << "\n";
+ std::cerr << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point)
+ << "\n";
+
+ std::cerr << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
+ << characteristicPoints[i + p].second << ")\n";
+ getchar();
+ }
+ ++p;
+ }
+
+ } else {
+ lambda_n.push_back(std::make_pair(birth_plus_deaths(lambda_n[lambda_n.size() - 1]), 0));
+ lambda_n.push_back(std::make_pair(minus_length(characteristicPoints[i]), 0));
+ if (dbg) {
+ std::cerr << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0
+ << ")\n";
+ std::cerr << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n";
+ }
+ }
+ lambda_n.push_back(characteristicPoints[i]);
+ if (dbg) {
+ std::cerr << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " "
+ << characteristicPoints[i].second << ")\n";
+ }
+ } else {
+ newCharacteristicPoints.push_back(characteristicPoints[i]);
+ if (dbg) {
+ std::cerr << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " "
+ << characteristicPoints[i].second << ")\n";
+ }
+ }
+ i = i + p;
+ }
+ lambda_n.push_back(std::make_pair(birth_plus_deaths(lambda_n[lambda_n.size() - 1]), 0));
+ lambda_n.push_back(std::make_pair(std::numeric_limits<int>::max(), 0));
+
+ characteristicPoints = newCharacteristicPoints;
+
+ lambda_n.erase(std::unique(lambda_n.begin(), lambda_n.end()), lambda_n.end());
+ this->land.push_back(lambda_n);
+
+ ++number_of_levels_in_the_landscape;
+ if (number_of_levels == number_of_levels_in_the_landscape) {
+ break;
+ }
+ }
+}
+
+// this function find maximum of lambda_n
+double Persistence_landscape::find_max(unsigned lambda) const {
+  if (lambda >= this->land.size()) return 0;
+ double maximum = -std::numeric_limits<int>::max();
+ for (size_t i = 0; i != this->land[lambda].size(); ++i) {
+ if (this->land[lambda][i].second > maximum) maximum = this->land[lambda][i].second;
+ }
+ return maximum;
+}
+
+double Persistence_landscape::compute_integral_of_landscape() const {
+ double result = 0;
+ for (size_t i = 0; i != this->land.size(); ++i) {
+ for (size_t nr = 2; nr != this->land[i].size() - 1; ++nr) {
+ // it suffices to compute every planar integral and then sum them up for each lambda_n
+ result += 0.5 * (this->land[i][nr].first - this->land[i][nr - 1].first) *
+ (this->land[i][nr].second + this->land[i][nr - 1].second);
+ }
+ }
+ return result;
+}
+
+double Persistence_landscape::compute_integral_of_a_level_of_a_landscape(size_t level) const {
+ double result = 0;
+ if (level >= this->land.size()) {
+ // this landscape function is constantly equal 0, so is the integral.
+ return result;
+ }
+ // also negative landscapes are assumed to be zero.
+ if (level < 0) return 0;
+
+ for (size_t nr = 2; nr != this->land[level].size() - 1; ++nr) {
+ // it suffices to compute every planar integral and then sum them up for each lambda_n
+ result += 0.5 * (this->land[level][nr].first - this->land[level][nr - 1].first) *
+ (this->land[level][nr].second + this->land[level][nr - 1].second);
+ }
+
+ return result;
+}
+
+double Persistence_landscape::compute_integral_of_landscape(double p) const {
+ bool dbg = false;
+ double result = 0;
+ for (size_t i = 0; i != this->land.size(); ++i) {
+ for (size_t nr = 2; nr != this->land[i].size() - 1; ++nr) {
+ if (dbg) std::cout << "nr : " << nr << "\n";
+ // In this interval, the landscape has a form f(x) = ax+b. We want to compute integral of (ax+b)^p = 1/a *
+ // (ax+b)^{p+1}/(p+1)
+ std::pair<double, double> coef = compute_parameters_of_a_line(this->land[i][nr], this->land[i][nr - 1]);
+ double a = coef.first;
+ double b = coef.second;
+
+ if (dbg)
+ std::cout << "(" << this->land[i][nr].first << "," << this->land[i][nr].second << ") , "
+ << this->land[i][nr - 1].first << "," << this->land[i][nr].second << ")" << std::endl;
+ if (this->land[i][nr].first == this->land[i][nr - 1].first) continue;
+ if (a != 0) {
+ result += 1 / (a * (p + 1)) *
+ (pow((a * this->land[i][nr].first + b), p + 1) - pow((a * this->land[i][nr - 1].first + b), p + 1));
+ } else {
+ result += (this->land[i][nr].first - this->land[i][nr - 1].first) * (pow(this->land[i][nr].second, p));
+ }
+ if (dbg) {
+ std::cout << "a : " << a << " , b : " << b << std::endl;
+ std::cout << "result : " << result << std::endl;
+ }
+ }
+ }
+ return result;
+}
+
+// this is O(log(n)) algorithm, where n is number of points in this->land.
+double Persistence_landscape::compute_value_at_a_given_point(unsigned level, double x) const {
+ bool compute_value_at_a_given_pointDbg = false;
+ // in such a case lambda_level = 0.
+  if (level >= this->land.size()) return 0;
+
+ // we know that the points in this->land[level] are ordered according to x coordinate. Therefore, we can find the
+ // point by using bisection:
+ unsigned coordBegin = 1;
+ unsigned coordEnd = this->land[level].size() - 2;
+
+ if (compute_value_at_a_given_pointDbg) {
+ std::cerr << "Here \n";
+ std::cerr << "x : " << x << "\n";
+ std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
+ std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
+ }
+
+ // in this case x is outside the support of the landscape, therefore the value of the landscape is 0.
+ if (x <= this->land[level][coordBegin].first) return 0;
+ if (x >= this->land[level][coordEnd].first) return 0;
+
+ if (compute_value_at_a_given_pointDbg) std::cerr << "Entering to the while loop \n";
+
+ while (coordBegin + 1 != coordEnd) {
+ if (compute_value_at_a_given_pointDbg) {
+ std::cerr << "coordBegin : " << coordBegin << "\n";
+ std::cerr << "coordEnd : " << coordEnd << "\n";
+ std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
+ std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
+ }
+
+ unsigned newCord = (unsigned)floor((coordEnd + coordBegin) / 2.0);
+
+ if (compute_value_at_a_given_pointDbg) {
+ std::cerr << "newCord : " << newCord << "\n";
+ std::cerr << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n";
+ std::cin.ignore();
+ }
+
+ if (this->land[level][newCord].first <= x) {
+ coordBegin = newCord;
+ if (this->land[level][newCord].first == x) return this->land[level][newCord].second;
+ } else {
+ coordEnd = newCord;
+ }
+ }
+
+ if (compute_value_at_a_given_pointDbg) {
+ std::cout << "x : " << x << " is between : " << this->land[level][coordBegin].first << " a "
+ << this->land[level][coordEnd].first << "\n";
+ std::cout << "the y coords are : " << this->land[level][coordBegin].second << " a "
+ << this->land[level][coordEnd].second << "\n";
+ std::cerr << "coordBegin : " << coordBegin << "\n";
+ std::cerr << "coordEnd : " << coordEnd << "\n";
+ std::cin.ignore();
+ }
+ return function_value(this->land[level][coordBegin], this->land[level][coordEnd], x);
+}
+
+std::ostream& operator<<(std::ostream& out, Persistence_landscape& land) {
+ for (size_t level = 0; level != land.land.size(); ++level) {
+ out << "Lambda_" << level << ":" << std::endl;
+ for (size_t i = 0; i != land.land[level].size(); ++i) {
+ if (land.land[level][i].first == -std::numeric_limits<int>::max()) {
+ out << "-inf";
+ } else {
+ if (land.land[level][i].first == std::numeric_limits<int>::max()) {
+ out << "+inf";
+ } else {
+ out << land.land[level][i].first;
+ }
+ }
+ out << " , " << land.land[level][i].second << std::endl;
+ }
+ }
+ return out;
+}
+
+void Persistence_landscape::multiply_lanscape_by_real_number_overwrite(double x) {
+ for (size_t dim = 0; dim != this->land.size(); ++dim) {
+ for (size_t i = 0; i != this->land[dim].size(); ++i) {
+ this->land[dim][i].second *= x;
+ }
+ }
+}
+
+bool AbsDbg = false;
+Persistence_landscape Persistence_landscape::abs() {
+ Persistence_landscape result;
+ for (size_t level = 0; level != this->land.size(); ++level) {
+ if (AbsDbg) {
+ std::cout << "level: " << level << std::endl;
+ }
+ std::vector<std::pair<double, double> > lambda_n;
+ lambda_n.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
+ for (size_t i = 1; i != this->land[level].size(); ++i) {
+ if (AbsDbg) {
+ std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
+ << this->land[level][i].second << std::endl;
+ }
+ // if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to
+      // add one landscape point to the result
+ if ((this->land[level][i - 1].second) * (this->land[level][i].second) < 0) {
+ double zero =
+ find_zero_of_a_line_segment_between_those_two_points(this->land[level][i - 1], this->land[level][i]);
+
+ lambda_n.push_back(std::make_pair(zero, 0));
+ lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
+ if (AbsDbg) {
+ std::cout << "Adding pair : (" << zero << ",0)" << std::endl;
+ std::cout << "In the same step adding pair : (" << this->land[level][i].first << ","
+ << fabs(this->land[level][i].second) << ") " << std::endl;
+ std::cin.ignore();
+ }
+ } else {
+ lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
+ if (AbsDbg) {
+ std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
+ << ") " << std::endl;
+ std::cin.ignore();
+ }
+ }
+ }
+ result.land.push_back(lambda_n);
+ }
+ return result;
+}
+
+Persistence_landscape* Persistence_landscape::new_abs() {
+ Persistence_landscape* result = new Persistence_landscape(*this);
+ for (size_t level = 0; level != this->land.size(); ++level) {
+ if (AbsDbg) {
+ std::cout << "level: " << level << std::endl;
+ }
+ std::vector<std::pair<double, double> > lambda_n;
+ lambda_n.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
+ for (size_t i = 1; i != this->land[level].size(); ++i) {
+ if (AbsDbg) {
+ std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
+ << this->land[level][i].second << std::endl;
+ }
+ // if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to
+      // add one landscape point to the result
+ if ((this->land[level][i - 1].second) * (this->land[level][i].second) < 0) {
+ double zero =
+ find_zero_of_a_line_segment_between_those_two_points(this->land[level][i - 1], this->land[level][i]);
+
+ lambda_n.push_back(std::make_pair(zero, 0));
+ lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
+ if (AbsDbg) {
+ std::cout << "Adding pair : (" << zero << ",0)" << std::endl;
+ std::cout << "In the same step adding pair : (" << this->land[level][i].first << ","
+ << fabs(this->land[level][i].second) << ") " << std::endl;
+ std::cin.ignore();
+ }
+ } else {
+ lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
+ if (AbsDbg) {
+ std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
+ << ") " << std::endl;
+ std::cin.ignore();
+ }
+ }
+ }
+ result->land.push_back(lambda_n);
+ }
+ return result;
+}
+
+Persistence_landscape Persistence_landscape::multiply_lanscape_by_real_number_not_overwrite(double x) const {
+ std::vector<std::vector<std::pair<double, double> > > result(this->land.size());
+ for (size_t dim = 0; dim != this->land.size(); ++dim) {
+ std::vector<std::pair<double, double> > lambda_dim(this->land[dim].size());
+ for (size_t i = 0; i != this->land[dim].size(); ++i) {
+ lambda_dim[i] = std::make_pair(this->land[dim][i].first, x * this->land[dim][i].second);
+ }
+ result[dim] = lambda_dim;
+ }
+ Persistence_landscape res;
+ // CHANGE
+ // res.land = result;
+ res.land.swap(result);
+ return res;
+} // multiply_lanscape_by_real_number_not_overwrite
+
+void Persistence_landscape::print_to_file(const char* filename) const {
+ std::ofstream write;
+ write.open(filename);
+ for (size_t dim = 0; dim != this->land.size(); ++dim) {
+ write << "#lambda_" << dim << std::endl;
+ for (size_t i = 1; i != this->land[dim].size() - 1; ++i) {
+ write << this->land[dim][i].first << " " << this->land[dim][i].second << std::endl;
+ }
+ }
+ write.close();
+}
+
+void Persistence_landscape::load_landscape_from_file(const char* filename) {
+ bool dbg = false;
+ // removing the current content of the persistence landscape.
+ this->land.clear();
+
+  // this procedure reads a persistence landscape from a file. The file has to be created by this software beforehand.
+ std::ifstream in;
+ in.open(filename);
+ if (!in.good()) {
+ std::cerr << "The file : " << filename << " do not exist. The program will now terminate \n";
+ throw "The persistence landscape file do not exist. The program will now terminate \n";
+ }
+
+ std::string line;
+ std::vector<std::pair<double, double> > landscapeAtThisLevel;
+
+ bool isThisAFirsLine = true;
+ while (in.good()) {
+ getline(in, line);
+ if (!(line.length() == 0 || line[0] == '#')) {
+ std::stringstream lineSS;
+ lineSS << line;
+ double beginn, endd;
+ lineSS >> beginn;
+ lineSS >> endd;
+ landscapeAtThisLevel.push_back(std::make_pair(beginn, endd));
+ if (dbg) {
+ std::cerr << "Reading a point : " << beginn << " , " << endd << std::endl;
+ }
+ } else {
+ if (dbg) {
+ std::cout << "IGNORE LINE\n";
+ getchar();
+ }
+ if (!isThisAFirsLine) {
+ landscapeAtThisLevel.push_back(std::make_pair(std::numeric_limits<int>::max(), 0));
+ this->land.push_back(landscapeAtThisLevel);
+        std::vector<std::pair<double, double> > newLevelOfLandscape;
+        landscapeAtThisLevel.swap(newLevelOfLandscape);
+ }
+ landscapeAtThisLevel.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
+ isThisAFirsLine = false;
+ }
+ }
+ if (landscapeAtThisLevel.size() > 1) {
+    // it seems that the last line of the file does not end with a newline character. We need to put what we have in
+    // landscapeAtThisLevel into the constructed landscape.
+ landscapeAtThisLevel.push_back(std::make_pair(std::numeric_limits<int>::max(), 0));
+ this->land.push_back(landscapeAtThisLevel);
+ }
+
+ in.close();
+}
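+
+// Usage sketch (illustrative only): storing a landscape and reading it back. It assumes a landscape `land`
+// already constructed from birth-death pairs, and the default constructor declared earlier in this header.
+//   land.print_to_file("landscape.land");
+//   Persistence_landscape land_copy;
+//   land_copy.load_landscape_from_file("landscape.land");
+// The file layout matches print_to_file: a '#'-prefixed header per level followed by the (x, y) pairs of that level.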
+
+template <typename T>
+Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscape& land1,
+ const Persistence_landscape& land2) {
+ bool operation_on_pair_of_landscapesDBG = false;
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "operation_on_pair_of_landscapes\n";
+ std::cin.ignore();
+ }
+ Persistence_landscape result;
+ std::vector<std::vector<std::pair<double, double> > > land(std::max(land1.land.size(), land2.land.size()));
+ result.land = land;
+ T oper;
+
+ if (operation_on_pair_of_landscapesDBG) {
+ for (size_t i = 0; i != std::min(land1.land.size(), land2.land.size()); ++i) {
+ std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
+ std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
+ }
+ getchar();
+ }
+
+ for (size_t i = 0; i != std::min(land1.land.size(), land2.land.size()); ++i) {
+ std::vector<std::pair<double, double> > lambda_n;
+ size_t p = 0;
+ size_t q = 0;
+ while ((p + 1 < land1.land[i].size()) && (q + 1 < land2.land[i].size())) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cerr << "p : " << p << "\n";
+ std::cerr << "q : " << q << "\n";
+ std::cerr << "land1.land.size() : " << land1.land.size() << std::endl;
+ std::cerr << "land2.land.size() : " << land2.land.size() << std::endl;
+ std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
+ std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
+ std::cout << "land1.land[i][p].first : " << land1.land[i][p].first << "\n";
+ std::cout << "land2.land[i][q].first : " << land2.land[i][q].first << "\n";
+ }
+
+ if (land1.land[i][p].first < land2.land[i][q].first) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "first \n";
+ std::cout << " function_value(land2.land[i][q-1],land2.land[i][q],land1.land[i][p].first) : "
+ << function_value(land2.land[i][q - 1], land2.land[i][q], land1.land[i][p].first) << "\n";
+ }
+ lambda_n.push_back(
+ std::make_pair(land1.land[i][p].first,
+ oper(static_cast<double>(land1.land[i][p].second),
+ function_value(land2.land[i][q - 1], land2.land[i][q], land1.land[i][p].first))));
+ ++p;
+ continue;
+ }
+ if (land1.land[i][p].first > land2.land[i][q].first) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "Second \n";
+ std::cout << "function_value(" << land1.land[i][p - 1].first << " " << land1.land[i][p - 1].second << " ,"
+ << land1.land[i][p].first << " " << land1.land[i][p].second << ", " << land2.land[i][q].first
+ << " ) : " << function_value(land1.land[i][p - 1], land1.land[i][p - 1], land2.land[i][q].first)
+ << "\n";
+ std::cout << "oper( " << function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first) << ","
+ << land2.land[i][q].second << " : "
+ << oper(land2.land[i][q].second,
+ function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first))
+ << "\n";
+ }
+ lambda_n.push_back(std::make_pair(
+ land2.land[i][q].first, oper(function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first),
+ land2.land[i][q].second)));
+ ++q;
+ continue;
+ }
+ if (land1.land[i][p].first == land2.land[i][q].first) {
+ if (operation_on_pair_of_landscapesDBG) std::cout << "Third \n";
+ lambda_n.push_back(
+ std::make_pair(land2.land[i][q].first, oper(land1.land[i][p].second, land2.land[i][q].second)));
+ ++p;
+ ++q;
+ }
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "Next iteration \n";
+ }
+ }
+ while ((p + 1 < land1.land[i].size()) && (q + 1 >= land2.land[i].size())) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "New point : " << land1.land[i][p].first
+ << " oper(land1.land[i][p].second,0) : " << oper(land1.land[i][p].second, 0) << std::endl;
+ }
+ lambda_n.push_back(std::make_pair(land1.land[i][p].first, oper(land1.land[i][p].second, 0)));
+ ++p;
+ }
+ while ((p + 1 >= land1.land[i].size()) && (q + 1 < land2.land[i].size())) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "New point : " << land2.land[i][q].first
+ << " oper(0,land2.land[i][q].second) : " << oper(0, land2.land[i][q].second) << std::endl;
+ }
+ lambda_n.push_back(std::make_pair(land2.land[i][q].first, oper(0, land2.land[i][q].second)));
+ ++q;
+ }
+ lambda_n.push_back(std::make_pair(std::numeric_limits<int>::max(), 0));
+ // CHANGE
+ // result.land[i] = lambda_n;
+ result.land[i].swap(lambda_n);
+ }
+ if (land1.land.size() > std::min(land1.land.size(), land2.land.size())) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "land1.land.size() > std::min( land1.land.size() , land2.land.size() )" << std::endl;
+ }
+ for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size());
+ ++i) {
+ std::vector<std::pair<double, double> > lambda_n(land1.land[i]);
+ for (size_t nr = 0; nr != land1.land[i].size(); ++nr) {
+ lambda_n[nr] = std::make_pair(land1.land[i][nr].first, oper(land1.land[i][nr].second, 0));
+ }
+ // CHANGE
+ // result.land[i] = lambda_n;
+ result.land[i].swap(lambda_n);
+ }
+ }
+ if (land2.land.size() > std::min(land1.land.size(), land2.land.size())) {
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "( land2.land.size() > std::min( land1.land.size() , land2.land.size() ) ) " << std::endl;
+ }
+ for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size());
+ ++i) {
+ std::vector<std::pair<double, double> > lambda_n(land2.land[i]);
+ for (size_t nr = 0; nr != land2.land[i].size(); ++nr) {
+ lambda_n[nr] = std::make_pair(land2.land[i][nr].first, oper(0, land2.land[i][nr].second));
+ }
+ // CHANGE
+ // result.land[i] = lambda_n;
+ result.land[i].swap(lambda_n);
+ }
+ }
+ if (operation_on_pair_of_landscapesDBG) {
+ std::cout << "operation_on_pair_of_landscapes END\n";
+ std::cin.ignore();
+ }
+ return result;
+} // operation_on_pair_of_landscapes
+
+double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, const Persistence_landscape& pl2) {
+ bool dbg = false;
+ if (dbg) std::cerr << " compute_maximal_distance_non_symmetric \n";
+  // this distance is not symmetric. It computes ONLY the distance between inflection points of pl1 and pl2.
+ double maxDist = 0;
+ size_t minimalNumberOfLevels = std::min(pl1.land.size(), pl2.land.size());
+ for (size_t level = 0; level != minimalNumberOfLevels; ++level) {
+ if (dbg) {
+ std::cerr << "Level : " << level << std::endl;
+ std::cerr << "PL1 : \n";
+ for (size_t i = 0; i != pl1.land[level].size(); ++i) {
+ std::cerr << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n";
+ }
+ std::cerr << "PL2 : \n";
+ for (size_t i = 0; i != pl2.land[level].size(); ++i) {
+ std::cerr << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n";
+ }
+ std::cin.ignore();
+ }
+
+ int p2Count = 0;
+    // In this case, we consider points at infinity
+ for (size_t i = 1; i != pl1.land[level].size() - 1; ++i) {
+ while (true) {
+ if ((pl1.land[level][i].first >= pl2.land[level][p2Count].first) &&
+ (pl1.land[level][i].first <= pl2.land[level][p2Count + 1].first))
+ break;
+ p2Count++;
+ }
+ double val =
+ fabs(function_value(pl2.land[level][p2Count], pl2.land[level][p2Count + 1], pl1.land[level][i].first) -
+ pl1.land[level][i].second);
+ if (maxDist <= val) maxDist = val;
+
+ if (dbg) {
+ std::cerr << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << ","
+ << pl2.land[level][p2Count + 1].first << "] \n";
+ std::cerr << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
+ std::cerr << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : "
+ << function_value(pl2.land[level][p2Count], pl2.land[level][p2Count + 1], pl1.land[level][i].first)
+ << std::endl;
+ std::cerr << "val : " << val << std::endl;
+ std::cin.ignore();
+ }
+ }
+ }
+
+ if (dbg) std::cerr << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl;
+
+ if (minimalNumberOfLevels < pl1.land.size()) {
+ for (size_t level = minimalNumberOfLevels; level != pl1.land.size(); ++level) {
+ for (size_t i = 0; i != pl1.land[level].size(); ++i) {
+ if (dbg) std::cerr << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
+ if (maxDist < pl1.land[level][i].second) maxDist = pl1.land[level][i].second;
+ }
+ }
+ }
+ return maxDist;
+}
+
+double compute_distance_of_landscapes(const Persistence_landscape& first, const Persistence_landscape& second,
+ double p) {
+ bool dbg = false;
+ // This is what we want to compute: (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p). We will do it one step at a
+ // time:
+
+ // first-second :
+ Persistence_landscape lan = first - second;
+
+ //| first-second |:
+ lan = lan.abs();
+
+ if (dbg) {
+ std::cerr << "Abs of difference ; " << lan << std::endl;
+ getchar();
+ }
+
+ if (p < std::numeric_limits<double>::max()) {
+ // \int_{- \infty}^{+\infty}| first-second |^p
+ double result;
+ if (p != 1) {
+ if (dbg) std::cerr << "Power != 1, compute integral to the power p\n";
+ result = lan.compute_integral_of_landscape(p);
+ } else {
+ if (dbg) std::cerr << "Power = 1, compute integral \n";
+ result = lan.compute_integral_of_landscape();
+ }
+ // (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p)
+ return pow(result, 1.0 / p);
+ } else {
+ // p == infty
+ if (dbg) std::cerr << "Power = infty, compute maximum \n";
+ return lan.compute_maximum();
+ }
+}
+
+double compute_max_norm_distance_of_landscapes(const Persistence_landscape& first,
+ const Persistence_landscape& second) {
+ return std::max(compute_maximal_distance_non_symmetric(first, second),
+ compute_maximal_distance_non_symmetric(second, first));
+}
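+
+// Usage sketch (illustrative only; land1 and land2 are assumed to be previously constructed landscapes):
+//   double d1   = compute_distance_of_landscapes(land1, land2, 1.0);  // L^1 distance
+//   double d2   = compute_distance_of_landscapes(land1, land2, 2.0);  // L^2 distance
+//   double dinf = compute_distance_of_landscapes(land1, land2, std::numeric_limits<double>::max());  // sup norm
+//   double dmax = compute_max_norm_distance_of_landscapes(land1, land2);  // symmetrized max over inflection points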
+
+bool comparePairsForMerging(std::pair<double, unsigned> first, std::pair<double, unsigned> second) {
+ return (first.first < second.first);
+}
+
+double compute_inner_product(const Persistence_landscape& l1, const Persistence_landscape& l2) {
+ bool dbg = false;
+ double result = 0;
+
+ for (size_t level = 0; level != std::min(l1.size(), l2.size()); ++level) {
+ if (dbg) {
+ std::cerr << "Computing inner product for a level : " << level << std::endl;
+ getchar();
+ }
+ if (l1.land[level].size() * l2.land[level].size() == 0) continue;
+
+ // endpoints of the interval on which we will compute the inner product of two locally linear functions:
+ double x1 = -std::numeric_limits<int>::max();
+ double x2;
+ if (l1.land[level][1].first < l2.land[level][1].first) {
+ x2 = l1.land[level][1].first;
+ } else {
+ x2 = l2.land[level][1].first;
+ }
+
+ // iterators for the landscapes l1 and l2
+ size_t l1It = 0;
+ size_t l2It = 0;
+
+ while ((l1It < l1.land[level].size() - 1) && (l2It < l2.land[level].size() - 1)) {
+      // compute the value of an inner product on an interval [x1,x2]
+
+ double a, b, c, d;
+
+ if (l1.land[level][l1It + 1].first != l1.land[level][l1It].first) {
+ a = (l1.land[level][l1It + 1].second - l1.land[level][l1It].second) /
+ (l1.land[level][l1It + 1].first - l1.land[level][l1It].first);
+ } else {
+ a = 0;
+ }
+ b = l1.land[level][l1It].second - a * l1.land[level][l1It].first;
+ if (l2.land[level][l2It + 1].first != l2.land[level][l2It].first) {
+ c = (l2.land[level][l2It + 1].second - l2.land[level][l2It].second) /
+ (l2.land[level][l2It + 1].first - l2.land[level][l2It].first);
+ } else {
+ c = 0;
+ }
+ d = l2.land[level][l2It].second - c * l2.land[level][l2It].first;
+
+ double contributionFromThisPart = (a * c * x2 * x2 * x2 / 3 + (a * d + b * c) * x2 * x2 / 2 + b * d * x2) -
+ (a * c * x1 * x1 * x1 / 3 + (a * d + b * c) * x1 * x1 / 2 + b * d * x1);
+
+ result += contributionFromThisPart;
+
+ if (dbg) {
+ std::cerr << "[l1.land[level][l1It].first,l1.land[level][l1It+1].first] : " << l1.land[level][l1It].first
+ << " , " << l1.land[level][l1It + 1].first << std::endl;
+ std::cerr << "[l2.land[level][l2It].first,l2.land[level][l2It+1].first] : " << l2.land[level][l2It].first
+ << " , " << l2.land[level][l2It + 1].first << std::endl;
+ std::cerr << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl;
+ std::cerr << "x1 : " << x1 << " , x2 : " << x2 << std::endl;
+ std::cerr << "contributionFromThisPart : " << contributionFromThisPart << std::endl;
+ std::cerr << "result : " << result << std::endl;
+ getchar();
+ }
+
+      // we have two intervals on which both functions are linear:
+      // [l1.land[level][l1It].first , l1.land[level][l1It+1].first]
+      // and
+      // [l2.land[level][l2It].first , l2.land[level][l2It+1].first]
+      // We also have an interval [x1,x2]. Since the intervals of the landscapes cover the whole of R, it is clear
+      // that x2 is either l1.land[level][l1It+1].first or l2.land[level][l2It+1].first, or both. Let us test it.
+ if (x2 == l1.land[level][l1It + 1].first) {
+ if (x2 == l2.land[level][l2It + 1].first) {
+ // in this case, we increment both:
+ ++l2It;
+ if (dbg) {
+ std::cerr << "Incrementing both \n";
+ }
+ } else {
+ if (dbg) {
+ std::cerr << "Incrementing first \n";
+ }
+ }
+ ++l1It;
+ } else {
+ // in this case we increment l2It
+ ++l2It;
+ if (dbg) {
+ std::cerr << "Incrementing second \n";
+ }
+ }
+ // Now, we shift x1 and x2:
+ x1 = x2;
+ if (l1.land[level][l1It + 1].first < l2.land[level][l2It + 1].first) {
+ x2 = l1.land[level][l1It + 1].first;
+ } else {
+ x2 = l2.land[level][l2It + 1].first;
+ }
+ }
+ }
+ return result;
+}
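+
+// Usage sketch (illustrative only; land1 and land2 are assumed to be previously constructed landscapes):
+//   double sp = compute_inner_product(land1, land2);
+// On each interval where both landscapes are linear, say y = a*x + b and y = c*x + d, the contribution is
+//   \int_{x1}^{x2} (a*x + b)(c*x + d) dx = [ a*c*x^3/3 + (a*d + b*c)*x^2/2 + b*d*x ]_{x1}^{x2},
+// which is exactly the closed form accumulated in the loop above.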
+
+void Persistence_landscape::plot(const char* filename, double xRangeBegin, double xRangeEnd, double yRangeBegin,
+ double yRangeEnd, int from, int to) {
+  // this procedure creates a gnuplot script file that allows plotting the persistence landscape.
+ std::ofstream out;
+
+ std::ostringstream gnuplot_script;
+ gnuplot_script << filename << "_GnuplotScript";
+ out.open(gnuplot_script.str().c_str());
+
+ if ((xRangeBegin != std::numeric_limits<double>::max()) || (xRangeEnd != std::numeric_limits<double>::max()) ||
+ (yRangeBegin != std::numeric_limits<double>::max()) || (yRangeEnd != std::numeric_limits<double>::max())) {
+ out << "set xrange [" << xRangeBegin << " : " << xRangeEnd << "]" << std::endl;
+ out << "set yrange [" << yRangeBegin << " : " << yRangeEnd << "]" << std::endl;
+ }
+
+ if (from == std::numeric_limits<int>::max()) {
+ from = 0;
+ }
+ if (to == std::numeric_limits<int>::max()) {
+ to = this->land.size();
+ }
+
+ out << "plot ";
+ for (size_t lambda = std::min((size_t)from, this->land.size()); lambda != std::min((size_t)to, this->land.size());
+ ++lambda) {
+ // out << " '-' using 1:2 title 'l" << lambda << "' with lp";
+ out << " '-' using 1:2 notitle with lp";
+ if (lambda + 1 != std::min((size_t)to, this->land.size())) {
+ out << ", \\";
+ }
+ out << std::endl;
+ }
+
+ for (size_t lambda = std::min((size_t)from, this->land.size()); lambda != std::min((size_t)to, this->land.size());
+ ++lambda) {
+ for (size_t i = 1; i != this->land[lambda].size() - 1; ++i) {
+ out << this->land[lambda][i].first << " " << this->land[lambda][i].second << std::endl;
+ }
+ out << "EOF" << std::endl;
+ }
+ std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ << gnuplot_script.str().c_str() << "\'\"" << std::endl;
+}
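+
+// Usage sketch (illustrative only; `land` is assumed to be a previously constructed landscape):
+//   land.plot("my_landscape", std::numeric_limits<double>::max(), std::numeric_limits<double>::max(),
+//             std::numeric_limits<double>::max(), std::numeric_limits<double>::max(),
+//             std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
+// Passing the numeric_limits sentinels keeps the automatic ranges and plots all levels, as handled at the top of
+// the function above; the script is written to "my_landscape_GnuplotScript".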
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_LANDSCAPE_H_
diff --git a/include/gudhi/Persistence_landscape_on_grid.h b/include/gudhi/Persistence_landscape_on_grid.h
new file mode 100644
index 00000000..84fd22ed
--- /dev/null
+++ b/include/gudhi/Persistence_landscape_on_grid.h
@@ -0,0 +1,1348 @@
+/** This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ **/
+
+#ifndef PERSISTENCE_LANDSCAPE_ON_GRID_H_
+#define PERSISTENCE_LANDSCAPE_ON_GRID_H_
+
+// gudhi include
+#include <gudhi/read_persistence_from_file.h>
+#include <gudhi/common_persistence_representations.h>
+
+// standard include
+#include <iostream>
+#include <vector>
+#include <limits>
+#include <fstream>
+#include <sstream>
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <utility>
+#include <string>
+#include <cstdint>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+// forward declaration
+class Persistence_landscape_on_grid;
+template <typename operation>
+Persistence_landscape_on_grid operation_on_pair_of_landscapes_on_grid(const Persistence_landscape_on_grid& land1,
+ const Persistence_landscape_on_grid& land2);
+
+/**
+ * \class Persistence_landscape_on_grid Persistence_landscape_on_grid.h gudhi/Persistence_landscape_on_grid.h
+ * \brief A class implementing persistence landscapes by approximating them on a collection of grid points.
+ *
+ * \ingroup Persistence_representations
+ *
+ * \details
+ * Persistence landscapes on a grid allow vectorization, computation of distances, projections to the reals,
+ * averages and scalar products. Therefore they implement the suitable interfaces.
+ * This class implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+ * Real_valued_topological_data, Topological_data_with_averages, Topological_data_with_scalar_product
+ *
+ * Note that at the moment, due to rounding errors during the construction of persistence landscapes on a grid,
+ * elements which differ by 0.000005 are considered the same. If the scale in your persistence diagrams
+ * is comparable to this value, please rescale them before using this code.
+**/
+
+// this class implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+// Real_valued_topological_data, Topological_data_with_averages, Topological_data_with_scalar_product
+class Persistence_landscape_on_grid {
+ public:
+ /**
+ * Default constructor.
+ **/
+ Persistence_landscape_on_grid() {
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+ this->grid_min = this->grid_max = 0;
+ }
+
+ /**
+ * Constructor that takes as an input a vector of birth-death pairs.
+ **/
+ Persistence_landscape_on_grid(const std::vector<std::pair<double, double> >& p, double grid_min_, double grid_max_,
+ size_t number_of_points_);
+
+ /**
+   * Constructor that takes as an input a vector of birth-death pairs, the parameters of the grid and the number of
+   * landscape functions to be created.
+ **/
+ Persistence_landscape_on_grid(const std::vector<std::pair<double, double> >& p, double grid_min_, double grid_max_,
+ size_t number_of_points_, unsigned number_of_levels_of_landscape);
+
+ /**
+   * Constructor that reads persistence intervals from a file and creates a persistence landscape. The format of the
+   * input file is the following: each line contains one birth-death pair. The last line is assumed to be empty.
+   * Even if the points within a line are not ordered, they will be ordered while the input is read.
+   * The additional parameters of this procedure are: the range of the grid, the resolution of the grid,
+   * the number of landscape functions to be created, and the dimension of the intervals that are to be read from
+   * the file (in the case of Gudhi format files).
+ **/
+ Persistence_landscape_on_grid(const char* filename, double grid_min_, double grid_max_, size_t number_of_points_,
+ unsigned number_of_levels_of_landscape,
+ uint16_t dimension_ = std::numeric_limits<uint16_t>::max());
+
+ /**
+   * Constructor that reads persistence intervals from a file and creates a persistence landscape. The format of the
+   * input file is the following: each line contains one birth-death pair. The last line is assumed to be empty.
+   * Even if the points within a line are not ordered, they will be ordered while the input is read.
+   * The additional parameters of this procedure are: the range of the grid, the resolution of the grid,
+   * and the dimension of the intervals that are to be read from the file (in the case of Gudhi format files).
+ **/
+ Persistence_landscape_on_grid(const char* filename, double grid_min_, double grid_max_, size_t number_of_points_,
+ uint16_t dimension_ = std::numeric_limits<uint16_t>::max());
+
+ /**
+   * Constructor that reads persistence intervals from a file and creates a persistence landscape. The format of the
+   * input file is the following: each line contains one birth-death pair. The last line is assumed to be empty.
+   * Even if the points within a line are not ordered, they will be ordered while the input is read.
+   * The additional parameters are the resolution of the grid and the number of landscape
+   * functions to be created. The remaining parameters are calculated based on the data.
+ **/
+ Persistence_landscape_on_grid(const char* filename, size_t number_of_points, unsigned number_of_levels_of_landscape,
+ uint16_t dimension = std::numeric_limits<uint16_t>::max());
+
+ /**
+   * Constructor that reads persistence intervals from a file and creates a persistence landscape. The format of the
+   * input file is the following: each line contains one birth-death pair. The last line is assumed to be empty.
+   * Even if the points within a line are not ordered, they will be ordered while the input is read.
+   * The additional parameter is the resolution of the grid. The last parameter is the dimension
+   * of the persistence to read from the file. If your file contains persistence pairs in a single dimension only,
+   * please set it to std::numeric_limits<unsigned>::max().
+   * The remaining parameters are calculated based on the data.
+ **/
+ Persistence_landscape_on_grid(const char* filename, size_t number_of_points,
+ uint16_t dimension = std::numeric_limits<uint16_t>::max());
+
+ /**
+   * This procedure loads a landscape from a file. It erases all the data that was previously stored in this landscape.
+ **/
+ void load_landscape_from_file(const char* filename);
+
+ /**
+ * The procedure stores a landscape to a file. The file can be later used by a procedure load_landscape_from_file.
+ **/
+ void print_to_file(const char* filename) const;
+
+ /**
+   * This function computes the integral of the landscape (defined formally as the sum of integrals over R of all
+   * landscape functions).
+ **/
+ double compute_integral_of_landscape() const {
+ size_t maximal_level = this->number_of_nonzero_levels();
+ double result = 0;
+ for (size_t i = 0; i != maximal_level; ++i) {
+ result += this->compute_integral_of_landscape(i);
+ }
+ return result;
+ }
+
+ /**
+   * This function computes the integral of the 'level'-level of a landscape.
+ **/
+ double compute_integral_of_landscape(size_t level) const {
+ bool dbg = false;
+ double result = 0;
+ double dx = (this->grid_max - this->grid_min) / static_cast<double>(this->values_of_landscapes.size() - 1);
+
+ if (dbg) {
+ std::cerr << "this->grid_max : " << this->grid_max << std::endl;
+ std::cerr << "this->grid_min : " << this->grid_min << std::endl;
+ std::cerr << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl;
+ getchar();
+ }
+
+ double previous_x = this->grid_min - dx;
+ double previous_y = 0;
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ double current_x = previous_x + dx;
+ double current_y = 0;
+ if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level];
+
+ if (dbg) {
+ std::cerr << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size()
+ << " , level : " << level << std::endl;
+ if (this->values_of_landscapes[i].size() > level)
+ std::cerr << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl;
+ std::cerr << "previous_y : " << previous_y << std::endl;
+ std::cerr << "current_y : " << current_y << std::endl;
+ std::cerr << "dx : " << dx << std::endl;
+ std::cerr << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl;
+ }
+
+ result += 0.5 * dx * (previous_y + current_y);
+ previous_x = current_x;
+ previous_y = current_y;
+ }
+ return result;
+ }
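+
+  // Example (illustrative sketch): for the single interval [0, 1] sampled on the grid [0, 1] with 100 points,
+  // level 0 is (approximately) the tent function peaking at 0.5 with height 0.5, so the trapezoid rule above
+  // returns a value close to 0.25:
+  //   std::vector<std::pair<double, double> > intervals = {{0.0, 1.0}};
+  //   Persistence_landscape_on_grid l(intervals, 0.0, 1.0, 100);
+  //   double integral = l.compute_integral_of_landscape((size_t)0);  // ~0.25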
+
+ /**
+   * This function computes the integral of the p-th power of a landscape (defined formally as the sum of integrals
+   * over R of the p-th powers of all landscape functions).
+ **/
+ double compute_integral_of_landscape(double p) const {
+ size_t maximal_level = this->number_of_nonzero_levels();
+ double result = 0;
+ for (size_t i = 0; i != maximal_level; ++i) {
+ result += this->compute_integral_of_landscape(p, i);
+ }
+ return result;
+ }
+
+ /**
+   * This function computes the integral of the p-th power of a single level of a landscape (defined formally as the
+   * integral over R of the p-th power of that landscape function).
+ **/
+ double compute_integral_of_landscape(double p, size_t level) const {
+ bool dbg = false;
+
+ double result = 0;
+ double dx = (this->grid_max - this->grid_min) / static_cast<double>(this->values_of_landscapes.size() - 1);
+ double previous_x = this->grid_min;
+ double previous_y = 0;
+ if (this->values_of_landscapes[0].size() > level) previous_y = this->values_of_landscapes[0][level];
+
+ if (dbg) {
+ std::cerr << "dx : " << dx << std::endl;
+ std::cerr << "previous_x : " << previous_x << std::endl;
+ std::cerr << "previous_y : " << previous_y << std::endl;
+ std::cerr << "power : " << p << std::endl;
+ getchar();
+ }
+
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ double current_x = previous_x + dx;
+ double current_y = 0;
+ if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level];
+
+ if (dbg) std::cerr << "current_y : " << current_y << std::endl;
+
+ if (current_y == previous_y) continue;
+
+ std::pair<double, double> coef =
+ compute_parameters_of_a_line(std::make_pair(previous_x, previous_y), std::make_pair(current_x, current_y));
+ double a = coef.first;
+ double b = coef.second;
+
+ if (dbg) {
+ std::cerr << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x
+ << "," << current_y << ") is : " << a << "x+" << b << std::endl;
+ }
+
+      // In this interval, the landscape has the form f(x) = ax+b. We want to compute the integral of (ax+b)^p,
+      // whose antiderivative is 1/a * (ax+b)^{p+1}/(p+1).
+ double value_to_add = 0;
+ if (a != 0) {
+ value_to_add = 1 / (a * (p + 1)) * (pow((a * current_x + b), p + 1) - pow((a * previous_x + b), p + 1));
+ } else {
+ value_to_add = (current_x - previous_x) * (pow(b, p));
+ }
+ result += value_to_add;
+ if (dbg) {
+ std::cerr << "Increasing result by : " << value_to_add << std::endl;
+ std::cerr << "result : " << result << std::endl;
+ getchar();
+ }
+ previous_x = current_x;
+ previous_y = current_y;
+ }
+ if (dbg) std::cerr << "The total result is : " << result << std::endl;
+ return result;
+ }
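+
+  // Example (illustrative sketch, continuing the single-interval landscape l from the example above):
+  //   double integral_sq = l.compute_integral_of_landscape(2.0, 0);  // \int lambda_0(x)^2 dx, close to 1/12 here
+  // On each grid cell the landscape is linear, f(x) = a*x + b, and the exact piecewise contribution used above is
+  //   \int (a*x + b)^p dx = (a*x + b)^{p+1} / (a*(p+1))   for a != 0.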
+
+ /**
+   * Writing a landscape into a stream. Each output line contains a grid point followed by the values of all
+   * landscape levels at that point.
+   **/
+ friend std::ostream& operator<<(std::ostream& out, const Persistence_landscape_on_grid& land) {
+ double dx = (land.grid_max - land.grid_min) / static_cast<double>(land.values_of_landscapes.size() - 1);
+ double x = land.grid_min;
+ for (size_t i = 0; i != land.values_of_landscapes.size(); ++i) {
+ out << x << " : ";
+ for (size_t j = 0; j != land.values_of_landscapes[i].size(); ++j) {
+ out << land.values_of_landscapes[i][j] << " ";
+ }
+ out << std::endl;
+ x += dx;
+ }
+ return out;
+ }
+
+ template <typename oper>
+ friend Persistence_landscape_on_grid operation_on_pair_of_landscapes_on_grid(
+ const Persistence_landscape_on_grid& land1, const Persistence_landscape_on_grid& land2);
+
+ /**
+ * A function that computes the value of a landscape at a given point. The parameters of the function are: unsigned
+ *level and double x.
+ * The procedure will compute the value of the level-landscape at the point x.
+ **/
+ double compute_value_at_a_given_point(unsigned level, double x) const {
+ bool dbg = false;
+ if ((x < this->grid_min) || (x > this->grid_max)) return 0;
+
+ // find a position of a vector closest to x:
+ double dx = (this->grid_max - this->grid_min) / static_cast<double>(this->values_of_landscapes.size() - 1);
+ size_t position = size_t((x - this->grid_min) / dx);
+
+ if (dbg) {
+ std::cerr << "This is a procedure compute_value_at_a_given_point \n";
+ std::cerr << "level : " << level << std::endl;
+ std::cerr << "x : " << x << std::endl;
+ std::cerr << "position : " << position << std::endl;
+ }
+ // check if we are not exactly in the grid point:
+ if (almost_equal(position * dx + this->grid_min, x)) {
+      if (this->values_of_landscapes[position].size() > level) {
+ return this->values_of_landscapes[position][level];
+ } else {
+ return 0;
+ }
+ }
+ // in the other case, approximate with a line:
+ std::pair<double, double> line;
+ if ((this->values_of_landscapes[position].size() > level) &&
+ (this->values_of_landscapes[position + 1].size() > level)) {
+ line = compute_parameters_of_a_line(
+ std::make_pair(position * dx + this->grid_min, this->values_of_landscapes[position][level]),
+ std::make_pair((position + 1) * dx + this->grid_min, this->values_of_landscapes[position + 1][level]));
+ } else {
+ if ((this->values_of_landscapes[position].size() > level) ||
+ (this->values_of_landscapes[position + 1].size() > level)) {
+ if ((this->values_of_landscapes[position].size() > level)) {
+ line = compute_parameters_of_a_line(
+ std::make_pair(position * dx + this->grid_min, this->values_of_landscapes[position][level]),
+ std::make_pair((position + 1) * dx + this->grid_min, 0));
+ } else {
+ line = compute_parameters_of_a_line(
+ std::make_pair(position * dx + this->grid_min, 0),
+ std::make_pair((position + 1) * dx + this->grid_min, this->values_of_landscapes[position + 1][level]));
+ }
+ } else {
+ return 0;
+ }
+ }
+ // compute the value of the linear function parametrized by line on a point x:
+ return line.first * x + line.second;
+ }
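+
+  // Example (illustrative sketch, using the single-interval landscape l from the examples above):
+  //   double v0 = l.compute_value_at_a_given_point(0, 0.5);  // close to 0.5, the peak of lambda_0
+  //   double v1 = l(1, 0.25);                                // operator() below evaluates level 1 at x = 0.25
+  // Arguments outside [grid_min, grid_max] return 0, as handled by the early return at the top of this function.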
+
+ public:
+ /**
+   *\private A function that computes the sum of two landscapes.
+ **/
+ friend Persistence_landscape_on_grid add_two_landscapes(const Persistence_landscape_on_grid& land1,
+ const Persistence_landscape_on_grid& land2) {
+ return operation_on_pair_of_landscapes_on_grid<std::plus<double> >(land1, land2);
+ }
+
+ /**
+   *\private A function that computes the difference of two landscapes.
+ **/
+ friend Persistence_landscape_on_grid subtract_two_landscapes(const Persistence_landscape_on_grid& land1,
+ const Persistence_landscape_on_grid& land2) {
+ return operation_on_pair_of_landscapes_on_grid<std::minus<double> >(land1, land2);
+ }
+
+ /**
+   * An operator + that computes the sum of two landscapes.
+ **/
+ friend Persistence_landscape_on_grid operator+(const Persistence_landscape_on_grid& first,
+ const Persistence_landscape_on_grid& second) {
+ return add_two_landscapes(first, second);
+ }
+
+ /**
+   * An operator - that computes the difference of two landscapes.
+ **/
+ friend Persistence_landscape_on_grid operator-(const Persistence_landscape_on_grid& first,
+ const Persistence_landscape_on_grid& second) {
+ return subtract_two_landscapes(first, second);
+ }
+
+ /**
+ * An operator * that allows multiplication of a landscape by a real number.
+ **/
+ friend Persistence_landscape_on_grid operator*(const Persistence_landscape_on_grid& first, double con) {
+ return first.multiply_lanscape_by_real_number_not_overwrite(con);
+ }
+
+ /**
+ * An operator * that allows multiplication of a landscape by a real number (order of parameters swapped).
+ **/
+ friend Persistence_landscape_on_grid operator*(double con, const Persistence_landscape_on_grid& first) {
+ return first.multiply_lanscape_by_real_number_not_overwrite(con);
+ }
+
+ friend bool check_if_defined_on_the_same_domain(const Persistence_landscape_on_grid& land1,
+ const Persistence_landscape_on_grid& land2) {
+ if (land1.values_of_landscapes.size() != land2.values_of_landscapes.size()) return false;
+ if (land1.grid_min != land2.grid_min) return false;
+ if (land1.grid_max != land2.grid_max) return false;
+ return true;
+ }
+
+ /**
+   * Operator +=. The second parameter is a persistence landscape.
+ **/
+ Persistence_landscape_on_grid operator+=(const Persistence_landscape_on_grid& rhs) {
+ *this = *this + rhs;
+ return *this;
+ }
+
+ /**
+   * Operator -=. The second parameter is a persistence landscape.
+ **/
+ Persistence_landscape_on_grid operator-=(const Persistence_landscape_on_grid& rhs) {
+ *this = *this - rhs;
+ return *this;
+ }
+
+ /**
+ * Operator *=. The second parameter is a real number by which the y values of all landscape functions are multiplied.
+ *The x-values remain unchanged.
+ **/
+ Persistence_landscape_on_grid operator*=(double x) {
+ *this = *this * x;
+ return *this;
+ }
+
+ /**
+ * Operator /=. The second parameter is a real number.
+ **/
+ Persistence_landscape_on_grid operator/=(double x) {
+ if (x == 0) throw("In operator /=, division by 0. Program terminated.");
+ *this = *this * (1 / x);
+ return *this;
+ }
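+
+  // Example (illustrative sketch; a and b are assumed to be landscapes built with the same grid_min, grid_max and
+  // number of points, since the arithmetic below is performed pointwise on the grid):
+  //   Persistence_landscape_on_grid sum = a + b;
+  //   Persistence_landscape_on_grid avg = (a + b) * 0.5;  // same as 0.5 * (a + b)
+  //   a += b;
+  //   a /= 2.0;  // throws if the divisor is 0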
+
+ /**
+ * An operator to compare two persistence landscapes.
+ **/
+ bool operator==(const Persistence_landscape_on_grid& rhs) const {
+ bool dbg = true;
+ if (this->values_of_landscapes.size() != rhs.values_of_landscapes.size()) {
+ if (dbg) std::cerr << "values_of_landscapes of incompatible sizes\n";
+ return false;
+ }
+ if (!almost_equal(this->grid_min, rhs.grid_min)) {
+ if (dbg) std::cerr << "grid_min not equal\n";
+ return false;
+ }
+ if (!almost_equal(this->grid_max, rhs.grid_max)) {
+ if (dbg) std::cerr << "grid_max not equal\n";
+ return false;
+ }
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ for (size_t aa = 0; aa != this->values_of_landscapes[i].size(); ++aa) {
+ if (!almost_equal(this->values_of_landscapes[i][aa], rhs.values_of_landscapes[i][aa])) {
+ if (dbg) {
+ std::cerr << "Problem in the position : " << i << " of values_of_landscapes. \n";
+ std::cerr << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl;
+ }
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * An operator to compare two persistence landscapes.
+ **/
+ bool operator!=(const Persistence_landscape_on_grid& rhs) const { return !((*this) == rhs); }
+
+ /**
+ * Computations of maximum (y) value of landscape.
+ **/
+ double compute_maximum() const {
+ // since the function can only be entirely positive or negative, the maximal value will be an extremal value in the
+ // arrays:
+ double max_value = -std::numeric_limits<double>::max();
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ if (this->values_of_landscapes[i].size()) {
+ if (this->values_of_landscapes[i][0] > max_value) max_value = this->values_of_landscapes[i][0];
+ if (this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1] > max_value)
+ max_value = this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1];
+ }
+ }
+ return max_value;
+ }
+
+ /**
+ * Computations of minimum and maximum value of landscape.
+ **/
+ std::pair<double, double> compute_minimum_maximum() const {
+ // since the function can only be entirely positive or negative, the maximal value will be an extremal value in the
+ // arrays:
+ double max_value = -std::numeric_limits<double>::max();
+ double min_value = 0;
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ if (this->values_of_landscapes[i].size()) {
+ if (this->values_of_landscapes[i][0] > max_value) max_value = this->values_of_landscapes[i][0];
+ if (this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1] > max_value)
+ max_value = this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1];
+
+ if (this->values_of_landscapes[i][0] < min_value) min_value = this->values_of_landscapes[i][0];
+ if (this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1] < min_value)
+ min_value = this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1];
+ }
+ }
+ return std::make_pair(min_value, max_value);
+ }
+
+ /**
+   * This procedure returns the x-range of a given level of the persistence landscape. If the default value is used,
+   * the x-range of the 0th level landscape is given (this range contains the ranges of all other levels).
+ **/
+ std::pair<double, double> get_x_range(size_t level = 0) const {
+ return std::make_pair(this->grid_min, this->grid_max);
+ }
+
+ /**
+   * This procedure returns the y-range of a persistence landscape. If the default value is used, the y-range
+   * of the 0th level landscape is given (this range contains the ranges of all other levels).
+ **/
+ std::pair<double, double> get_y_range(size_t level = 0) const { return this->compute_minimum_maximum(); }
+
+ /**
+   * This function computes the number of nonzero levels of the landscape.
+ **/
+ size_t number_of_nonzero_levels() const {
+ size_t result = 0;
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ if (this->values_of_landscapes[i].size() > result) result = this->values_of_landscapes[i].size();
+ }
+ return result;
+ }
+
+ /**
+   * Computation of the \f$L^i\f$ norm of the landscape, where i is the input parameter.
+ **/
+ double compute_norm_of_landscape(double i) const {
+ std::vector<std::pair<double, double> > p;
+ Persistence_landscape_on_grid l(p, this->grid_min, this->grid_max, this->values_of_landscapes.size() - 1);
+
+ if (i < std::numeric_limits<double>::max()) {
+ return compute_distance_of_landscapes_on_grid(*this, l, i);
+ } else {
+ return compute_max_norm_distance_of_landscapes(*this, l);
+ }
+ }
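+
+  // Example (illustrative sketch; l is a previously constructed Persistence_landscape_on_grid):
+  //   double n1   = l.compute_norm_of_landscape(1.0);  // L^1 norm
+  //   double n2   = l.compute_norm_of_landscape(2.0);  // L^2 norm
+  //   double ninf = l.compute_norm_of_landscape(std::numeric_limits<double>::max());  // sup norm
+  // Internally this is the distance of l to the empty (zero) landscape built on the same grid, as set up above.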
+
+ /**
+ * An operator to compute the value of a landscape in the level 'level' at the argument 'x'.
+ **/
+ double operator()(unsigned level, double x) const { return this->compute_value_at_a_given_point(level, x); }
+
+ /**
+ * Computations of \f$L^{\infty}\f$ distance between two landscapes.
+ **/
+ friend double compute_max_norm_distance_of_landscapes(const Persistence_landscape_on_grid& first,
+ const Persistence_landscape_on_grid& second);
+
+ /**
+   * Function to compute the absolute value of a PL function. The representation of persistence landscapes allows
+   * storing general PL-functions. When computing the distance between two landscapes, we compute their difference;
+   * in this case, a general PL-function with negative values can appear as a result. In order to compute the
+   * distance we then need to take its absolute value. This is the purpose of this procedure.
+ **/
+ void abs() {
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ for (size_t j = 0; j != this->values_of_landscapes[i].size(); ++j) {
+ this->values_of_landscapes[i][j] = std::abs(this->values_of_landscapes[i][j]);
+ }
+ }
+ }
+
+ /**
+ * Computes the number of landscape functions.
+ **/
+ size_t size() const { return this->number_of_nonzero_levels(); }
+
+ /**
+ * Compute maximal value of lambda-level landscape.
+ **/
+ double find_max(unsigned lambda) const {
+ double max_value = -std::numeric_limits<double>::max();
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ if (this->values_of_landscapes[i].size() > lambda) {
+ if (this->values_of_landscapes[i][lambda] > max_value) max_value = this->values_of_landscapes[i][lambda];
+ }
+ }
+ return max_value;
+ }
+
+ /**
+ * Function to compute inner (scalar) product of two landscapes.
+ **/
+ friend double compute_inner_product(const Persistence_landscape_on_grid& l1,
+ const Persistence_landscape_on_grid& l2) {
+ if (!check_if_defined_on_the_same_domain(l1, l2))
+ throw "Landscapes are not defined on the same grid, the program will now terminate";
+ size_t maximal_level = l1.number_of_nonzero_levels();
+ double result = 0;
+ for (size_t i = 0; i != maximal_level; ++i) {
+ result += compute_inner_product(l1, l2, i);
+ }
+ return result;
+ }
+
+ /**
+ * Function to compute inner (scalar) product of given levels of two landscapes.
+ **/
+ friend double compute_inner_product(const Persistence_landscape_on_grid& l1, const Persistence_landscape_on_grid& l2,
+ size_t level) {
+ bool dbg = false;
+
+ if (!check_if_defined_on_the_same_domain(l1, l2))
+ throw "Landscapes are not defined on the same grid, the program will now terminate";
+ double result = 0;
+
+ double dx = (l1.grid_max - l1.grid_min) / static_cast<double>(l1.values_of_landscapes.size() - 1);
+
+ double previous_x = l1.grid_min - dx;
+ double previous_y_l1 = 0;
+ double previous_y_l2 = 0;
+ for (size_t i = 0; i != l1.values_of_landscapes.size(); ++i) {
+ if (dbg) std::cerr << "i : " << i << std::endl;
+
+ double current_x = previous_x + dx;
+ double current_y_l1 = 0;
+ if (l1.values_of_landscapes[i].size() > level) current_y_l1 = l1.values_of_landscapes[i][level];
+
+ double current_y_l2 = 0;
+ if (l2.values_of_landscapes[i].size() > level) current_y_l2 = l2.values_of_landscapes[i][level];
+
+ if (dbg) {
+ std::cerr << "previous_x : " << previous_x << std::endl;
+ std::cerr << "previous_y_l1 : " << previous_y_l1 << std::endl;
+ std::cerr << "current_y_l1 : " << current_y_l1 << std::endl;
+ std::cerr << "previous_y_l2 : " << previous_y_l2 << std::endl;
+ std::cerr << "current_y_l2 : " << current_y_l2 << std::endl;
+ }
+
+ std::pair<double, double> l1_coords = compute_parameters_of_a_line(std::make_pair(previous_x, previous_y_l1),
+ std::make_pair(current_x, current_y_l1));
+ std::pair<double, double> l2_coords = compute_parameters_of_a_line(std::make_pair(previous_x, previous_y_l2),
+ std::make_pair(current_x, current_y_l2));
+
+ // let us assume that the first line is of a form y = ax+b, and the second one is of a form y = cx + d. Then here
+ // are a,b,c,d:
+ double a = l1_coords.first;
+ double b = l1_coords.second;
+
+ double c = l2_coords.first;
+ double d = l2_coords.second;
+
+ if (dbg) {
+ std::cerr << "Here are the formulas for a line: \n";
+ std::cerr << "a : " << a << std::endl;
+ std::cerr << "b : " << b << std::endl;
+ std::cerr << "c : " << c << std::endl;
+ std::cerr << "d : " << d << std::endl;
+ }
+
+      // now, to compute the inner product in this interval we need to compute the integral of (ax+b)(cx+d) = acx^2 +
+      // (ad+bc)x + bd on the interval from previous_x to current_x.
+      // The antiderivative is ac/3*x^3 + (ad+bc)/2*x^2 + bd*x.
+
+ double added_value = (a * c / 3 * current_x * current_x * current_x +
+ (a * d + b * c) / 2 * current_x * current_x + b * d * current_x) -
+ (a * c / 3 * previous_x * previous_x * previous_x +
+ (a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x);
+
+ if (dbg) {
+ std::cerr << "Value of the integral on the left end i.e. : " << previous_x << " is : "
+ << a * c / 3 * previous_x * previous_x * previous_x + (a * d + b * c) / 2 * previous_x * previous_x +
+ b * d * previous_x
+ << std::endl;
+ std::cerr << "Value of the integral on the right end i.e. : " << current_x << " is "
+ << a * c / 3 * current_x * current_x * current_x + (a * d + b * c) / 2 * current_x * current_x +
+ b * d * current_x
+ << std::endl;
+ }
+
+ result += added_value;
+
+ if (dbg) {
+ std::cerr << "added_value : " << added_value << std::endl;
+ std::cerr << "result : " << result << std::endl;
+ getchar();
+ }
+
+ previous_x = current_x;
+ previous_y_l1 = current_y_l1;
+ previous_y_l2 = current_y_l2;
+ }
+ return result;
+ }
+
+ /**
+   * Computation of the \f$L^{p}\f$ distance between two landscapes on a grid; p is the parameter of the procedure.
+   * FIXME: Note that, due to the grid representation, the method below may give inaccurate results when the
+   * landscapes P and Q whose difference we want to compute intersect between grid points. This is a consequence of
+   * the general way they are computed. In the future, an integral of the absolute value of the difference of P and Q
+   * will be provided as a separate function to fix this inaccuracy.
+ **/
+ friend double compute_distance_of_landscapes_on_grid(const Persistence_landscape_on_grid& first,
+ const Persistence_landscape_on_grid& second, double p) {
+ bool dbg = false;
+ // This is what we want to compute: (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p). We will do it one step at a
+ // time:
+
+ if (dbg) {
+ std::cerr << "first : " << first << std::endl;
+ std::cerr << "second : " << second << std::endl;
+ getchar();
+ }
+
+ // first-second :
+ Persistence_landscape_on_grid lan = first - second;
+
+ if (dbg) {
+ std::cerr << "Difference : " << lan << std::endl;
+ }
+
+ //| first-second |:
+ lan.abs();
+
+ if (dbg) {
+ std::cerr << "Abs : " << lan << std::endl;
+ }
+
+ if (p < std::numeric_limits<double>::max()) {
+ // \int_{- \infty}^{+\infty}| first-second |^p
+ double result;
+ if (p != 1) {
+ if (dbg) {
+ std::cerr << "p : " << p << std::endl;
+ getchar();
+ }
+ result = lan.compute_integral_of_landscape(p);
+ if (dbg) {
+ std::cerr << "integral : " << result << std::endl;
+ getchar();
+ }
+ } else {
+ result = lan.compute_integral_of_landscape();
+ if (dbg) {
+ std::cerr << "integral, without power : " << result << std::endl;
+ getchar();
+ }
+ }
+ // (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p)
+ return pow(result, 1.0 / p);
+ } else {
+ // p == infty
+ return lan.compute_maximum();
+ }
+ }
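+
+  // Example (illustrative sketch; a and b are assumed to be defined on the same grid):
+  //   double d1   = compute_distance_of_landscapes_on_grid(a, b, 1.0);
+  //   double dinf = compute_distance_of_landscapes_on_grid(a, b, std::numeric_limits<double>::max());
+  // As noted in the FIXME above, when a and b cross between grid points the L^p result is only as accurate as the
+  // grid resolution.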
+
+ // Functions that are needed for that class to implement the concept.
+
+ /**
+   * The number of projections to R is defined as the number of nonzero landscape functions. The i-th projection is
+   * the integral of the i-th landscape function over the whole of R.
+   * This function is required by the Real_valued_topological_data concept.
+   * At the moment this function is not tested, since it is quite likely to be changed in the future. Given this,
+   * when using it, keep in mind that it will most likely be changed in future versions.
+ **/
+ double project_to_R(int number_of_function) const {
+ return this->compute_integral_of_landscape((size_t)number_of_function);
+ }
+
+ /**
+ * The function gives the number of possible projections to R. This function is required by the
+ *Real_valued_topological_data concept.
+ **/
+ size_t number_of_projections_to_R() const { return number_of_functions_for_projections_to_reals; }
+
+ /**
+   * This function produces a vector of doubles based on a landscape. It is required by the concept
+   * Vectorized_topological_data.
+ */
+ std::vector<double> vectorize(int number_of_function) const {
+ // TODO(PD) think of something smarter over here
+ if ((number_of_function < 0) || ((size_t)number_of_function >= this->values_of_landscapes.size())) {
+ throw "Wrong number of function\n";
+ }
+ std::vector<double> v(this->values_of_landscapes.size());
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ v[i] = 0;
+ if (this->values_of_landscapes[i].size() > (size_t)number_of_function) {
+ v[i] = this->values_of_landscapes[i][number_of_function];
+ }
+ }
+ return v;
+ }
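+
+  // Example (illustrative sketch): a fixed-length feature vector for the 0-th landscape level, one value per grid
+  // point (0 where that level is not present), suitable as input for vector-based learning methods:
+  //   std::vector<double> feature = l.vectorize(0);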
+
+ /**
+   * This function returns the number of functions that allow vectorization of a persistence landscape. It is
+   * required by the concept Vectorized_topological_data.
+ **/
+ size_t number_of_vectorize_functions() const { return number_of_functions_for_vectorization; }
+
+ /**
+   * A function to compute an averaged persistence landscape on a grid, based on a vector of persistence landscapes
+   * on a grid. This function is required by the Topological_data_with_averages concept.
+ **/
+ void compute_average(const std::vector<Persistence_landscape_on_grid*>& to_average) {
+ bool dbg = false;
+ // After execution of this procedure, the average is supposed to be in the current object. To make sure that this is
+ // the case, we need to do some cleaning first.
+ this->values_of_landscapes.clear();
+ this->grid_min = this->grid_max = 0;
+
+ // if there is nothing to average, then the average is a zero landscape.
+ if (to_average.size() == 0) return;
+
+ // now we need to check if the grids in all objects of to_average are the same:
+ for (size_t i = 0; i != to_average.size(); ++i) {
+ if (!check_if_defined_on_the_same_domain(*(to_average[0]), *(to_average[i])))
+ throw "Two grids are not compatible";
+ }
+
+ this->values_of_landscapes = std::vector<std::vector<double> >((to_average[0])->values_of_landscapes.size());
+ this->grid_min = (to_average[0])->grid_min;
+ this->grid_max = (to_average[0])->grid_max;
+
+ if (dbg) {
+ std::cerr << "Computations of average. The data from the current landscape have been cleared. We are ready to do "
+ "the computations. \n";
+ }
+
+ // for every point in the grid:
+ for (size_t grid_point = 0; grid_point != (to_average[0])->values_of_landscapes.size(); ++grid_point) {
+ // set up a vector of the correct size:
+ size_t maximal_size_of_vector = 0;
+ for (size_t land_no = 0; land_no != to_average.size(); ++land_no) {
+ if ((to_average[land_no])->values_of_landscapes[grid_point].size() > maximal_size_of_vector)
+ maximal_size_of_vector = (to_average[land_no])->values_of_landscapes[grid_point].size();
+ }
+ this->values_of_landscapes[grid_point] = std::vector<double>(maximal_size_of_vector);
+
+ if (dbg) {
+ std::cerr << "We are considering the point : " << grid_point
+ << " of the grid. In this point, there are at most : " << maximal_size_of_vector
+ << " nonzero landscape functions \n";
+ }
+
+ // and compute an arithmetic average:
+ for (size_t land_no = 0; land_no != to_average.size(); ++land_no) {
+ // summing:
+ for (size_t i = 0; i != (to_average[land_no])->values_of_landscapes[grid_point].size(); ++i) {
+ // compute the average in a smarter way.
+ this->values_of_landscapes[grid_point][i] += (to_average[land_no])->values_of_landscapes[grid_point][i];
+ }
+ }
+ // normalizing:
+ for (size_t i = 0; i != this->values_of_landscapes[grid_point].size(); ++i) {
+ this->values_of_landscapes[grid_point][i] /= static_cast<double>(to_average.size());
+ }
+ }
+ } // compute_average
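+
+  // Example (illustrative sketch; l1, l2 and l3 are assumed to be landscapes built on the same grid):
+  //   std::vector<Persistence_landscape_on_grid*> to_average = {&l1, &l2, &l3};
+  //   Persistence_landscape_on_grid average;
+  //   average.compute_average(to_average);
+  // The call throws if the grids are not compatible, as checked at the beginning of compute_average.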
+
+ /**
+   * A function to compute the distance between persistence landscapes on a grid.
+   * The parameter of this function is a Persistence_landscape_on_grid.
+   * This function is required by the Topological_data_with_distances concept.
+   * For the max norm distance, set power to std::numeric_limits<double>::max().
+ **/
+ double distance(const Persistence_landscape_on_grid& second, double power = 1) const {
+ if (power < std::numeric_limits<double>::max()) {
+ return compute_distance_of_landscapes_on_grid(*this, second, power);
+ } else {
+ return compute_max_norm_distance_of_landscapes(*this, second);
+ }
+ }
+
+ /**
+   * A function to compute the scalar product of persistence landscapes on a grid.
+   * The parameter of this function is a Persistence_landscape_on_grid.
+   * This function is required by the Topological_data_with_scalar_product concept.
+ **/
+ double compute_scalar_product(const Persistence_landscape_on_grid& second) {
+ return compute_inner_product((*this), second);
+ }
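+
+  // Example (illustrative sketch; a and b are assumed to be defined on the same grid):
+  //   double d1   = a.distance(b);                                      // L^1 distance (default power = 1)
+  //   double dsup = a.distance(b, std::numeric_limits<double>::max());  // sup-norm distance
+  //   double sp   = a.compute_scalar_product(b);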
+
+ // end of implementation of functions needed for concepts.
+
+ /**
+ * A function that returns values of landscapes. It can be used for visualization
+ **/
+ std::vector<std::vector<double> > output_for_visualization() const { return this->values_of_landscapes; }
+
+ /**
+   * Function used to create a gnuplot script for visualization of landscapes. Here we need to specify which
+   * landscapes we want to plot.
+   * In addition, the user may specify the range (min and max) on which the landscape is plotted. The default values
+   * for min and max are std::numeric_limits<double>::max(). If the procedure detects those values, it will determine
+   * the range so that the whole landscape is supported there. If at least one min or max value is different from
+   * std::numeric_limits<double>::max(), the values provided by the user will be used.
+ **/
+ void plot(const char* filename, size_t from_, size_t to_) const {
+ this->plot(filename, std::numeric_limits<double>::max(), std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::max(), std::numeric_limits<double>::max(), from_, to_);
+ }
+
+ /**
+   * Function used to create a gnuplot script for visualization of landscapes. Here we can also restrict the x and y
+   * range of the landscape.
+ **/
+ void plot(const char* filename, double min_x = std::numeric_limits<double>::max(),
+ double max_x = std::numeric_limits<double>::max(), double min_y = std::numeric_limits<double>::max(),
+ double max_y = std::numeric_limits<double>::max(), size_t from_ = std::numeric_limits<size_t>::max(),
+ size_t to_ = std::numeric_limits<size_t>::max()) const;
+
+ protected:
+ double grid_min;
+ double grid_max;
+ std::vector<std::vector<double> > values_of_landscapes;
+ size_t number_of_functions_for_vectorization;
+ size_t number_of_functions_for_projections_to_reals;
+
+ void set_up_numbers_of_functions_for_vectorization_and_projections_to_reals() {
+ // warning, this function can be only called after filling in the values_of_landscapes vector.
+ this->number_of_functions_for_vectorization = this->values_of_landscapes.size();
+ this->number_of_functions_for_projections_to_reals = this->values_of_landscapes.size();
+ }
+ void set_up_values_of_landscapes(const std::vector<std::pair<double, double> >& p, double grid_min_, double grid_max_,
+ size_t number_of_points_,
+ unsigned number_of_levels = std::numeric_limits<unsigned>::max());
+ Persistence_landscape_on_grid multiply_lanscape_by_real_number_not_overwrite(double x) const;
+};
+
+void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vector<std::pair<double, double> >& p,
+ double grid_min_, double grid_max_,
+ size_t number_of_points_, unsigned number_of_levels) {
+ bool dbg = false;
+ if (dbg) {
+ std::cerr << "Here is the procedure : set_up_values_of_landscapes. The parameters are : grid_min_ : " << grid_min_
+ << ", grid_max_ : " << grid_max_ << ", number_of_points_ : " << number_of_points_
+ << ", number_of_levels: " << number_of_levels << std::endl;
+ std::cerr << "Here are the intervals at our disposal : \n";
+ for (size_t i = 0; i != p.size(); ++i) {
+ std::cerr << p[i].first << " , " << p[i].second << std::endl;
+ }
+ }
+
+ if ((grid_min_ == std::numeric_limits<double>::max()) || (grid_max_ == std::numeric_limits<double>::max())) {
+    // in this case, we need to find grid_min_ and/or grid_max_ based on the data.
+ double min = std::numeric_limits<double>::max();
+    double max = -std::numeric_limits<double>::max();
+ for (size_t i = 0; i != p.size(); ++i) {
+ if (p[i].first < min) min = p[i].first;
+ if (p[i].second > max) max = p[i].second;
+ }
+ if (grid_min_ == std::numeric_limits<double>::max()) {
+ grid_min_ = min;
+    }
+    if (grid_max_ == std::numeric_limits<double>::max()) {
+ grid_max_ = max;
+ }
+ }
+
+  // if number_of_levels == std::numeric_limits<unsigned>::max(), then we will have all the nonzero values of the
+  // landscapes, and will store them in a vector;
+  // if number_of_levels != std::numeric_limits<unsigned>::max(), then we will use those vectors as heaps.
+ this->values_of_landscapes = std::vector<std::vector<double> >(number_of_points_ + 1);
+
+ this->grid_min = grid_min_;
+ this->grid_max = grid_max_;
+
+ if (grid_max_ <= grid_min_) {
+ throw "Wrong parameters of grid_min and grid_max given to the procedure. The program will now terminate.\n";
+ }
+
+ double dx = (grid_max_ - grid_min_) / static_cast<double>(number_of_points_);
+ // for every interval in the diagram:
+ for (size_t int_no = 0; int_no != p.size(); ++int_no) {
+ size_t grid_interval_begin = (p[int_no].first - grid_min_) / dx;
+ size_t grid_interval_end = (p[int_no].second - grid_min_) / dx;
+ size_t grid_interval_midpoint = (size_t)(0.5 * (grid_interval_begin + grid_interval_end));
+
+ if (dbg) {
+ std::cerr << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl;
+
+ std::cerr << "grid_interval_begin : " << grid_interval_begin << std::endl;
+ std::cerr << "grid_interval_end : " << grid_interval_end << std::endl;
+ std::cerr << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl;
+ }
+
+ double landscape_value = dx;
+ for (size_t i = grid_interval_begin + 1; i < grid_interval_midpoint; ++i) {
+ if (dbg) {
+ std::cerr << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value
+ << std::endl;
+ }
+ if (number_of_levels != std::numeric_limits<unsigned>::max()) {
+        // we have a heap of no more than number_of_levels values.
+ // Note that if we are using heaps, we want to know the shortest distance in the heap.
+ // This is achieved by putting -distance to the heap.
+ if (this->values_of_landscapes[i].size() >= number_of_levels) {
+          // in this case, the heap is full, and we need to check whether landscape_value is larger than the
+          // smallest element currently kept in the heap.
+          if (-landscape_value < this->values_of_landscapes[i].front()) {
+            // if it is, we replace the smallest kept value with it, and move on.
+ std::pop_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1] = -landscape_value;
+ std::push_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ }
+ } else {
+ // in this case we are still filling in the array.
+ this->values_of_landscapes[i].push_back(-landscape_value);
+          if (this->values_of_landscapes[i].size() == number_of_levels) {
+            // the vector is now full, so we turn it into a heap.
+            std::make_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ }
+ }
+ } else {
+ // we have vector of all values
+ this->values_of_landscapes[i].push_back(landscape_value);
+ }
+ landscape_value += dx;
+ }
+ for (size_t i = grid_interval_midpoint; i <= grid_interval_end; ++i) {
+ if (landscape_value > 0) {
+ if (number_of_levels != std::numeric_limits<unsigned>::max()) {
+          // we have a heap of no more than number_of_levels values
+ if (this->values_of_landscapes[i].size() >= number_of_levels) {
+            // in this case, the heap is full, and we need to check whether landscape_value is larger than the
+            // smallest element currently kept in the heap.
+            if (-landscape_value < this->values_of_landscapes[i].front()) {
+              // if it is, we replace the smallest kept value with it, and move on.
+ std::pop_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ this->values_of_landscapes[i][this->values_of_landscapes[i].size() - 1] = -landscape_value;
+ std::push_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ }
+ } else {
+ // in this case we are still filling in the array.
+ this->values_of_landscapes[i].push_back(-landscape_value);
+            if (this->values_of_landscapes[i].size() == number_of_levels) {
+              // the vector is now full, so we turn it into a heap.
+              std::make_heap(this->values_of_landscapes[i].begin(), this->values_of_landscapes[i].end());
+ }
+ }
+ } else {
+ this->values_of_landscapes[i].push_back(landscape_value);
+ }
+
+ if (dbg) {
+ std::cerr << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value
+ << std::endl;
+ }
+ }
+ landscape_value -= dx;
+ }
+ }
+
+ if (number_of_levels != std::numeric_limits<unsigned>::max()) {
+    // in this case, vectors are used as heaps. Since we want the smallest landscape value at the top of
+    // each (max-)heap, we stored the negated values. To get it right at the end, we need to multiply each value
+    // in the heap by -1 to recover the actual landscape values.
+ for (size_t pt = 0; pt != this->values_of_landscapes.size(); ++pt) {
+ for (size_t j = 0; j != this->values_of_landscapes[pt].size(); ++j) {
+ this->values_of_landscapes[pt][j] *= -1;
+ }
+ }
+ }
+
+ // and now we need to sort the values:
+ for (size_t pt = 0; pt != this->values_of_landscapes.size(); ++pt) {
+ std::sort(this->values_of_landscapes[pt].begin(), this->values_of_landscapes[pt].end(), std::greater<double>());
+ }
+} // set_up_values_of_landscapes
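The two loops above keep, for each grid point, at most number_of_levels landscape values by maintaining a max-heap of negated values, so the heap top is always the smallest value currently kept. The idiom is independent of the GUDHI types; a minimal standalone sketch (standard library only, names invented for illustration):

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Keep only the k largest values seen so far, mirroring the per-grid-point heaps above.
void keep_k_largest(std::vector<double>& heap, double value, std::size_t k) {
  if (heap.size() < k) {
    heap.push_back(-value);  // store negated values: the max-heap top is then the smallest kept value
    if (heap.size() == k) std::make_heap(heap.begin(), heap.end());
  } else if (-value < heap.front()) {
    std::pop_heap(heap.begin(), heap.end());  // evict the smallest kept value
    heap.back() = -value;
    std::push_heap(heap.begin(), heap.end());
  }
}

// At the end, negate and sort in decreasing order, as set_up_values_of_landscapes does:
void finalize_levels(std::vector<double>& heap) {
  for (double& v : heap) v = -v;
  std::sort(heap.begin(), heap.end(), std::greater<double>());
}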
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const std::vector<std::pair<double, double> >& p,
+ double grid_min_, double grid_max_,
+ size_t number_of_points_) {
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_);
+} // Persistence_landscape_on_grid
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const std::vector<std::pair<double, double> >& p,
+ double grid_min_, double grid_max_,
+ size_t number_of_points_,
+ unsigned number_of_levels_of_landscape) {
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_, number_of_levels_of_landscape);
+}
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const char* filename, double grid_min_, double grid_max_,
+ size_t number_of_points_, uint16_t dimension) {
+ std::vector<std::pair<double, double> > p;
+ if (dimension == std::numeric_limits<uint16_t>::max()) {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_);
+}
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const char* filename, double grid_min_, double grid_max_,
+ size_t number_of_points_,
+ unsigned number_of_levels_of_landscape,
+ uint16_t dimension) {
+ std::vector<std::pair<double, double> > p;
+ if (dimension == std::numeric_limits<uint16_t>::max()) {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_, number_of_levels_of_landscape);
+}
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const char* filename, size_t number_of_points_,
+ uint16_t dimension) {
+ std::vector<std::pair<double, double> > p;
+ if (dimension == std::numeric_limits<uint16_t>::max()) {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ double grid_min_ = std::numeric_limits<double>::max();
+ double grid_max_ = -std::numeric_limits<double>::max();
+ for (size_t i = 0; i != p.size(); ++i) {
+ if (p[i].first < grid_min_) grid_min_ = p[i].first;
+ if (p[i].second > grid_max_) grid_max_ = p[i].second;
+ }
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_);
+}
+
+Persistence_landscape_on_grid::Persistence_landscape_on_grid(const char* filename, size_t number_of_points_,
+ unsigned number_of_levels_of_landscape,
+ uint16_t dimension) {
+ std::vector<std::pair<double, double> > p;
+ if (dimension == std::numeric_limits<uint16_t>::max()) {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ p = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ double grid_min_ = std::numeric_limits<double>::max();
+ double grid_max_ = -std::numeric_limits<double>::max();
+ for (size_t i = 0; i != p.size(); ++i) {
+ if (p[i].first < grid_min_) grid_min_ = p[i].first;
+ if (p[i].second > grid_max_) grid_max_ = p[i].second;
+ }
+ this->set_up_values_of_landscapes(p, grid_min_, grid_max_, number_of_points_, number_of_levels_of_landscape);
+}
+
+void Persistence_landscape_on_grid::load_landscape_from_file(const char* filename) {
+ std::ifstream in;
+ in.open(filename);
+  // check if the file exists.
+  if (!in.good()) {
+    std::cerr << "The file : " << filename << " does not exist. The program will now terminate \n";
+    throw "The persistence landscape file does not exist. The program will now terminate \n";
+ }
+
+ size_t number_of_points_in_the_grid = 0;
+ in >> this->grid_min >> this->grid_max >> number_of_points_in_the_grid;
+
+ std::vector<std::vector<double> > v(number_of_points_in_the_grid);
+ std::string line;
+ std::getline(in, line);
+ double number;
+ for (size_t i = 0; i != number_of_points_in_the_grid; ++i) {
+ // read a line of a file and convert it to a vector.
+ std::vector<double> vv;
+ std::getline(in, line);
+ std::istringstream stream(line);
+ while (stream >> number) {
+ vv.push_back(number);
+ }
+ v[i] = vv;
+ }
+ this->values_of_landscapes = v;
+ in.close();
+}
+
+void Persistence_landscape_on_grid::print_to_file(const char* filename) const {
+ std::ofstream out;
+ out.open(filename);
+
+ // first we store the parameters of the grid:
+ out << grid_min << std::endl << grid_max << std::endl << this->values_of_landscapes.size() << std::endl;
+
+ // and now in the following lines, the values of this->values_of_landscapes for the following arguments:
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ for (size_t j = 0; j != this->values_of_landscapes[i].size(); ++j) {
+ out << this->values_of_landscapes[i][j] << " ";
+ }
+ out << std::endl;
+ }
+
+ out.close();
+}
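For reference, the text format written by print_to_file() and parsed back by load_landscape_from_file() is: grid_min, grid_max and the number of grid points, each on its own line, followed by one line per grid point listing the values of lambda_1, lambda_2, ... at that point; an empty line means all levels vanish there. A made-up example for a 4-point grid on [0, 3] (the numbers are purely illustrative):

0
3
4

0.5 0.25
1 0.5
0.5

Here the empty fourth line indicates that the first grid point carries no nonzero landscape value.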
+
+void Persistence_landscape_on_grid::plot(const char* filename, double min_x, double max_x, double min_y, double max_y,
+ size_t from_, size_t to_) const {
+  // this function creates a gnuplot script file that allows plotting the persistence landscape.
+ std::ofstream out;
+
+ std::ostringstream gnuplot_script;
+ gnuplot_script << filename << "_GnuplotScript";
+ out.open(gnuplot_script.str().c_str());
+
+ if (min_x == max_x) {
+ std::pair<double, double> min_max = compute_minimum_maximum();
+ out << "set xrange [" << this->grid_min << " : " << this->grid_max << "]" << std::endl;
+ out << "set yrange [" << min_max.first << " : " << min_max.second << "]" << std::endl;
+ } else {
+ out << "set xrange [" << min_x << " : " << max_x << "]" << std::endl;
+ out << "set yrange [" << min_y << " : " << max_y << "]" << std::endl;
+ }
+
+ size_t number_of_nonzero_levels = this->number_of_nonzero_levels();
+ double dx = (this->grid_max - this->grid_min) / static_cast<double>(this->values_of_landscapes.size() - 1);
+
+ size_t from = 0;
+ if (from_ != std::numeric_limits<size_t>::max()) {
+ if (from_ < number_of_nonzero_levels) {
+ from = from_;
+ } else {
+ return;
+ }
+ }
+ size_t to = number_of_nonzero_levels;
+ if (to_ != std::numeric_limits<size_t>::max()) {
+ if (to_ < number_of_nonzero_levels) {
+ to = to_;
+ }
+ }
+
+ out << "plot ";
+ for (size_t lambda = from; lambda != to; ++lambda) {
+ out << " '-' using 1:2 notitle with lp";
+ if (lambda + 1 != to) {
+ out << ", \\";
+ }
+ out << std::endl;
+ }
+
+ for (size_t lambda = from; lambda != to; ++lambda) {
+ double point = this->grid_min;
+ for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
+ double value = 0;
+ if (this->values_of_landscapes[i].size() > lambda) {
+ value = this->values_of_landscapes[i][lambda];
+ }
+ out << point << " " << value << std::endl;
+ point += dx;
+ }
+ out << "EOF" << std::endl;
+ }
+ std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ << gnuplot_script.str().c_str() << "\'\"" << std::endl;
+}
+
+template <typename T>
+Persistence_landscape_on_grid operation_on_pair_of_landscapes_on_grid(const Persistence_landscape_on_grid& land1,
+ const Persistence_landscape_on_grid& land2) {
+ // first we need to check if the domains are the same:
+ if (!check_if_defined_on_the_same_domain(land1, land2)) throw "Two grids are not compatible";
+
+ T oper;
+ Persistence_landscape_on_grid result;
+ result.values_of_landscapes = std::vector<std::vector<double> >(land1.values_of_landscapes.size());
+ result.grid_min = land1.grid_min;
+ result.grid_max = land1.grid_max;
+
+ // now we perform the operations:
+ for (size_t grid_point = 0; grid_point != land1.values_of_landscapes.size(); ++grid_point) {
+ result.values_of_landscapes[grid_point] = std::vector<double>(
+ std::max(land1.values_of_landscapes[grid_point].size(), land2.values_of_landscapes[grid_point].size()));
+ for (size_t lambda = 0; lambda != std::max(land1.values_of_landscapes[grid_point].size(),
+ land2.values_of_landscapes[grid_point].size());
+ ++lambda) {
+ double value1 = 0;
+ double value2 = 0;
+ if (lambda < land1.values_of_landscapes[grid_point].size())
+ value1 = land1.values_of_landscapes[grid_point][lambda];
+ if (lambda < land2.values_of_landscapes[grid_point].size())
+ value2 = land2.values_of_landscapes[grid_point][lambda];
+ result.values_of_landscapes[grid_point][lambda] = oper(value1, value2);
+ }
+ }
+
+ return result;
+}
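The template parameter T above is a binary functor applied pointwise to the two landscapes, with missing levels treated as zero. The arithmetic operators of the class are presumably thin wrappers around this helper; purely as an illustration (the actual operator definitions are not part of this excerpt, and std::plus/std::minus come from <functional>):

// Illustration only, not the GUDHI definitions: pointwise sum and difference
// expressed through operation_on_pair_of_landscapes_on_grid.
Persistence_landscape_on_grid sum_of(const Persistence_landscape_on_grid& a,
                                     const Persistence_landscape_on_grid& b) {
  return operation_on_pair_of_landscapes_on_grid<std::plus<double> >(a, b);
}

Persistence_landscape_on_grid difference_of(const Persistence_landscape_on_grid& a,
                                            const Persistence_landscape_on_grid& b) {
  return operation_on_pair_of_landscapes_on_grid<std::minus<double> >(a, b);
}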
+
+Persistence_landscape_on_grid Persistence_landscape_on_grid::multiply_lanscape_by_real_number_not_overwrite(
+ double x) const {
+ Persistence_landscape_on_grid result;
+ result.values_of_landscapes = std::vector<std::vector<double> >(this->values_of_landscapes.size());
+ result.grid_min = this->grid_min;
+ result.grid_max = this->grid_max;
+
+ for (size_t grid_point = 0; grid_point != this->values_of_landscapes.size(); ++grid_point) {
+ result.values_of_landscapes[grid_point] = std::vector<double>(this->values_of_landscapes[grid_point].size());
+ for (size_t i = 0; i != this->values_of_landscapes[grid_point].size(); ++i) {
+ result.values_of_landscapes[grid_point][i] = x * this->values_of_landscapes[grid_point][i];
+ }
+ }
+
+ return result;
+}
+
+double compute_max_norm_distance_of_landscapes(const Persistence_landscape_on_grid& first,
+ const Persistence_landscape_on_grid& second) {
+ double result = 0;
+
+  // first we need to check if first and second are defined on the same domain
+ if (!check_if_defined_on_the_same_domain(first, second)) throw "Two grids are not compatible";
+
+ for (size_t i = 0; i != first.values_of_landscapes.size(); ++i) {
+ for (size_t j = 0; j != std::min(first.values_of_landscapes[i].size(), second.values_of_landscapes[i].size());
+ ++j) {
+      if (result < fabs(first.values_of_landscapes[i][j] - second.values_of_landscapes[i][j])) {
+        result = fabs(first.values_of_landscapes[i][j] - second.values_of_landscapes[i][j]);
+ }
+ }
+ if (first.values_of_landscapes[i].size() ==
+ std::min(first.values_of_landscapes[i].size(), second.values_of_landscapes[i].size())) {
+ for (size_t j = first.values_of_landscapes[i].size(); j != second.values_of_landscapes[i].size(); ++j) {
+ if (result < second.values_of_landscapes[i][j]) result = second.values_of_landscapes[i][j];
+ }
+ }
+ if (second.values_of_landscapes[i].size() ==
+ std::min(first.values_of_landscapes[i].size(), second.values_of_landscapes[i].size())) {
+ for (size_t j = second.values_of_landscapes[i].size(); j != first.values_of_landscapes[i].size(); ++j) {
+ if (result < first.values_of_landscapes[i][j]) result = first.values_of_landscapes[i][j];
+ }
+ }
+ }
+ return result;
+}
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_LANDSCAPE_ON_GRID_H_
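Putting the pieces of this header together, a typical use looks roughly as follows; this is only a sketch, and the diagram, grid parameters and file name are invented for illustration:

#include <gudhi/Persistence_landscape_on_grid.h>
#include <iostream>
#include <utility>
#include <vector>

using Gudhi::Persistence_representations::Persistence_landscape_on_grid;

int main() {
  std::vector<std::pair<double, double> > diagram = {{0.0, 1.0}, {0.2, 0.8}, {0.5, 2.0}};
  // Sample the landscape on 100 grid points of [0, 2], keeping at most 3 levels per point.
  Persistence_landscape_on_grid l1(diagram, 0.0, 2.0, 100, 3);
  Persistence_landscape_on_grid l2(diagram, 0.0, 2.0, 100, 3);
  // 0 for two landscapes built from the same diagram (the function is found via argument-dependent lookup).
  std::cout << compute_max_norm_distance_of_landscapes(l1, l2) << std::endl;
  l1.print_to_file("landscape_on_grid.txt");
  return 0;
}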
diff --git a/include/gudhi/Persistence_vectors.h b/include/gudhi/Persistence_vectors.h
new file mode 100644
index 00000000..63577e46
--- /dev/null
+++ b/include/gudhi/Persistence_vectors.h
@@ -0,0 +1,640 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PERSISTENCE_VECTORS_H_
+#define PERSISTENCE_VECTORS_H_
+
+// gudhi include
+#include <gudhi/read_persistence_from_file.h>
+#include <gudhi/common_persistence_representations.h>
+#include <gudhi/distance_functions.h>
+
+#include <fstream>
+#include <cmath>
+#include <algorithm>
+#include <iostream>
+#include <limits>
+#include <functional>
+#include <utility>
+#include <vector>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+template <typename T>
+struct Maximum_distance {
+ double operator()(const std::pair<T, T>& f, const std::pair<T, T>& s) {
+ return std::max(fabs(f.first - s.first), fabs(f.second - s.second));
+ }
+};
+
+/**
+ * \class Vector_distances_in_diagram Persistence_vectors.h gudhi/Persistence_vectors.h
+ * \brief A class implementing persistence vectors.
+ *
+ * \ingroup Persistence_representations
+ *
+ * \details
+ * This is an implementation of the idea presented in the paper <i>Stable Topological Signatures for Points on 3D
+ * Shapes</i> \cite Carriere_Oudot_Ovsjanikov_top_signatures_3d .<br>
+ * The template parameter of the class is the class that computes the distance used to construct the vectors. The
+ * typical choice is either the Euclidean or the maximum (sup) distance.
+ *
+ * This class implements the following concepts: Vectorized_topological_data, Topological_data_with_distances,
+ * Real_valued_topological_data, Topological_data_with_averages, Topological_data_with_scalar_product
+ **/
+template <typename F>
+class Vector_distances_in_diagram {
+ public:
+ /**
+ * The default constructor.
+ **/
+ Vector_distances_in_diagram() {}
+
+ /**
+   * The constructor that takes as an input a multiset of persistence intervals (given as a vector of birth-death
+   * pairs). The second parameter is the desired length of the output vectors.
+ **/
+ Vector_distances_in_diagram(const std::vector<std::pair<double, double> >& intervals, size_t where_to_cut);
+
+ /**
+ * The constructor taking as an input a file with birth-death pairs. The second parameter is the desired length of
+ *the output vectors.
+ **/
+ Vector_distances_in_diagram(const char* filename, size_t where_to_cut,
+ unsigned dimension = std::numeric_limits<unsigned>::max());
+
+ /**
+ * Writing to a stream.
+ **/
+ template <typename K>
+ friend std::ostream& operator<<(std::ostream& out, const Vector_distances_in_diagram<K>& d) {
+ for (size_t i = 0; i != std::min(d.sorted_vector_of_distances.size(), d.where_to_cut); ++i) {
+ out << d.sorted_vector_of_distances[i] << " ";
+ }
+ return out;
+ }
+
+ /**
+   * This procedure gives the value of the vector at a given position.
+ **/
+ inline double vector_in_position(size_t position) const {
+ if (position >= this->sorted_vector_of_distances.size())
+ throw("Wrong position in accessing Vector_distances_in_diagram::sorted_vector_of_distances\n");
+ return this->sorted_vector_of_distances[position];
+ }
+
+ /**
+   * Return the size of the vector.
+ **/
+ inline size_t size() const { return this->sorted_vector_of_distances.size(); }
+
+ /**
+ * Write a vector to a file.
+ **/
+ void write_to_file(const char* filename) const;
+
+ /**
+ * Write a vector to a file.
+ **/
+ void print_to_file(const char* filename) const { this->write_to_file(filename); }
+
+ /**
+   * Load a vector from a file.
+ **/
+ void load_from_file(const char* filename);
+
+ /**
+ * Comparison operators:
+ **/
+ bool operator==(const Vector_distances_in_diagram& second) const {
+ if (this->sorted_vector_of_distances.size() != second.sorted_vector_of_distances.size()) return false;
+ for (size_t i = 0; i != this->sorted_vector_of_distances.size(); ++i) {
+ if (!almost_equal(this->sorted_vector_of_distances[i], second.sorted_vector_of_distances[i])) return false;
+ }
+ return true;
+ }
+
+ bool operator!=(const Vector_distances_in_diagram& second) const { return !(*this == second); }
+
+ // Implementations of functions for various concepts.
+ /**
+   * Compute a projection of the persistence vector to the real numbers. This function is required by the
+   * Real_valued_topological_data concept.
+   * At the moment this function is not tested, since it is quite likely to be changed in the future. Given this,
+   * when using it, keep in mind that it will most likely change in the next versions.
+ **/
+ double project_to_R(int number_of_function) const;
+ /**
+ * The function gives the number of possible projections to R. This function is required by the
+ *Real_valued_topological_data concept.
+ **/
+ size_t number_of_projections_to_R() const { return this->number_of_functions_for_projections_to_reals; }
+
+ /**
+   * Compute a vectorization of the persistence vector. It is required by the Vectorized_topological_data concept.
+ **/
+ std::vector<double> vectorize(int number_of_function) const;
+ /**
+   * This function returns the number of functions that allow vectorization of a persistence vector. It is required
+   * by the Vectorized_topological_data concept.
+ **/
+ size_t number_of_vectorize_functions() const { return this->number_of_functions_for_vectorization; }
+
+ /**
+   * Compute the average of a collection of persistence vectors. This function is required by the Topological_data_with_averages concept.
+ **/
+ void compute_average(const std::vector<Vector_distances_in_diagram*>& to_average);
+
+ /**
+   * Compute the distance between two persistence vectors. This function is required by the Topological_data_with_distances concept.
+ * For max norm distance, set power to std::numeric_limits<double>::max()
+ **/
+ double distance(const Vector_distances_in_diagram& second, double power = 1) const;
+
+ /**
+   * Compute the scalar product of two persistence vectors. This function is required by the
+   * Topological_data_with_scalar_product concept.
+ **/
+ double compute_scalar_product(const Vector_distances_in_diagram& second) const;
+ // end of implementation of functions needed for concepts.
+
+ /**
+   * For visualization, use the output of vectorize() and build histograms.
+ **/
+ std::vector<double> output_for_visualization() const { return this->sorted_vector_of_distances; }
+
+ /**
+ * Create a gnuplot script to visualize the data structure.
+ **/
+ void plot(const char* filename) const {
+ std::stringstream gnuplot_script;
+ gnuplot_script << filename << "_GnuplotScript";
+ std::ofstream out;
+ out.open(gnuplot_script.str().c_str());
+ out << "set style data histogram" << std::endl;
+ out << "set style histogram cluster gap 1" << std::endl;
+ out << "set style fill solid border -1" << std::endl;
+ out << "plot '-' notitle" << std::endl;
+ for (size_t i = 0; i != this->sorted_vector_of_distances.size(); ++i) {
+ out << this->sorted_vector_of_distances[i] << std::endl;
+ }
+ out << std::endl;
+ out.close();
+ std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ << gnuplot_script.str().c_str() << "\'\"" << std::endl;
+ }
+
+ /**
+ * The x-range of the persistence vector.
+ **/
+ std::pair<double, double> get_x_range() const { return std::make_pair(0, this->sorted_vector_of_distances.size()); }
+
+ /**
+ * The y-range of the persistence vector.
+ **/
+ std::pair<double, double> get_y_range() const {
+ if (this->sorted_vector_of_distances.size() == 0) return std::make_pair(0, 0);
+ return std::make_pair(this->sorted_vector_of_distances[0], 0);
+ }
+
+ // arithmetic operations:
+ template <typename Operation_type>
+ friend Vector_distances_in_diagram operation_on_pair_of_vectors(const Vector_distances_in_diagram& first,
+ const Vector_distances_in_diagram& second,
+ Operation_type opertion) {
+ Vector_distances_in_diagram result;
+ // Operation_type operation;
+ result.sorted_vector_of_distances.reserve(
+ std::max(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size()));
+ for (size_t i = 0; i != std::min(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size());
+ ++i) {
+ result.sorted_vector_of_distances.push_back(
+ opertion(first.sorted_vector_of_distances[i], second.sorted_vector_of_distances[i]));
+ }
+ if (first.sorted_vector_of_distances.size() ==
+ std::min(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size())) {
+ for (size_t i = std::min(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size());
+ i != std::max(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size()); ++i) {
+ result.sorted_vector_of_distances.push_back(opertion(0, second.sorted_vector_of_distances[i]));
+ }
+ } else {
+ for (size_t i = std::min(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size());
+ i != std::max(first.sorted_vector_of_distances.size(), second.sorted_vector_of_distances.size()); ++i) {
+ result.sorted_vector_of_distances.push_back(opertion(first.sorted_vector_of_distances[i], 0));
+ }
+ }
+ return result;
+ } // operation_on_pair_of_vectors
+
+ /**
+ * This function implements an operation of multiplying Vector_distances_in_diagram by a scalar.
+ **/
+ Vector_distances_in_diagram multiply_by_scalar(double scalar) const {
+ Vector_distances_in_diagram result;
+ result.sorted_vector_of_distances.reserve(this->sorted_vector_of_distances.size());
+ for (size_t i = 0; i != this->sorted_vector_of_distances.size(); ++i) {
+ result.sorted_vector_of_distances.push_back(scalar * this->sorted_vector_of_distances[i]);
+ }
+ return result;
+ } // multiply_by_scalar
+
+ /**
+ * This function computes a sum of two objects of a type Vector_distances_in_diagram.
+ **/
+ friend Vector_distances_in_diagram operator+(const Vector_distances_in_diagram& first,
+ const Vector_distances_in_diagram& second) {
+ return operation_on_pair_of_vectors(first, second, std::plus<double>());
+ }
+ /**
+   * This function computes the difference of two objects of type Vector_distances_in_diagram.
+   **/
+ friend Vector_distances_in_diagram operator-(const Vector_distances_in_diagram& first,
+ const Vector_distances_in_diagram& second) {
+ return operation_on_pair_of_vectors(first, second, std::minus<double>());
+ }
+ /**
+   * This function computes the product of an object of type Vector_distances_in_diagram with a real number.
+   **/
+ friend Vector_distances_in_diagram operator*(double scalar, const Vector_distances_in_diagram& A) {
+ return A.multiply_by_scalar(scalar);
+ }
+ /**
+   * This function computes the product of an object of type Vector_distances_in_diagram with a real number.
+   **/
+ friend Vector_distances_in_diagram operator*(const Vector_distances_in_diagram& A, double scalar) {
+ return A.multiply_by_scalar(scalar);
+ }
+ /**
+   * This function computes the product of an object of type Vector_distances_in_diagram with a real number.
+   **/
+ Vector_distances_in_diagram operator*(double scalar) { return this->multiply_by_scalar(scalar); }
+ /**
+ * += operator for Vector_distances_in_diagram.
+ **/
+ Vector_distances_in_diagram operator+=(const Vector_distances_in_diagram& rhs) {
+ *this = *this + rhs;
+ return *this;
+ }
+ /**
+ * -= operator for Vector_distances_in_diagram.
+ **/
+ Vector_distances_in_diagram operator-=(const Vector_distances_in_diagram& rhs) {
+ *this = *this - rhs;
+ return *this;
+ }
+ /**
+ * *= operator for Vector_distances_in_diagram.
+ **/
+ Vector_distances_in_diagram operator*=(double x) {
+ *this = *this * x;
+ return *this;
+ }
+ /**
+ * /= operator for Vector_distances_in_diagram.
+ **/
+ Vector_distances_in_diagram operator/=(double x) {
+ if (x == 0) throw("In operator /=, division by 0. Program terminated.");
+ *this = *this * (1 / x);
+ return *this;
+ }
+
+ private:
+ std::vector<std::pair<double, double> > intervals;
+ std::vector<double> sorted_vector_of_distances;
+ size_t number_of_functions_for_vectorization;
+ size_t number_of_functions_for_projections_to_reals;
+ size_t where_to_cut;
+
+ void compute_sorted_vector_of_distances_via_heap(size_t where_to_cut);
+ void compute_sorted_vector_of_distances_via_vector_sorting(size_t where_to_cut);
+
+ Vector_distances_in_diagram(const std::vector<double>& sorted_vector_of_distances_)
+ : sorted_vector_of_distances(sorted_vector_of_distances_) {
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+ }
+
+ void set_up_numbers_of_functions_for_vectorization_and_projections_to_reals() {
+    // warning, this function can only be called after filling in the sorted_vector_of_distances vector.
+ this->number_of_functions_for_vectorization = this->sorted_vector_of_distances.size();
+ this->number_of_functions_for_projections_to_reals = this->sorted_vector_of_distances.size();
+ }
+};
+
+template <typename F>
+Vector_distances_in_diagram<F>::Vector_distances_in_diagram(const std::vector<std::pair<double, double> >& intervals_,
+ size_t where_to_cut_)
+ : where_to_cut(where_to_cut_) {
+ std::vector<std::pair<double, double> > i(intervals_);
+ this->intervals = i;
+ // this->compute_sorted_vector_of_distances_via_heap( where_to_cut );
+ this->compute_sorted_vector_of_distances_via_vector_sorting(where_to_cut);
+ this->set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+}
+
+template <typename F>
+Vector_distances_in_diagram<F>::Vector_distances_in_diagram(const char* filename, size_t where_to_cut,
+ unsigned dimension)
+ : where_to_cut(where_to_cut) {
+ std::vector<std::pair<double, double> > intervals;
+ if (dimension == std::numeric_limits<unsigned>::max()) {
+ intervals = read_persistence_intervals_in_one_dimension_from_file(filename);
+ } else {
+ intervals = read_persistence_intervals_in_one_dimension_from_file(filename, dimension);
+ }
+ this->intervals = intervals;
+ this->compute_sorted_vector_of_distances_via_heap(where_to_cut);
+ // this->compute_sorted_vector_of_distances_via_vector_sorting( where_to_cut );
+ set_up_numbers_of_functions_for_vectorization_and_projections_to_reals();
+}
+
+template <typename F>
+void Vector_distances_in_diagram<F>::compute_sorted_vector_of_distances_via_heap(size_t where_to_cut) {
+ bool dbg = false;
+ if (dbg) {
+ std::cerr << "Here are the intervals : \n";
+ for (size_t i = 0; i != this->intervals.size(); ++i) {
+ std::cerr << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
+ }
+ }
+ where_to_cut = std::min(
+ where_to_cut, (size_t)(0.5 * this->intervals.size() * (this->intervals.size() - 1) + this->intervals.size()));
+
+ std::vector<double> heap(where_to_cut, std::numeric_limits<int>::max());
+ std::make_heap(heap.begin(), heap.end());
+ F f;
+
+  // for every pair of points in the diagram, compute the minimum of their distance and of their distances from the
+  // diagonal
+ for (size_t i = 0; i < this->intervals.size(); ++i) {
+ for (size_t j = i + 1; j < this->intervals.size(); ++j) {
+ double value = std::min(
+ f(this->intervals[i], this->intervals[j]),
+ std::min(
+ f(this->intervals[i], std::make_pair(0.5 * (this->intervals[i].first + this->intervals[i].second),
+ 0.5 * (this->intervals[i].first + this->intervals[i].second))),
+ f(this->intervals[j], std::make_pair(0.5 * (this->intervals[j].first + this->intervals[j].second),
+ 0.5 * (this->intervals[j].first + this->intervals[j].second)))));
+
+ if (dbg) {
+ std::cerr << "Value : " << value << std::endl;
+ std::cerr << "heap.front() : " << heap.front() << std::endl;
+ getchar();
+ }
+
+ if (-value < heap.front()) {
+ if (dbg) {
+ std::cerr << "Replacing : " << heap.front() << " with : " << -value << std::endl;
+ getchar();
+ }
+ // remove the first element from the heap
+ std::pop_heap(heap.begin(), heap.end());
+ // heap.pop_back();
+ // and put value there instead:
+ // heap.push_back(-value);
+ heap[where_to_cut - 1] = -value;
+ std::push_heap(heap.begin(), heap.end());
+ }
+ }
+ }
+
+ // now add distances of all points from diagonal
+ for (size_t i = 0; i < this->intervals.size(); ++i) {
+ double value = f(this->intervals[i], std::make_pair(0.5 * (this->intervals[i].first + this->intervals[i].second),
+ 0.5 * (this->intervals[i].first + this->intervals[i].second)));
+ if (-value < heap.front()) {
+ // remove the first element from the heap
+ std::pop_heap(heap.begin(), heap.end());
+ // heap.pop_back();
+ // and put value there instead:
+ // heap.push_back(-value);
+ heap[where_to_cut - 1] = -value;
+ std::push_heap(heap.begin(), heap.end());
+ }
+ }
+
+ std::sort_heap(heap.begin(), heap.end());
+ for (size_t i = 0; i != heap.size(); ++i) {
+ if (heap[i] == std::numeric_limits<int>::max()) {
+ heap[i] = 0;
+ } else {
+ heap[i] *= -1;
+ }
+ }
+
+ if (dbg) {
+ std::cerr << "This is the heap after all the operations :\n";
+ for (size_t i = 0; i != heap.size(); ++i) {
+ std::cout << heap[i] << " ";
+ }
+ std::cout << std::endl;
+ }
+
+ this->sorted_vector_of_distances = heap;
+}
+
+template <typename F>
+void Vector_distances_in_diagram<F>::compute_sorted_vector_of_distances_via_vector_sorting(size_t where_to_cut) {
+ std::vector<double> distances;
+ distances.reserve((size_t)(0.5 * this->intervals.size() * (this->intervals.size() - 1) + this->intervals.size()));
+ F f;
+
+  // for every pair of points in the diagram, compute the minimum of their distance and of their distances from the
+  // diagonal
+ for (size_t i = 0; i < this->intervals.size(); ++i) {
+ // add distance of i-th point in the diagram from the diagonal to the distances vector
+ distances.push_back(
+ f(this->intervals[i], std::make_pair(0.5 * (this->intervals[i].first + this->intervals[i].second),
+ 0.5 * (this->intervals[i].first + this->intervals[i].second))));
+ for (size_t j = i + 1; j < this->intervals.size(); ++j) {
+ double value = std::min(
+ f(this->intervals[i], this->intervals[j]),
+ std::min(
+ f(this->intervals[i], std::make_pair(0.5 * (this->intervals[i].first + this->intervals[i].second),
+ 0.5 * (this->intervals[i].first + this->intervals[i].second))),
+ f(this->intervals[j], std::make_pair(0.5 * (this->intervals[j].first + this->intervals[j].second),
+ 0.5 * (this->intervals[j].first + this->intervals[j].second)))));
+ distances.push_back(value);
+ }
+ }
+ std::sort(distances.begin(), distances.end(), std::greater<double>());
+ if (distances.size() > where_to_cut) distances.resize(where_to_cut);
+
+ this->sorted_vector_of_distances = distances;
+}
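Both member functions above compute the same vector: for every diagram point its distance to the diagonal, and for every pair of points the minimum of their mutual distance and of their distances to the diagonal, all sorted in decreasing order and truncated to where_to_cut entries. A standalone sketch of that computation with the sup distance (not GUDHI code, names invented):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

std::vector<double> persistence_vector(const std::vector<std::pair<double, double> >& diag, std::size_t k) {
  auto dist = [](const std::pair<double, double>& a, const std::pair<double, double>& b) {
    return std::max(std::fabs(a.first - b.first), std::fabs(a.second - b.second));
  };
  auto to_diagonal = [&dist](const std::pair<double, double>& a) {
    double m = 0.5 * (a.first + a.second);
    return dist(a, std::make_pair(m, m));  // distance to the closest point of the diagonal
  };
  std::vector<double> d;
  for (std::size_t i = 0; i < diag.size(); ++i) {
    d.push_back(to_diagonal(diag[i]));
    for (std::size_t j = i + 1; j < diag.size(); ++j)
      d.push_back(std::min(dist(diag[i], diag[j]), std::min(to_diagonal(diag[i]), to_diagonal(diag[j]))));
  }
  std::sort(d.begin(), d.end(), std::greater<double>());
  if (d.size() > k) d.resize(k);
  return d;
}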
+
+// Implementations of functions for various concepts.
+template <typename F>
+double Vector_distances_in_diagram<F>::project_to_R(int number_of_function) const {
+ if ((size_t)number_of_function > this->number_of_functions_for_projections_to_reals)
+ throw "Wrong index of a function in a method Vector_distances_in_diagram<F>::project_to_R";
+ if (number_of_function < 0)
+ throw "Wrong index of a function in a method Vector_distances_in_diagram<F>::project_to_R";
+
+ double result = 0;
+ for (size_t i = 0; i != (size_t)number_of_function; ++i) {
+ result += sorted_vector_of_distances[i];
+ }
+ return result;
+}
+
+template <typename F>
+void Vector_distances_in_diagram<F>::compute_average(const std::vector<Vector_distances_in_diagram*>& to_average) {
+ if (to_average.size() == 0) {
+ (*this) = Vector_distances_in_diagram<F>();
+ return;
+ }
+
+ size_t maximal_length_of_vector = 0;
+ for (size_t i = 0; i != to_average.size(); ++i) {
+ if (to_average[i]->sorted_vector_of_distances.size() > maximal_length_of_vector) {
+ maximal_length_of_vector = to_average[i]->sorted_vector_of_distances.size();
+ }
+ }
+
+ std::vector<double> av(maximal_length_of_vector, 0);
+ for (size_t i = 0; i != to_average.size(); ++i) {
+ for (size_t j = 0; j != to_average[i]->sorted_vector_of_distances.size(); ++j) {
+ av[j] += to_average[i]->sorted_vector_of_distances[j];
+ }
+ }
+
+ for (size_t i = 0; i != maximal_length_of_vector; ++i) {
+ av[i] /= static_cast<double>(to_average.size());
+ }
+ this->sorted_vector_of_distances = av;
+ this->where_to_cut = av.size();
+}
+
+template <typename F>
+double Vector_distances_in_diagram<F>::distance(const Vector_distances_in_diagram& second_, double power) const {
+ bool dbg = false;
+
+ if (dbg) {
+ std::cerr << "Entering double Vector_distances_in_diagram<F>::distance( const Abs_Topological_data_with_distances* "
+ "second , double power ) procedure \n";
+ std::cerr << "Power : " << power << std::endl;
+ std::cerr << "This : " << *this << std::endl;
+ std::cerr << "second : " << second_ << std::endl;
+ }
+
+ double result = 0;
+ for (size_t i = 0; i != std::min(this->sorted_vector_of_distances.size(), second_.sorted_vector_of_distances.size());
+ ++i) {
+ if (power == 1) {
+ if (dbg) {
+ std::cerr << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
+ << " | : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i])
+ << std::endl;
+ }
+ result += fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]);
+ } else {
+ if (power < std::numeric_limits<double>::max()) {
+ result += std::pow(fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]), power);
+ } else {
+ // max norm
+ if (result < fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]))
+ result = fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]);
+ }
+ if (dbg) {
+ std::cerr << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
+ << " : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i])
+ << std::endl;
+ }
+ }
+ }
+ if (this->sorted_vector_of_distances.size() != second_.sorted_vector_of_distances.size()) {
+ if (this->sorted_vector_of_distances.size() > second_.sorted_vector_of_distances.size()) {
+ for (size_t i = second_.sorted_vector_of_distances.size(); i != this->sorted_vector_of_distances.size(); ++i) {
+ result += fabs(this->sorted_vector_of_distances[i]);
+ }
+ } else {
+ // this->sorted_vector_of_distances.size() < second_.sorted_vector_of_distances.size()
+ for (size_t i = this->sorted_vector_of_distances.size(); i != second_.sorted_vector_of_distances.size(); ++i) {
+ result += fabs(second_.sorted_vector_of_distances[i]);
+ }
+ }
+ }
+
+  if ((power != 1) && (power < std::numeric_limits<double>::max())) {
+    // for the max norm (power == std::numeric_limits<double>::max()) the accumulated maximum is returned as-is
+ result = std::pow(result, (1.0 / power));
+ }
+ return result;
+}
+
+template <typename F>
+std::vector<double> Vector_distances_in_diagram<F>::vectorize(int number_of_function) const {
+ if ((size_t)number_of_function > this->number_of_functions_for_vectorization)
+ throw "Wrong index of a function in a method Vector_distances_in_diagram<F>::vectorize";
+ if (number_of_function < 0) throw "Wrong index of a function in a method Vector_distances_in_diagram<F>::vectorize";
+
+ std::vector<double> result(std::min((size_t)number_of_function, this->sorted_vector_of_distances.size()));
+ for (size_t i = 0; i != std::min((size_t)number_of_function, this->sorted_vector_of_distances.size()); ++i) {
+ result[i] = this->sorted_vector_of_distances[i];
+ }
+ return result;
+}
+
+template <typename F>
+void Vector_distances_in_diagram<F>::write_to_file(const char* filename) const {
+ std::ofstream out;
+ out.open(filename);
+
+ for (size_t i = 0; i != this->sorted_vector_of_distances.size(); ++i) {
+ out << this->sorted_vector_of_distances[i] << " ";
+ }
+
+ out.close();
+}
+
+template <typename F>
+void Vector_distances_in_diagram<F>::load_from_file(const char* filename) {
+ std::ifstream in;
+ in.open(filename);
+  // check if the file exists.
+  if (!in.good()) {
+    std::cerr << "The file : " << filename << " does not exist. The program will now terminate \n";
+    throw "The persistence vector file does not exist. The program will now terminate \n";
+ }
+
+ double number;
+ while (in >> number) {
+ this->sorted_vector_of_distances.push_back(number);
+ }
+ in.close();
+}
+
+template <typename F>
+double Vector_distances_in_diagram<F>::compute_scalar_product(const Vector_distances_in_diagram& second_vector) const {
+ double result = 0;
+ for (size_t i = 0;
+ i != std::min(this->sorted_vector_of_distances.size(), second_vector.sorted_vector_of_distances.size()); ++i) {
+ result += this->sorted_vector_of_distances[i] * second_vector.sorted_vector_of_distances[i];
+ }
+ return result;
+}
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // PERSISTENCE_VECTORS_H_
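A usage sketch for this class; the diagram and the cut-off are invented for illustration, and Maximum_distance is the functor defined at the top of this header:

#include <gudhi/Persistence_vectors.h>
#include <iostream>
#include <utility>
#include <vector>

using Gudhi::Persistence_representations::Maximum_distance;
using Gudhi::Persistence_representations::Vector_distances_in_diagram;

int main() {
  std::vector<std::pair<double, double> > diagram = {{0.0, 2.0}, {0.5, 1.0}, {1.0, 4.0}};
  Vector_distances_in_diagram<Maximum_distance<double> > v1(diagram, 5);  // keep the 5 largest entries
  Vector_distances_in_diagram<Maximum_distance<double> > v2(diagram, 5);
  std::cout << "vector: " << v1 << std::endl;                    // uses the streaming operator above
  std::cout << "L1 distance: " << v1.distance(v2) << std::endl;  // 0 for identical inputs
  std::cout << "scalar product: " << v1.compute_scalar_product(v2) << std::endl;
  return 0;
}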
diff --git a/include/gudhi/Simplex_tree.h b/include/gudhi/Simplex_tree.h
index 37b3ea97..7456cb1f 100644
--- a/include/gudhi/Simplex_tree.h
+++ b/include/gudhi/Simplex_tree.h
@@ -49,6 +49,7 @@
#include <initializer_list>
#include <algorithm> // for std::max
#include <cstdint> // for std::uint32_t
+#include <iterator> // for std::distance
namespace Gudhi {
@@ -106,8 +107,9 @@ class Simplex_tree {
};
struct Key_simplex_base_dummy {
Key_simplex_base_dummy() {}
- void assign_key(Simplex_key) { }
- Simplex_key key() const { assert(false); return -1; }
+ // Undefined so it will not link
+ void assign_key(Simplex_key);
+ Simplex_key key() const;
};
typedef typename std::conditional<Options::store_key, Key_simplex_base_real, Key_simplex_base_dummy>::type
Key_simplex_base;
@@ -121,7 +123,7 @@ class Simplex_tree {
};
struct Filtration_simplex_base_dummy {
Filtration_simplex_base_dummy() {}
- void assign_filtration(Filtration_value f) { assert(f == 0); }
+ void assign_filtration(Filtration_value GUDHI_CHECK_code(f)) { GUDHI_CHECK(f == 0, "filtration value specified for a complex that does not store them"); }
Filtration_value filtration() const { return 0; }
};
typedef typename std::conditional<Options::store_filtration, Filtration_simplex_base_real,
@@ -391,13 +393,13 @@ class Simplex_tree {
return sh->second.key();
}
- /** \brief Returns the simplex associated to a key.
+ /** \brief Returns the simplex that has index idx in the filtration.
*
* The filtration must be initialized.
* \pre SimplexTreeOptions::store_key
*/
- Simplex_handle simplex(Simplex_key key) const {
- return filtration_vect_[key];
+ Simplex_handle simplex(Simplex_key idx) const {
+ return filtration_vect_[idx];
}
/** \brief Returns the filtration value of a simplex.
@@ -482,7 +484,17 @@ class Simplex_tree {
}
/** \brief Returns an upper bound on the dimension of the simplicial complex. */
- int dimension() const {
+ int upper_bound_dimension() const {
+ return dimension_;
+ }
+
+ /** \brief Returns the dimension of the simplicial complex.
+ \details This function is not constant time because it can recompute dimension if required (can be triggered by
+ `remove_maximal_simplex()` or `prune_above_filtration()`).
+ */
+ int dimension() {
+ if (dimension_to_be_lowered_)
+ lower_upper_bound_dimension();
return dimension_;
}
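The split between the two accessors matters after operations that can remove simplices. A hedged sketch of the difference (the default Gudhi::Simplex_tree<> options are assumed and the filtration values are made up):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1, 2}, 1.0);   // a triangle, all faces get filtration value 1
  st.insert_simplex_and_subfaces({3}, 0.5);
  st.prune_above_filtration(0.75);                  // only vertex 3 survives
  std::cout << st.upper_bound_dimension() << "\n";  // still 2: the cached bound is not lowered
  std::cout << st.dimension() << "\n";              // 0: recomputed because the tree was pruned
  return 0;
}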
@@ -490,6 +502,7 @@ class Simplex_tree {
* sh has children.*/
template<class SimplexHandle>
bool has_children(SimplexHandle sh) const {
+ // Here we rely on the root using null_vertex(), which cannot match any real vertex.
return (sh->second.children()->parent() == sh->first);
}
@@ -519,18 +532,30 @@ class Simplex_tree {
Simplex_handle find_simplex(const std::vector<Vertex_handle> & simplex) {
Siblings * tmp_sib = &root_;
Dictionary_it tmp_dit;
- Vertex_handle last = simplex.back();
- for (auto v : simplex) {
- tmp_dit = tmp_sib->members_.find(v);
- if (tmp_dit == tmp_sib->members_.end()) {
+ auto vi = simplex.begin();
+ if (Options::contiguous_vertices) {
+ // Equivalent to the first iteration of the normal loop
+ GUDHI_CHECK(contiguous_vertices(), "non-contiguous vertices");
+ Vertex_handle v = *vi++;
+ if(v < 0 || v >= static_cast<Vertex_handle>(root_.members_.size()))
return null_simplex();
- }
- if (!has_children(tmp_dit) && v != last) {
+ tmp_dit = root_.members_.begin() + v;
+ if (vi == simplex.end())
+ return tmp_dit;
+ if (!has_children(tmp_dit))
+ return null_simplex();
+ tmp_sib = tmp_dit->second.children();
+ }
+ for (;;) {
+ tmp_dit = tmp_sib->members_.find(*vi++);
+ if (tmp_dit == tmp_sib->members_.end())
+ return null_simplex();
+ if (vi == simplex.end())
+ return tmp_dit;
+ if (!has_children(tmp_dit))
return null_simplex();
- }
tmp_sib = tmp_dit->second.children();
}
- return tmp_dit;
}
/** \brief Returns the Simplex_handle corresponding to the 0-simplex
@@ -574,12 +599,14 @@ class Simplex_tree {
std::pair<Simplex_handle, bool> res_insert;
auto vi = simplex.begin();
for (; vi != simplex.end() - 1; ++vi) {
+ GUDHI_CHECK(*vi != null_vertex(), "cannot use the dummy null_vertex() as a real vertex");
res_insert = curr_sib->members_.emplace(*vi, Node(curr_sib, filtration));
if (!(has_children(res_insert.first))) {
res_insert.first->second.assign_children(new Siblings(curr_sib, *vi));
}
curr_sib = res_insert.first->second.children();
}
+ GUDHI_CHECK(*vi != null_vertex(), "cannot use the dummy null_vertex() as a real vertex");
res_insert = curr_sib->members_.emplace(*vi, Node(curr_sib, filtration));
if (!res_insert.second) {
// if already in the complex
@@ -591,7 +618,11 @@ class Simplex_tree {
// if filtration value unchanged
return std::pair<Simplex_handle, bool>(null_simplex(), false);
}
- // otherwise the insertion has succeeded
+ // otherwise the insertion has succeeded - size is a size_type
+ if (static_cast<int>(simplex.size()) - 1 > dimension_) {
+ // Update dimension if needed
+ dimension_ = static_cast<int>(simplex.size()) - 1;
+ }
return res_insert;
}
@@ -650,71 +681,67 @@ class Simplex_tree {
*/
template<class InputVertexRange = std::initializer_list<Vertex_handle>>
std::pair<Simplex_handle, bool> insert_simplex_and_subfaces(const InputVertexRange& Nsimplex,
- Filtration_value filtration = 0) {
+ Filtration_value filtration = 0) {
auto first = std::begin(Nsimplex);
auto last = std::end(Nsimplex);
if (first == last)
- return std::pair<Simplex_handle, bool>(null_simplex(), true); // ----->>
+ return { null_simplex(), true }; // ----->>
// Copy before sorting
- std::vector<Vertex_handle> copy(first, last);
+ thread_local std::vector<Vertex_handle> copy;
+ copy.clear();
+ copy.insert(copy.end(), first, last);
std::sort(std::begin(copy), std::end(copy));
+ GUDHI_CHECK_code(
+ for (Vertex_handle v : copy)
+ GUDHI_CHECK(v != null_vertex(), "cannot use the dummy null_vertex() as a real vertex");
+ )
- std::vector<std::vector<Vertex_handle>> to_be_inserted;
- std::vector<std::vector<Vertex_handle>> to_be_propagated;
- return rec_insert_simplex_and_subfaces(copy, to_be_inserted, to_be_propagated, filtration);
+ return insert_simplex_and_subfaces_sorted(copy, filtration);
}
private:
- std::pair<Simplex_handle, bool> rec_insert_simplex_and_subfaces(std::vector<Vertex_handle>& the_simplex,
- std::vector<std::vector<Vertex_handle>>& to_be_inserted,
- std::vector<std::vector<Vertex_handle>>& to_be_propagated,
- Filtration_value filtration = 0.0) {
- std::pair<Simplex_handle, bool> insert_result;
- if (the_simplex.size() > 1) {
- // Get and remove last vertex
- Vertex_handle last_vertex = the_simplex.back();
- the_simplex.pop_back();
- // Recursive call after last vertex removal
- insert_result = rec_insert_simplex_and_subfaces(the_simplex, to_be_inserted, to_be_propagated, filtration);
-
- // Concatenation of to_be_inserted and to_be_propagated
- to_be_inserted.insert(to_be_inserted.begin(), to_be_propagated.begin(), to_be_propagated.end());
- to_be_propagated = to_be_inserted;
-
- // to_be_inserted treatment
- for (auto& simplex_tbi : to_be_inserted) {
- simplex_tbi.push_back(last_vertex);
- }
- std::vector<Vertex_handle> last_simplex(1, last_vertex);
- to_be_inserted.insert(to_be_inserted.begin(), last_simplex);
- // i.e. (0,1,2) =>
- // [to_be_inserted | to_be_propagated] = [(1) (0,1) | (0)]
- // [to_be_inserted | to_be_propagated] = [(2) (0,2) (1,2) (0,1,2) | (0) (1) (0,1)]
- // N.B. : it is important the last inserted to be the highest in dimension
- // in order to return the "last" insert_simplex result
-
- // insert all to_be_inserted
- for (auto& simplex_tbi : to_be_inserted) {
- insert_result = insert_vertex_vector(simplex_tbi, filtration);
- }
- } else if (the_simplex.size() == 1) {
- // When reaching the end of recursivity, vector of simplices shall be empty and filled on back recursive
- if ((to_be_inserted.size() != 0) || (to_be_propagated.size() != 0)) {
- std::cerr << "Simplex_tree::rec_insert_simplex_and_subfaces - Error vector not empty\n";
- exit(-1);
+ /// Same as insert_simplex_and_subfaces but assumes that the range of vertices is sorted
+ template<class ForwardVertexRange = std::initializer_list<Vertex_handle>>
+ std::pair<Simplex_handle, bool> insert_simplex_and_subfaces_sorted(const ForwardVertexRange& Nsimplex, Filtration_value filt = 0) {
+ auto first = std::begin(Nsimplex);
+ auto last = std::end(Nsimplex);
+ if (first == last)
+ return { null_simplex(), true }; // FIXME: false would make more sense to me.
+ GUDHI_CHECK(std::is_sorted(first, last), "simplex vertices listed in unsorted order");
+ // Update dimension if needed. We could wait to see if the insertion succeeds, but I doubt there is much to gain.
+ dimension_ = (std::max)(dimension_, static_cast<int>(std::distance(first, last)) - 1);
+ return rec_insert_simplex_and_subfaces_sorted(root(), first, last, filt);
+ }
+ // To insert {1,2,3,4}, we insert {2,3,4} twice, once at the root, and once below 1.
+ template<class ForwardVertexIterator>
+ std::pair<Simplex_handle, bool> rec_insert_simplex_and_subfaces_sorted(Siblings* sib, ForwardVertexIterator first, ForwardVertexIterator last, Filtration_value filt) {
+ // An alternative strategy would be:
+ // - try to find the complete simplex, if found (and low filtration) exit
+ // - insert all the vertices at once in sib
+ // - loop over those (new or not) simplices, with a recursive call(++first, last)
+ Vertex_handle vertex_one = *first;
+ auto&& dict = sib->members();
+ auto insertion_result = dict.emplace(vertex_one, Node(sib, filt));
+ Simplex_handle simplex_one = insertion_result.first;
+ bool one_is_new = insertion_result.second;
+ if (!one_is_new) {
+ if (filtration(simplex_one) > filt) {
+ assign_filtration(simplex_one, filt);
+ } else {
+ // FIXME: this interface makes no sense, and it doesn't seem to be tested.
+ insertion_result.first = null_simplex();
}
- std::vector<Vertex_handle> first_simplex(1, the_simplex.back());
- // i.e. (0,1,2) => [to_be_inserted | to_be_propagated] = [(0) | ]
- to_be_inserted.push_back(first_simplex);
-
- insert_result = insert_vertex_vector(first_simplex, filtration);
- } else {
- std::cerr << "Simplex_tree::rec_insert_simplex_and_subfaces - Recursivity error\n";
- exit(-1);
}
- return insert_result;
+ if (++first == last) return insertion_result;
+ if (!has_children(simplex_one))
+ // TODO: have special code here, we know we are building the whole subtree from scratch.
+ simplex_one->second.assign_children(new Siblings(sib, vertex_one));
+ auto res = rec_insert_simplex_and_subfaces_sorted(simplex_one->second.children(), first, last, filt);
+ // No need to continue if the full simplex was already there with a low enough filtration value.
+ if (res.first != null_simplex()) rec_insert_simplex_and_subfaces_sorted(sib, first, last, filt);
+ return res;
}
public:
@@ -747,8 +774,12 @@ class Simplex_tree {
return &root_;
}
- /** Set a dimension for the simplicial complex. */
+ /** \brief Set a dimension for the simplicial complex.
+ * \details This function must be used with caution because it disables dimension recomputation when required
+ * (this recomputation can be triggered by `remove_maximal_simplex()` or `prune_above_filtration()`).
+ */
void set_dimension(int dimension) {
+ dimension_to_be_lowered_ = false;
dimension_ = dimension;
}
@@ -923,8 +954,9 @@ class Simplex_tree {
* called.
*
* Inserts all vertices and edges given by a OneSkeletonGraph.
- * OneSkeletonGraph must be a model of boost::AdjacencyGraph,
- * boost::EdgeListGraph and boost::PropertyGraph.
+ * OneSkeletonGraph must be a model of
+ * <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/EdgeListGraph.html">boost::EdgeListGraph</a>
+ * and <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
*
* The vertex filtration value is accessible through the property tag
* vertex_filtration_t.
@@ -934,7 +966,10 @@ class Simplex_tree {
* boost::graph_traits<OneSkeletonGraph>::vertex_descriptor
* must be Vertex_handle.
* boost::graph_traits<OneSkeletonGraph>::directed_category
- * must be undirected_tag. */
+ * must be undirected_tag.
+ *
+ * If an edge appears with multiplicity, the function will arbitrarily pick
+ * one representative to read the filtration value. */
template<class OneSkeletonGraph>
void insert_graph(const OneSkeletonGraph& skel_graph) {
// the simplex tree must be empty
@@ -965,18 +1000,22 @@ class Simplex_tree {
++e_it) {
auto u = source(*e_it, skel_graph);
auto v = target(*e_it, skel_graph);
- if (u < v) {
- // count edges only once { std::swap(u,v); } // u < v
- auto sh = find_vertex(u);
- if (!has_children(sh)) {
- sh->second.assign_children(new Siblings(&root_, sh->first));
- }
-
- sh->second.children()->members().emplace(
- v,
- Node(sh->second.children(),
- boost::get(edge_filtration_t(), skel_graph, *e_it)));
+ if (u == v) throw "Self-loops are not simplicial";
+ // We cannot skip edges with the wrong orientation and expect them to
+ // come a second time with the right orientation, that does not always
+ // happen in practice. emplace() should be a NOP when an element with the
+ // same key is already there, so seeing the same edge multiple times is
+ // ok.
+ // Should we actually forbid multiple edges? That would be consistent
+ // with rejecting self-loops.
+ if (v < u) std::swap(u, v);
+ auto sh = find_vertex(u);
+ if (!has_children(sh)) {
+ sh->second.assign_children(new Siblings(&root_, sh->first));
}
+
+ sh->second.children()->members().emplace(v,
+ Node(sh->second.children(), boost::get(edge_filtration_t(), skel_graph, *e_it)));
}
}
@@ -1067,6 +1106,120 @@ class Simplex_tree {
}
public:
+ /** \brief Expands a simplex tree containing only a graph. Simplices corresponding to cliques in the graph are added
+ * incrementally, faces before cofaces, unless the simplex has dimension larger than `max_dim` or `block_simplex`
+ * returns true for this simplex.
+ *
+ * @param[in] max_dim Expansion maximal dimension value.
+ * @param[in] block_simplex Blocker oracle. Its concept is <CODE>bool block_simplex(Simplex_handle sh)</CODE>
+ *
+ * The function identifies a candidate simplex whose faces are all already in the complex, inserts
+ * it with a filtration value corresponding to the maximum of the filtration values of the faces, then calls
+ * `block_simplex` on a `Simplex_handle` for this new simplex. If `block_simplex` returns true, the simplex is
+ * removed, otherwise it is kept. Note that the evaluation of `block_simplex` is a good time to update the
+ * filtration value of the simplex if you want a customized value. The algorithm then proceeds with the next
+ * candidate.
+ *
+ * @warning several candidates of the same dimension may be inserted simultaneously before calling `block_simplex`,
+ * so if you examine the complex in `block_simplex`, you may hit a few simplices of the same dimension that have not
+ * been vetted by `block_simplex` yet, or have already been rejected but not yet removed.
+ */
+ template< typename Blocker >
+ void expansion_with_blockers(int max_dim, Blocker block_simplex) {
+    // Loop must be from the end to the beginning, as higher dimensional simplices are always on the left part of the tree
+ for (auto& simplex : boost::adaptors::reverse(root_.members())) {
+ if (has_children(&simplex)) {
+ siblings_expansion_with_blockers(simplex.second.children(), max_dim, max_dim - 1, block_simplex);
+ }
+ }
+ }
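A usage sketch of expansion_with_blockers with a lambda blocker, once the 1-skeleton has been inserted; the default Gudhi::Simplex_tree<> options are assumed and the threshold 2.5 is made up:

#include <gudhi/Simplex_tree.h>

// Expand the flag complex up to dimension 3, rejecting any candidate simplex whose
// inherited filtration value exceeds 2.5 (returning true blocks the simplex).
void expand_with_threshold(Gudhi::Simplex_tree<>& st) {
  st.expansion_with_blockers(3, [&st](Gudhi::Simplex_tree<>::Simplex_handle sh) {
    return st.filtration(sh) > 2.5;
  });
}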
+
+ private:
+ /** \brief Recursive expansion with blockers of the simplex tree.*/
+ template< typename Blocker >
+ void siblings_expansion_with_blockers(Siblings* siblings, int max_dim, int k, Blocker block_simplex) {
+ if (dimension_ < max_dim - k) {
+ dimension_ = max_dim - k;
+ }
+ if (k == 0)
+ return;
+ // No need to go deeper
+ if (siblings->members().size() < 2)
+ return;
+    // Reverse loop, starting one before the last member, so that 'next' can range over the members after 'simplex'
+ for (auto simplex = siblings->members().rbegin() + 1; simplex != siblings->members().rend(); simplex++) {
+ std::vector<std::pair<Vertex_handle, Node> > intersection;
+ for(auto next = siblings->members().rbegin(); next != simplex; next++) {
+ bool to_be_inserted = true;
+ Filtration_value filt = simplex->second.filtration();
+ // If all the boundaries are present, 'next' needs to be inserted
+ for (Simplex_handle border : boundary_simplex_range(simplex)) {
+ Simplex_handle border_child = find_child(border, next->first);
+ if (border_child == null_simplex()) {
+ to_be_inserted=false;
+ break;
+ }
+ filt = (std::max)(filt, filtration(border_child));
+ }
+ if (to_be_inserted) {
+ intersection.emplace_back(next->first, Node(nullptr, filt));
+ }
+ }
+ if (intersection.size() != 0) {
+ // Reverse the order to insert
+ Siblings * new_sib = new Siblings(siblings, // oncles
+ simplex->first, // parent
+ boost::adaptors::reverse(intersection)); // boost::container::ordered_unique_range_t
+ std::vector<Vertex_handle> blocked_new_sib_vertex_list;
+ // As all intersections are inserted, we can call the blocker function on all new_sib members
+ for (auto new_sib_member = new_sib->members().begin();
+ new_sib_member != new_sib->members().end();
+ new_sib_member++) {
+ bool blocker_result = block_simplex(new_sib_member);
+ // new_sib member has been blocked by the blocker function
+ // add it to the list to be removed - do not perform it while looping on it
+ if (blocker_result) {
+ blocked_new_sib_vertex_list.push_back(new_sib_member->first);
+ }
+ }
+ if (blocked_new_sib_vertex_list.size() == new_sib->members().size()) {
+ // Specific case where all have to be deleted
+ delete new_sib;
+ // ensure the children property
+ simplex->second.assign_children(siblings);
+ } else {
+ for (auto& blocked_new_sib_member : blocked_new_sib_vertex_list) {
+ new_sib->members().erase(blocked_new_sib_member);
+ }
+ // ensure recursive call
+ simplex->second.assign_children(new_sib);
+ siblings_expansion_with_blockers(new_sib, max_dim, k - 1, block_simplex);
+ }
+ } else {
+ // ensure the children property
+ simplex->second.assign_children(siblings);
+ }
+ }
+ }
+
+  /* \private Returns the Simplex_handle whose vertex list is the vertex list of the given Simplex_handle, plus the
+   * given Vertex_handle, provided that this Vertex_handle is found among the children of the Simplex_handle.
+   * Returns null_simplex() if no such simplex exists.
+   */
+ Simplex_handle find_child(Simplex_handle sh, Vertex_handle vh) const {
+ if (!has_children(sh))
+ return null_simplex();
+
+ Simplex_handle child = sh->second.children()->find(vh);
+    // Specific case: when boost::flat_map does not find the key, it returns boost::flat_map::end();
+    // in the simplex tree we want a null_simplex() instead
+ if (child == sh->second.children()->members().end())
+ return null_simplex();
+
+ return child;
+ }
+
+ public:
/** \brief Write the hasse diagram of the simplicial complex in os.
*
    * Each row in the file corresponds to a simplex. A line is written:
@@ -1142,6 +1295,9 @@ class Simplex_tree {
* \post Some simplex tree functions require the filtration to be valid. `prune_above_filtration()`
* function is not launching `initialize_filtration()` but returns the filtration modification information. If the
    * complex has changed, please call `initialize_filtration()` to recompute it.
+ * \post Note that the dimension of the simplicial complex may be lower after calling `prune_above_filtration()`
+ * than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
+ * bound. If you care, you can call `dimension()` to recompute the exact dimension.
*/
bool prune_above_filtration(Filtration_value filtration) {
return rec_prune_above_filtration(root(), filtration);
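
For illustration only, a minimal sketch (not taken from the patch) of the dimension-lowering behaviour described in the \post note above, assuming a default Gudhi::Simplex_tree<>; the filtration values and the 1.0 cut-off are made up:

#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  std::vector<int> e01 = {0, 1}, e02 = {0, 2}, e12 = {1, 2}, triangle = {0, 1, 2};
  st.insert_simplex_and_subfaces(e01, 0.);
  st.insert_simplex_and_subfaces(e02, 0.);
  st.insert_simplex_and_subfaces(e12, 0.);
  st.insert_simplex(triangle, 2.);                        // the only 2-simplex
  std::cout << st.dimension() << std::endl;               // 2
  if (st.prune_above_filtration(1.))                      // removes the triangle
    st.initialize_filtration();                           // filtration order must be recomputed
  std::cout << st.upper_bound_dimension() << std::endl;   // still 2: a stale but valid upper bound
  std::cout << st.dimension() << std::endl;               // 1: recomputed exactly
  return 0;
}
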
@@ -1153,6 +1309,8 @@ class Simplex_tree {
auto last = std::remove_if(list.begin(), list.end(), [=](Dit_value_t& simplex) {
if (simplex.second.filtration() <= filt) return false;
if (has_children(&simplex)) rec_delete(simplex.second.children());
+ // dimension may need to be lowered
+ dimension_to_be_lowered_ = true;
return true;
});
@@ -1161,6 +1319,8 @@ class Simplex_tree {
// Removing the whole siblings, parent becomes a leaf.
sib->oncles()->members()[sib->parent()].assign_children(sib->oncles());
delete sib;
+ // dimension may need to be lowered
+ dimension_to_be_lowered_ = true;
return true;
} else {
// Keeping some elements of siblings. Remove the others, and recurse in the remaining ones.
@@ -1172,12 +1332,45 @@ class Simplex_tree {
return modified;
}
+ private:
+  /** \brief Recomputes the dimension of the simplex tree by a deep search.
+   * @return True if the dimension was modified, false otherwise.
+   * \pre Make sure the stored dimension is not too low, as the deep search stops as soon as the former dimension
+   * has been reached (cf. `upper_bound_dimension()` and `set_dimension()` methods).
+ */
+ bool lower_upper_bound_dimension() {
+ // reset automatic detection to recompute
+ dimension_to_be_lowered_ = false;
+ int new_dimension = -1;
+    // Browse the tree from left to right, as higher-dimensional cells are more likely on the left part of the tree
+ for (Simplex_handle sh : complex_simplex_range()) {
+#ifdef DEBUG_TRACES
+ for (auto vertex : simplex_vertex_range(sh)) {
+ std::cout << " " << vertex;
+ }
+ std::cout << std::endl;
+#endif // DEBUG_TRACES
+
+ int sh_dimension = dimension(sh);
+ if (sh_dimension >= dimension_)
+        // Stop browsing as soon as the dimension is reached, no need to go further
+ return false;
+ new_dimension = (std::max)(new_dimension, sh_dimension);
+ }
+ dimension_ = new_dimension;
+ return true;
+ }
+
+
public:
/** \brief Remove a maximal simplex.
* @param[in] sh Simplex handle on the maximal simplex to remove.
* \pre Please check the simplex has no coface before removing it.
* \exception std::invalid_argument In debug mode, if sh has children.
* \post Be aware that removing is shifting data in a flat_map (initialize_filtration to be done).
+ * \post Note that the dimension of the simplicial complex may be lower after calling `remove_maximal_simplex()`
+ * than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
+ * bound. If you care, you can call `dimension()` to recompute the exact dimension.
*/
void remove_maximal_simplex(Simplex_handle sh) {
// Guarantee the simplex has no children
@@ -1195,6 +1388,8 @@ class Simplex_tree {
       // Sibling is emptied: it must be deleted, and its parent must point to its own Sibling
child->oncles()->members().at(child->parent()).assign_children(child->oncles());
delete child;
+ // dimension may need to be lowered
+ dimension_to_be_lowered_ = true;
}
}
@@ -1207,6 +1402,7 @@ class Simplex_tree {
std::vector<Simplex_handle> filtration_vect_;
/** \brief Upper bound on the dimension of the simplicial complex.*/
int dimension_;
+ bool dimension_to_be_lowered_ = false;
};
// Print a Simplex_tree in os.
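
As an illustration of the `expansion_with_blockers` interface documented above, here is a minimal sketch, not taken from the patch, assuming a default Gudhi::Simplex_tree<>; the filtration values and the 2.5 cut-off are made up:

#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <vector>

int main() {
  using Simplex_tree = Gudhi::Simplex_tree<>;
  Simplex_tree st;
  std::vector<int> e01 = {0, 1}, e02 = {0, 2}, e12 = {1, 2};
  st.insert_simplex_and_subfaces(e01, 1.);
  st.insert_simplex_and_subfaces(e02, 2.);
  st.insert_simplex_and_subfaces(e12, 3.);
  // Expand the graph up to dimension 3, rejecting any simplex whose filtration
  // value (here, the maximum over its facets) exceeds 2.5.
  st.expansion_with_blockers(3, [&st](Simplex_tree::Simplex_handle sh) {
    return st.filtration(sh) > 2.5;
  });
  for (auto sh : st.complex_simplex_range()) {
    for (auto v : st.simplex_vertex_range(sh)) std::cout << v << " ";
    std::cout << "-> " << st.filtration(sh) << std::endl;
  }
  return 0;
}
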
diff --git a/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index 7e0a454d..ab7346d4 100644
--- a/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -23,6 +23,8 @@
#ifndef SIMPLEX_TREE_SIMPLEX_TREE_ITERATORS_H_
#define SIMPLEX_TREE_SIMPLEX_TREE_ITERATORS_H_
+#include <gudhi/Debug_utils.h>
+
#include <boost/iterator/iterator_facade.hpp>
#include <boost/version.hpp>
#if BOOST_VERSION >= 105600
@@ -109,11 +111,18 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
: last_(sh->first),
sib_(nullptr),
st_(st) {
+ // Only check once at the beginning instead of for every increment, as this is expensive.
+ if (SimplexTree::Options::contiguous_vertices)
+ GUDHI_CHECK(st_->contiguous_vertices(), "The set of vertices is not { 0, ..., n } without holes");
Siblings * sib = st->self_siblings(sh);
next_ = sib->parent();
sib_ = sib->oncles();
if (sib_ != nullptr) {
- sh_ = sib_->find(next_);
+ if (SimplexTree::Options::contiguous_vertices && sib_->oncles() == nullptr)
+ // Only relevant for edges
+ sh_ = sib_->members_.begin()+next_;
+ else
+ sh_ = sib_->find(next_);
} else {
sh_ = st->null_simplex();
} // vertex: == end()
@@ -140,14 +149,19 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
Siblings * for_sib = sib_;
Siblings * new_sib = sib_->oncles();
auto rit = suffix_.rbegin();
- if (SimplexTree::Options::contiguous_vertices && new_sib == nullptr && rit != suffix_.rend()) {
- // We reached the root, use a short-cut to find a vertex. We could also
- // optimize finding the second vertex of a segment, but people are
- // expected to call endpoints().
- assert(st_->contiguous_vertices());
- sh_ = for_sib->members_.begin()+*rit;
- for_sib = sh_->second.children();
- ++rit;
+ if (SimplexTree::Options::contiguous_vertices && new_sib == nullptr) {
+ // We reached the root, use a short-cut to find a vertex.
+ if (rit == suffix_.rend()) {
+ // Segment, this vertex is the last boundary simplex
+ sh_ = for_sib->members_.begin()+last_;
+ sib_ = nullptr;
+ return;
+ } else {
+ // Dim >= 2, initial step of the descent
+ sh_ = for_sib->members_.begin()+*rit;
+ for_sib = sh_->second.children();
+ ++rit;
+ }
}
for (; rit != suffix_.rend(); ++rit) {
sh_ = for_sib->find(*rit);
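
For context, an illustrative sketch, not from the patch, of the boundary enumeration this iterator serves, using a default Gudhi::Simplex_tree<> and made-up vertices:

#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  std::vector<int> triangle = {0, 1, 2};
  st.insert_simplex_and_subfaces(triangle, 1.);
  auto sh = st.find(triangle);
  // Enumerate the boundary of {0, 1, 2}: its three edges.
  for (auto boundary : st.boundary_simplex_range(sh)) {
    for (auto v : st.simplex_vertex_range(boundary)) std::cout << v << " ";
    std::cout << std::endl;
  }
  return 0;
}
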
diff --git a/include/gudhi/Skeleton_blocker.h b/include/gudhi/Skeleton_blocker.h
index 32fe411c..aca2aa57 100644
--- a/include/gudhi/Skeleton_blocker.h
+++ b/include/gudhi/Skeleton_blocker.h
@@ -239,9 +239,6 @@ their collaboration to write the two initial papers
about this data-structure
 and also Dominique for letting him use a prototype.
-
-\copyright GNU General Public License v3.
-
@} */
} // namespace skeleton_blocker
diff --git a/include/gudhi/Strong_witness_complex.h b/include/gudhi/Strong_witness_complex.h
index 6f4bcf60..b3d00b11 100644
--- a/include/gudhi/Strong_witness_complex.h
+++ b/include/gudhi/Strong_witness_complex.h
@@ -34,7 +34,8 @@ namespace Gudhi {
namespace witness_complex {
-/* \private
+ /**
+ * \private
* \class Strong_witness_complex
* \brief Constructs strong witness complex for a given table of nearest landmarks with respect to witnesses.
* \ingroup witness_complex
@@ -127,10 +128,11 @@ class Strong_witness_complex {
if ((Landmark_id)simplex.size() - 1 > complex_dim)
complex_dim = simplex.size() - 1;
}
- complex.set_dimension(complex_dim);
return true;
}
+ //@}
+
private:
/* \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
* Iterator is needed to know until how far we can take landmarks to form simplexes.
@@ -171,7 +173,6 @@ class Strong_witness_complex {
simplex.pop_back();
}
}
- //@}
};
} // namespace witness_complex
diff --git a/include/gudhi/Tangential_complex.h b/include/gudhi/Tangential_complex.h
index a5cefd6a..6f061922 100644
--- a/include/gudhi/Tangential_complex.h
+++ b/include/gudhi/Tangential_complex.h
@@ -155,7 +155,7 @@ class Tangential_complex {
>::type Triangulation;
typedef typename Triangulation::Geom_traits Tr_traits;
typedef typename Triangulation::Weighted_point Tr_point;
- typedef typename Triangulation::Bare_point Tr_bare_point;
+ typedef typename Tr_traits::Base::Point_d Tr_bare_point;
typedef typename Triangulation::Vertex_handle Tr_vertex_handle;
typedef typename Triangulation::Full_cell_handle Tr_full_cell_handle;
typedef typename Tr_traits::Vector_d Tr_vector;
diff --git a/include/gudhi/Unitary_tests_utils.h b/include/gudhi/Unitary_tests_utils.h
new file mode 100644
index 00000000..8394a062
--- /dev/null
+++ b/include/gudhi/Unitary_tests_utils.h
@@ -0,0 +1,40 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2017 INRIA
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef UNITARY_TESTS_UTILS_H_
+#define UNITARY_TESTS_UTILS_H_
+
+#include <boost/test/unit_test.hpp>
+
+#include <iostream>
+#include <limits>  // for std::numeric_limits<>
+#include <cmath>  // for std::fabs
+
+template<typename FloatingType >
+void GUDHI_TEST_FLOAT_EQUALITY_CHECK(FloatingType a, FloatingType b,
+ FloatingType epsilon = std::numeric_limits<FloatingType>::epsilon()) {
+#ifdef DEBUG_TRACES
+ std::cout << "GUDHI_TEST_FLOAT_EQUALITY_CHECK - " << a << " versus " << b
+ << " | diff = " << std::fabs(a - b) << " - epsilon = " << epsilon << std::endl;
+#endif
+ BOOST_CHECK(std::fabs(a - b) < epsilon);
+}
+
+#endif // UNITARY_TESTS_UTILS_H_
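
A hypothetical sketch of how this helper could be used inside a Boost.Test case; the test name, the values and the 1e-12 tolerance are made up:

#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "unitary_tests_utils_example"
#include <boost/test/unit_test.hpp>

#include <gudhi/Unitary_tests_utils.h>

#include <cmath>

BOOST_AUTO_TEST_CASE(float_equality_example) {
  double computed = std::sqrt(2.0) * std::sqrt(2.0);
  // A strict computed == 2.0 check may fail; compare within a tolerance instead.
  GUDHI_TEST_FLOAT_EQUALITY_CHECK(computed, 2.0, 1e-12);
  // With no third argument, the tolerance defaults to std::numeric_limits<double>::epsilon().
  GUDHI_TEST_FLOAT_EQUALITY_CHECK(1.0 + 0.0, 1.0);
}
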
diff --git a/include/gudhi/Witness_complex.h b/include/gudhi/Witness_complex.h
index bcfe8484..53c38520 100644
--- a/include/gudhi/Witness_complex.h
+++ b/include/gudhi/Witness_complex.h
@@ -130,7 +130,6 @@ class Witness_complex {
}
k++;
}
- complex.set_dimension(k-1);
return true;
}
diff --git a/include/gudhi/choose_n_farthest_points.h b/include/gudhi/choose_n_farthest_points.h
index 86500b28..8390b4c9 100644
--- a/include/gudhi/choose_n_farthest_points.h
+++ b/include/gudhi/choose_n_farthest_points.h
@@ -93,7 +93,7 @@ void choose_n_farthest_points(Kernel const &k,
// Choose randomly the first landmark
std::random_device rd;
std::mt19937 gen(rd());
- std::uniform_int_distribution<std::size_t> dis(0, (input_pts.size() - 1));
+ std::uniform_int_distribution<std::size_t> dis(0, nb_points - 1);
starting_point = dis(gen);
}
@@ -110,7 +110,7 @@ void choose_n_farthest_points(Kernel const &k,
*output_it++ = input_pts[curr_max_w];
*dist_it++ = dist_to_L[curr_max_w];
std::size_t i = 0;
- for (auto& p : input_pts) {
+ for (auto&& p : input_pts) {
double curr_dist = sqdist(p, *(std::begin(input_pts) + curr_max_w));
if (curr_dist < dist_to_L[i])
dist_to_L[i] = curr_dist;
diff --git a/include/gudhi/common_persistence_representations.h b/include/gudhi/common_persistence_representations.h
new file mode 100644
index 00000000..44e125a7
--- /dev/null
+++ b/include/gudhi/common_persistence_representations.h
@@ -0,0 +1,127 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef COMMON_PERSISTENCE_REPRESENTATIONS_H_
+#define COMMON_PERSISTENCE_REPRESENTATIONS_H_
+
+#include <utility>
+#include <string>
+#include <cmath>
+#include <sstream>  // for std::ostringstream, used in error messages below
+
+namespace Gudhi {
+namespace Persistence_representations {
+// This file contains an implementation of some common procedures used in Persistence_representations.
+
+// double epsi = std::numeric_limits<double>::epsilon();
+double epsi = 0.000005;
+
+/**
+ * A procedure used to compare doubles. Typically, given two doubles A and B, comparing A == B is not a good idea.
+ * In this case, we use the procedure almost_equal with the epsi defined at the top of this file.
+ * Setting epsi gives the user a tolerance on what should be considered equal.
+**/
+inline bool almost_equal(double a, double b) {
+ if (std::fabs(a - b) < epsi) return true;
+ return false;
+}
+
+// landscapes
+/**
+ * Extra functions needed in the construction of barcodes.
+**/
+double minus_length(std::pair<double, double> a) { return a.first - a.second; }
+double birth_plus_deaths(std::pair<double, double> a) { return a.first + a.second; }
+
+// landscapes
+/**
+ * Given two points in R^2, the procedure computes the parameters A and B of the line y = Ax + B that passes through
+ * those two points.
+**/
+std::pair<double, double> compute_parameters_of_a_line(std::pair<double, double> p1, std::pair<double, double> p2) {
+ double a = (p2.second - p1.second) / (p2.first - p1.first);
+ double b = p1.second - a * p1.first;
+ return std::make_pair(a, b);
+}
+
+// landscapes
+/**
+ * Given two points which lie on opposite sides of the x axis, this procedure computes the x for which the line
+ * connecting those two points crosses the x axis.
+**/
+double find_zero_of_a_line_segment_between_those_two_points(std::pair<double, double> p1,
+ std::pair<double, double> p2) {
+ if (p1.first == p2.first) return p1.first;
+ if (p1.second * p2.second > 0) {
+ std::ostringstream errMessage;
+ errMessage << "In function find_zero_of_a_line_segment_between_those_two_points the arguments are: (" << p1.first
+ << "," << p1.second << ") and (" << p2.first << "," << p2.second
+ << "). There is no zero in line between those two points. Program terminated.";
+ std::string errMessageStr = errMessage.str();
+ const char* err = errMessageStr.c_str();
+ throw(err);
+ }
+ // we assume here, that x \in [ p1.first, p2.first ] and p1 and p2 are points between which we will put the line
+ // segment
+ double a = (p2.second - p1.second) / (p2.first - p1.first);
+ double b = p1.second - a * p1.first;
+ return -b / a;
+}
+
+// landscapes
+/**
+ * This method provides a comparison of points that is used in the construction of persistence landscapes. Points are
+ * ordered by increasing first coordinate; ties are broken by decreasing second coordinate.
+**/
+bool compare_points_sorting(std::pair<double, double> f, std::pair<double, double> s) {
+ if (f.first < s.first) {
+ return true;
+ } else { // f.first >= s.first
+ if (f.first > s.first) {
+ return false;
+ } else { // f.first == s.first
+ if (f.second > s.second) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+}
+
+// landscapes
+/**
+ * This procedure takes two points in R^2 and a double value x. It computes the line passing through those two points
+ * and returns the value of that linear function at x.
+**/
+double function_value(std::pair<double, double> p1, std::pair<double, double> p2, double x) {
+ // we assume here, that x \in [ p1.first, p2.first ] and p1 and p2 are points between which we will put the line
+ // segment
+ double a = (p2.second - p1.second) / (p2.first - p1.first);
+ double b = p1.second - a * p1.first;
+ return (a * x + b);
+}
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // COMMON_PERSISTENCE_REPRESENTATIONS_H_
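
A small, hypothetical sketch exercising the helpers above; the numeric values are arbitrary:

#include <gudhi/common_persistence_representations.h>

#include <iostream>
#include <utility>

int main() {
  using namespace Gudhi::Persistence_representations;
  std::pair<double, double> p1(0., -1.);
  std::pair<double, double> p2(2., 3.);
  // The line through p1 and p2 is y = 2x - 1.
  std::pair<double, double> ab = compute_parameters_of_a_line(p1, p2);
  double zero = find_zero_of_a_line_segment_between_those_two_points(p1, p2);  // 0.5
  double y = function_value(p1, p2, 1.5);                                      // 2.0
  std::cout << "a = " << ab.first << ", b = " << ab.second
            << ", zero at x = " << zero << ", y(1.5) = " << y << std::endl;
  std::cout << std::boolalpha << almost_equal(y, 2.0) << std::endl;            // true
  return 0;
}
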
diff --git a/include/gudhi/distance_functions.h b/include/gudhi/distance_functions.h
index f6e2ab5a..3a5d1fd5 100644
--- a/include/gudhi/distance_functions.h
+++ b/include/gudhi/distance_functions.h
@@ -23,9 +23,14 @@
#ifndef DISTANCE_FUNCTIONS_H_
#define DISTANCE_FUNCTIONS_H_
+#include <gudhi/Debug_utils.h>
+
+#include <boost/range/metafunctions.hpp>
+
#include <cmath> // for std::sqrt
#include <type_traits> // for std::decay
#include <iterator> // for std::begin, std::end
+#include <utility>
namespace Gudhi {
@@ -37,16 +42,29 @@ namespace Gudhi {
* have the same dimension. */
class Euclidean_distance {
public:
+ // boost::range_value is not SFINAE-friendly so we cannot use it in the return type
template< typename Point >
- auto operator()(const Point& p1, const Point& p2) const -> typename std::decay<decltype(*std::begin(p1))>::type {
- auto it1 = p1.begin();
- auto it2 = p2.begin();
- typename Point::value_type dist = 0.;
- for (; it1 != p1.end(); ++it1, ++it2) {
- typename Point::value_type tmp = (*it1) - (*it2);
+ typename std::iterator_traits<typename boost::range_iterator<Point>::type>::value_type
+ operator()(const Point& p1, const Point& p2) const {
+ auto it1 = std::begin(p1);
+ auto it2 = std::begin(p2);
+ typedef typename boost::range_value<Point>::type NT;
+ NT dist = 0;
+ for (; it1 != std::end(p1); ++it1, ++it2) {
+ GUDHI_CHECK(it2 != std::end(p2), "inconsistent point dimensions");
+ NT tmp = *it1 - *it2;
dist += tmp*tmp;
}
- return std::sqrt(dist);
+ GUDHI_CHECK(it2 == std::end(p2), "inconsistent point dimensions");
+ using std::sqrt;
+ return sqrt(dist);
+ }
+ template< typename T >
+ T operator() (const std::pair< T, T >& f, const std::pair< T, T >& s) const {
+ T dx = f.first - s.first;
+ T dy = f.second - s.second;
+ using std::sqrt;
+ return sqrt(dx*dx + dy*dy);
}
};
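
A minimal sketch, not taken from the patch, of both overloads of Euclidean_distance; the coordinates are arbitrary:

#include <gudhi/distance_functions.h>

#include <iostream>
#include <utility>
#include <vector>

int main() {
  Gudhi::Euclidean_distance dist;
  std::vector<double> p1 = {0., 0., 0.};
  std::vector<double> p2 = {1., 2., 2.};
  std::cout << dist(p1, p2) << std::endl;  // 3, via the generic range overload
  std::pair<double, double> q1(0., 0.);
  std::pair<double, double> q2(3., 4.);
  std::cout << dist(q1, q2) << std::endl;  // 5, via the std::pair overload
  return 0;
}
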
diff --git a/include/gudhi/graph_simplicial_complex.h b/include/gudhi/graph_simplicial_complex.h
index 5fe7c826..d84421b2 100644
--- a/include/gudhi/graph_simplicial_complex.h
+++ b/include/gudhi/graph_simplicial_complex.h
@@ -28,6 +28,9 @@
#include <utility> // for pair<>
#include <vector>
#include <map>
+#include <tuple> // for std::tie
+
+namespace Gudhi {
/* Edge tag for Boost PropertyGraph. */
struct edge_filtration_t {
@@ -39,4 +42,64 @@ struct vertex_filtration_t {
typedef boost::vertex_property_tag kind;
};
+template <typename SimplicialComplexForProximityGraph>
+using Proximity_graph = typename boost::adjacency_list < boost::vecS, boost::vecS, boost::undirectedS
+, boost::property < vertex_filtration_t, typename SimplicialComplexForProximityGraph::Filtration_value >
+, boost::property < edge_filtration_t, typename SimplicialComplexForProximityGraph::Filtration_value >>;
+
+/** \brief Computes the proximity graph of the points.
+ *
+ * If points contains n elements, the proximity graph is the graph with n vertices and an edge [u,v] iff the
+ * distance between points u and v is not larger than threshold.
+ *
+ * \tparam ForwardPointRange furnishes `.begin()` and `.end()` methods.
+ *
+ * \tparam Distance furnishes `operator()(const Point& p1, const Point& p2)`, where
+ * `Point` is a point from the `ForwardPointRange`, and that returns a `Filtration_value`.
+ */
+template< typename SimplicialComplexForProximityGraph
+ , typename ForwardPointRange
+ , typename Distance >
+Proximity_graph<SimplicialComplexForProximityGraph> compute_proximity_graph(
+ const ForwardPointRange& points,
+ typename SimplicialComplexForProximityGraph::Filtration_value threshold,
+ Distance distance) {
+ using Vertex_handle = typename SimplicialComplexForProximityGraph::Vertex_handle;
+ using Filtration_value = typename SimplicialComplexForProximityGraph::Filtration_value;
+
+ std::vector<std::pair< Vertex_handle, Vertex_handle >> edges;
+ std::vector< Filtration_value > edges_fil;
+ std::map< Vertex_handle, Filtration_value > vertices;
+
+ Vertex_handle idx_u, idx_v;
+ Filtration_value fil;
+ idx_u = 0;
+ for (auto it_u = points.begin(); it_u != points.end(); ++it_u) {
+ idx_v = idx_u + 1;
+ for (auto it_v = it_u + 1; it_v != points.end(); ++it_v, ++idx_v) {
+ fil = distance(*it_u, *it_v);
+ if (fil <= threshold) {
+ edges.emplace_back(idx_u, idx_v);
+ edges_fil.push_back(fil);
+ }
+ }
+ ++idx_u;
+ }
+
+ // Points are labeled from 0 to idx_u-1
+ Proximity_graph<SimplicialComplexForProximityGraph> skel_graph(edges.begin(), edges.end(), edges_fil.begin(), idx_u);
+
+ auto vertex_prop = boost::get(vertex_filtration_t(), skel_graph);
+
+ typename boost::graph_traits<Proximity_graph<SimplicialComplexForProximityGraph>>::vertex_iterator vi, vi_end;
+ for (std::tie(vi, vi_end) = boost::vertices(skel_graph);
+ vi != vi_end; ++vi) {
+ boost::put(vertex_prop, *vi, 0.);
+ }
+
+ return skel_graph;
+}
+
+} // namespace Gudhi
+
#endif // GRAPH_SIMPLICIAL_COMPLEX_H_
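
A hypothetical sketch, not taken from the patch, showing how `compute_proximity_graph` can feed a `Gudhi::Simplex_tree`; the points, the threshold of 2 and the call to `expansion(2)` are illustrative choices:

#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>

#include <iostream>
#include <vector>

int main() {
  using Simplex_tree = Gudhi::Simplex_tree<>;
  using Point = std::vector<double>;
  std::vector<Point> points = {{0., 0.}, {1., 0.}, {0., 1.}, {5., 5.}};
  // Edges are created between points at distance <= 2.
  Gudhi::Proximity_graph<Simplex_tree> graph =
      Gudhi::compute_proximity_graph<Simplex_tree>(points, 2., Gudhi::Euclidean_distance());
  Simplex_tree st;
  st.insert_graph(graph);  // vertices and edges, with their filtration values
  st.expansion(2);         // optionally add the triangles (flag-complex expansion)
  std::cout << st.num_simplices() << " simplices" << std::endl;
  return 0;
}
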
diff --git a/include/gudhi/read_persistence_from_file.h b/include/gudhi/read_persistence_from_file.h
new file mode 100644
index 00000000..83b89d0e
--- /dev/null
+++ b/include/gudhi/read_persistence_from_file.h
@@ -0,0 +1,120 @@
+/* This file is part of the Gudhi Library. The Gudhi library
+ * (Geometric Understanding in Higher Dimensions) is a generic C++
+ * library for computational topology.
+ *
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2016 INRIA (France)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef READ_PERSISTENCE_FROM_FILE_H_
+#define READ_PERSISTENCE_FROM_FILE_H_
+
+#include <gudhi/reader_utils.h>
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <limits> // for std::numeric_limits<>
+
+namespace Gudhi {
+namespace Persistence_representations {
+
+/**
+ * Universal procedure to read files with persistence. It ignores lines starting with # (treating them as comments).
+ * It reads the first line which is not a comment and assumes that it contains some numerical entries. The program
+ * assumes that every other non-comment line in the file has the same number of numerical entries (2, 3 or 4).
+ * If there are two numerical entries per line, the function assumes that they are birth/death coordinates.
+ * If there are three numerical entries per line, the function assumes that they are: dimension and birth/death
+ * coordinates.
+ * If there are four numerical entries per line, the function assumes that they are: the characteristic of the field
+ * over which persistence was computed, dimension and birth/death coordinates.
+ * The 'inf' string can appear only as the last element of a line.
+ * The procedure returns a vector of persistence pairs.
+**/
+std::vector<std::pair<double, double> > read_persistence_intervals_in_one_dimension_from_file(
+ std::string const& filename, int dimension = -1, double what_to_substitute_for_infinite_bar = -1) {
+ bool dbg = false;
+
+ std::string line;
+ std::vector<std::pair<double, double> > barcode_initial =
+ read_persistence_intervals_in_dimension(filename, (int)dimension);
+ std::vector<std::pair<double, double> > final_barcode;
+ final_barcode.reserve(barcode_initial.size());
+
+ if (dbg) {
+ std::cerr << "Here are the intervals that we read from the file : \n";
+ for (size_t i = 0; i != barcode_initial.size(); ++i) {
+ std::cout << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl;
+ }
+ getchar();
+ }
+
+ for (size_t i = 0; i != barcode_initial.size(); ++i) {
+ if (dbg) {
+ std::cout << "COnsidering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second
+ << std::endl;
+ }
+
+ if (barcode_initial[i].first > barcode_initial[i].second) {
+ // note that in this case barcode_initial[i].second != std::numeric_limits<double>::infinity()
+ if (dbg) std::cout << "Swap and enter \n";
+ // swap them to make sure that birth < death
+ final_barcode.push_back(std::pair<double, double>(barcode_initial[i].second, barcode_initial[i].first));
+ continue;
+ } else {
+ if (barcode_initial[i].second != std::numeric_limits<double>::infinity()) {
+ if (dbg) std::cout << "Simply enters\n";
+ // in this case, due to the previous conditions we know that barcode_initial[i].first <
+ // barcode_initial[i].second, so we put them as they are
+ final_barcode.push_back(std::pair<double, double>(barcode_initial[i].first, barcode_initial[i].second));
+ }
+ }
+
+ if ((barcode_initial[i].second == std::numeric_limits<double>::infinity()) &&
+ (what_to_substitute_for_infinite_bar != -1)) {
+      if (barcode_initial[i].first < what_to_substitute_for_infinite_bar)  // only if birth < death.
+ {
+ final_barcode.push_back(
+ std::pair<double, double>(barcode_initial[i].first, what_to_substitute_for_infinite_bar));
+ }
+ } else {
+ // if the variable what_to_substitute_for_infinite_bar is not set, then we ignore all the infinite bars.
+ }
+ }
+
+ if (dbg) {
+ std::cerr << "Here are the final bars that we are sending further : \n";
+ for (size_t i = 0; i != final_barcode.size(); ++i) {
+ std::cout << final_barcode[i].first << " " << final_barcode[i].second << std::endl;
+ }
+ std::cerr << "final_barcode.size() : " << final_barcode.size() << std::endl;
+ getchar();
+ }
+
+ return final_barcode;
+} // read_persistence_intervals_in_one_dimension_from_file
+
+} // namespace Persistence_representations
+} // namespace Gudhi
+
+#endif // READ_PERSISTENCE_FROM_FILE_H_
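
A hypothetical usage sketch; the file name persistence.pers, the dimension 1 and the substitution value 100 are made up:

#include <gudhi/read_persistence_from_file.h>

#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Read the dimension-1 intervals, replacing every infinite death value by 100.
  std::vector<std::pair<double, double> > intervals =
      Gudhi::Persistence_representations::read_persistence_intervals_in_one_dimension_from_file(
          "persistence.pers", 1, 100.);
  for (const auto& bar : intervals)
    std::cout << bar.first << " " << bar.second << std::endl;
  return 0;
}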