Diffstat (limited to 'src')
-rw-r--r-- src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp | 50
-rw-r--r-- src/Alpha_complex/concept/SimplicialComplexForAlpha.h | 18
-rw-r--r-- src/Alpha_complex/doc/Intro_alpha_complex.h | 80
-rw-r--r-- src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp | 12
-rw-r--r-- src/Alpha_complex/example/Alpha_complex_from_off.cpp | 2
-rw-r--r-- src/Alpha_complex/example/Alpha_complex_from_points.cpp | 12
-rw-r--r-- src/Alpha_complex/example/CMakeLists.txt | 19
-rw-r--r-- src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp | 2
-rw-r--r-- src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp | 24
-rw-r--r-- src/Alpha_complex/example/Weighted_alpha_complex_from_points.cpp | 52
-rw-r--r-- src/Alpha_complex/example/weightedalpha3dfrompoints_for_doc.txt | 4
-rw-r--r-- src/Alpha_complex/include/gudhi/Alpha_complex.h | 379
-rw-r--r-- src/Alpha_complex/include/gudhi/Alpha_complex/Alpha_kernel_d.h | 141
-rw-r--r-- src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 102
-rw-r--r-- src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp | 34
-rw-r--r-- src/Alpha_complex/test/Alpha_complex_dim3_unit_test.cpp | 117
-rw-r--r-- src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 147
-rw-r--r-- src/Alpha_complex/test/Alpha_kernel_d_unit_test.cpp | 109
-rw-r--r-- src/Alpha_complex/test/CMakeLists.txt | 45
-rw-r--r-- src/Alpha_complex/test/Delaunay_complex_unit_test.cpp | 68
-rw-r--r-- src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp | 16
-rw-r--r-- src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp | 20
-rw-r--r-- src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp | 60
-rw-r--r-- src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp | 127
-rw-r--r-- src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp | 20
-rw-r--r-- src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp | 77
-rw-r--r-- src/Alpha_complex/utilities/CMakeLists.txt | 124
-rw-r--r-- src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp | 31
-rw-r--r-- src/Alpha_complex/utilities/alpha_complex_persistence.cpp | 131
-rw-r--r-- src/Alpha_complex/utilities/alphacomplex.md | 9
-rw-r--r-- src/Bitmap_cubical_complex/example/CMakeLists.txt | 2
-rw-r--r-- src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp | 2
-rw-r--r-- src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h | 74
-rw-r--r-- src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h | 83
-rw-r--r-- src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h | 21
-rw-r--r-- src/Bitmap_cubical_complex/test/Bitmap_test.cpp | 14
-rw-r--r-- src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp | 4
-rw-r--r-- src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp | 4
-rw-r--r-- src/Bottleneck_distance/doc/Intro_bottleneck_distance.h | 4
-rw-r--r-- src/Bottleneck_distance/doc/perturb_pd.png | Bin 20864 -> 15532 bytes
-rw-r--r-- src/Bottleneck_distance/example/CMakeLists.txt | 20
-rw-r--r-- src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp | 38
-rw-r--r-- src/Bottleneck_distance/example/bottleneck_basic_example.cpp | 4
-rw-r--r-- src/Bottleneck_distance/include/gudhi/Bottleneck.h | 8
-rw-r--r-- src/Bottleneck_distance/include/gudhi/Neighbors_finder.h | 2
-rw-r--r-- src/Bottleneck_distance/include/gudhi/Persistence_graph.h | 77
-rw-r--r-- src/Bottleneck_distance/test/bottleneck_unit_test.cpp | 83
-rw-r--r-- src/Bottleneck_distance/utilities/bottleneck_distance.cpp | 4
-rw-r--r-- src/Bottleneck_distance/utilities/bottleneckdistance.md | 4
-rw-r--r-- src/CMakeLists.txt | 34
-rw-r--r-- src/Cech_complex/benchmark/CMakeLists.txt | 19
-rw-r--r-- src/Cech_complex/benchmark/cech_complex_benchmark.cpp | 169
-rw-r--r-- src/Cech_complex/concept/SimplicialComplexForCech.h | 4
-rw-r--r-- src/Cech_complex/doc/Intro_cech_complex.h | 22
-rw-r--r-- src/Cech_complex/example/CMakeLists.txt | 19
-rw-r--r-- src/Cech_complex/example/cech_complex_example_from_points.cpp | 43
-rw-r--r-- src/Cech_complex/example/cech_complex_step_by_step.cpp | 154
-rw-r--r-- src/Cech_complex/include/gudhi/Cech_complex.h | 92
-rw-r--r-- src/Cech_complex/include/gudhi/Cech_complex_blocker.h | 92
-rw-r--r-- src/Cech_complex/include/gudhi/Miniball.COPYRIGHT | 4
-rw-r--r-- src/Cech_complex/include/gudhi/Miniball.README | 26
-rw-r--r-- src/Cech_complex/include/gudhi/Miniball.hpp | 523
-rw-r--r-- src/Cech_complex/include/gudhi/Sphere_circumradius.h | 78
-rw-r--r-- src/Cech_complex/test/CMakeLists.txt | 20
-rw-r--r-- src/Cech_complex/test/test_cech_complex.cpp | 122
-rw-r--r-- src/Cech_complex/utilities/CMakeLists.txt | 39
-rw-r--r-- src/Cech_complex/utilities/cech_persistence.cpp | 95
-rw-r--r-- src/Cech_complex/utilities/cechcomplex.md | 22
-rw-r--r-- src/Collapse/doc/dominated_edge.png | Bin 0 -> 349766 bytes
-rw-r--r-- src/Collapse/doc/intro_edge_collapse.h | 81
-rw-r--r-- src/Collapse/example/CMakeLists.txt | 28
-rw-r--r-- src/Collapse/example/edge_collapse_basic_example.cpp | 36
-rw-r--r-- src/Collapse/example/edge_collapse_conserve_persistence.cpp | 159
-rw-r--r-- src/Collapse/example/edge_collapse_example_basic.txt | 5
-rw-r--r-- src/Collapse/include/gudhi/Flag_complex_edge_collapser.h | 337
-rw-r--r-- src/Collapse/test/CMakeLists.txt | 13
-rw-r--r-- src/Collapse/test/collapse_unit_test.cpp | 198
-rw-r--r-- src/Collapse/utilities/CMakeLists.txt | 37
-rw-r--r-- src/Collapse/utilities/collapse.md | 63
-rw-r--r-- src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp | 152
-rw-r--r-- src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp | 181
-rw-r--r-- src/Contraction/doc/so3.svg | 2
-rw-r--r-- src/Contraction/example/CMakeLists.txt | 1
-rw-r--r-- src/Contraction/example/Garland_heckbert.cpp | 6
-rw-r--r-- src/Contraction/example/Garland_heckbert/Error_quadric.h | 2
-rw-r--r-- src/Contraction/example/Rips_contraction.cpp | 12
-rw-r--r-- src/Contraction/include/gudhi/Edge_contraction.h | 22
-rw-r--r-- src/Contraction/include/gudhi/Skeleton_blocker_contractor.h | 27
-rw-r--r-- src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h | 46
-rw-r--r-- src/Coxeter_triangulation/concept/IntersectionOracle.h | 104
-rw-r--r-- src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h | 81
-rw-r--r-- src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h | 56
-rw-r--r-- src/Coxeter_triangulation/doc/custom_function.png | Bin 0 -> 256301 bytes
-rw-r--r-- src/Coxeter_triangulation/doc/flat_torus_with_boundary.png | Bin 0 -> 222900 bytes
-rw-r--r-- src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h | 240
-rw-r--r-- src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png | Bin 0 -> 589120 bytes
-rw-r--r-- src/Coxeter_triangulation/doc/two_triangulations.png | Bin 0 -> 39507 bytes
-rw-r--r-- src/Coxeter_triangulation/example/CMakeLists.txt | 19
-rw-r--r-- src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp | 55
-rw-r--r-- src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt | 26
-rw-r--r-- src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp | 87
-rw-r--r-- src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp | 72
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h | 77
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h | 340
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h | 285
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h | 40
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h | 219
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h | 157
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h | 64
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h | 93
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h | 110
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h | 90
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h | 80
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h | 69
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h | 85
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h | 84
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h | 71
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h | 78
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h | 88
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Negation.h | 84
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h | 111
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/Translate.h | 89
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h | 72
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h | 60
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h | 171
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h | 550
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h | 154
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h | 261
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h | 270
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h | 216
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h | 83
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h | 113
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h | 93
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h | 256
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h | 120
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h | 111
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h | 54
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h | 73
-rw-r--r-- src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h | 66
-rw-r--r-- src/Coxeter_triangulation/test/CMakeLists.txt | 30
-rw-r--r-- src/Coxeter_triangulation/test/cell_complex_test.cpp | 59
-rw-r--r-- src/Coxeter_triangulation/test/freud_triang_test.cpp | 114
-rw-r--r-- src/Coxeter_triangulation/test/function_test.cpp | 158
-rw-r--r-- src/Coxeter_triangulation/test/manifold_tracing_test.cpp | 62
-rw-r--r-- src/Coxeter_triangulation/test/oracle_test.cpp | 56
-rw-r--r-- src/Coxeter_triangulation/test/perm_rep_test.cpp | 61
-rw-r--r-- src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp | 36
-rw-r--r-- src/Doxyfile.in | 585
-rw-r--r-- src/GudhUI/model/Model.h | 64
-rw-r--r-- src/GudhUI/todo.txt | 2
-rw-r--r-- src/GudhUI/utils/Bar_code_persistence.h | 4
-rw-r--r-- src/GudhUI/utils/Critical_points.h | 4
-rw-r--r-- src/GudhUI/utils/Edge_contractor.h | 2
-rw-r--r-- src/GudhUI/utils/Furthest_point_epsilon_net.h | 4
-rw-r--r-- src/GudhUI/utils/K_nearest_builder.h | 2
-rw-r--r-- src/GudhUI/utils/Lloyd_builder.h | 2
-rw-r--r-- src/GudhUI/utils/Rips_builder.h | 6
-rw-r--r-- src/GudhUI/utils/Vertex_collapsor.h | 2
-rw-r--r-- src/GudhUI/view/View_parameter.h | 6
-rw-r--r-- src/GudhUI/view/Viewer.cpp | 4
-rw-r--r-- src/GudhUI/view/Viewer_instructor.h | 2
-rw-r--r-- src/Hasse_complex/include/gudhi/Hasse_complex.h | 6
-rw-r--r-- src/Nerve_GIC/doc/Intro_graph_induced_complex.h | 12
-rw-r--r-- src/Nerve_GIC/example/CMakeLists.txt | 37
-rw-r--r-- src/Nerve_GIC/example/CoordGIC.cpp | 10
-rw-r--r-- src/Nerve_GIC/example/FuncGIC.cpp | 10
-rw-r--r-- src/Nerve_GIC/include/gudhi/GIC.h | 108
-rw-r--r-- src/Nerve_GIC/test/CMakeLists.txt | 17
-rw-r--r-- src/Nerve_GIC/utilities/CMakeLists.txt | 36
-rw-r--r-- src/Nerve_GIC/utilities/Nerve.cpp | 10
-rw-r--r-- src/Nerve_GIC/utilities/VoronoiGIC.cpp | 10
-rw-r--r-- src/Nerve_GIC/utilities/km.py.COPYRIGHT | 2
-rw-r--r-- src/Persistence_representations/example/CMakeLists.txt | 6
-rw-r--r-- src/Persistence_representations/example/persistence_heat_maps.cpp | 8
-rw-r--r-- src/Persistence_representations/example/persistence_intervals.cpp | 40
-rw-r--r-- src/Persistence_representations/example/persistence_landscape.cpp | 24
-rw-r--r-- src/Persistence_representations/example/persistence_landscape_on_grid.cpp | 22
-rw-r--r-- src/Persistence_representations/example/persistence_vectors.cpp | 10
-rw-r--r-- src/Persistence_representations/example/sliced_wasserstein.cpp | 8
-rw-r--r-- src/Persistence_representations/include/gudhi/Persistence_heat_maps.h | 66
-rw-r--r-- src/Persistence_representations/include/gudhi/Persistence_intervals.h | 60
-rw-r--r-- src/Persistence_representations/include/gudhi/Persistence_landscape.h | 222
-rw-r--r-- src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h | 122
-rw-r--r-- src/Persistence_representations/include/gudhi/Persistence_vectors.h | 30
-rw-r--r-- src/Persistence_representations/include/gudhi/read_persistence_from_file.h | 16
-rw-r--r-- src/Persistence_representations/test/persistence_heat_maps_test.cpp | 2
-rw-r--r-- src/Persistence_representations/test/persistence_lanscapes_test.cpp | 2
-rw-r--r-- src/Persistence_representations/utilities/CMakeLists.txt | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt | 19
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp | 4
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt | 7
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp | 8
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp | 8
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp | 4
-rw-r--r-- src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp | 2
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt | 8
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp | 4
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt | 8
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp | 4
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt | 8
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp | 14
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp | 10
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp | 6
-rw-r--r-- src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp | 4
-rw-r--r-- src/Persistent_cohomology/benchmark/CMakeLists.txt | 14
-rw-r--r-- src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp | 38
-rw-r--r-- src/Persistent_cohomology/concept/FilteredComplex.h | 2
-rw-r--r-- src/Persistent_cohomology/doc/Intro_persistent_cohomology.h | 23
-rw-r--r-- src/Persistent_cohomology/example/CMakeLists.txt | 80
-rw-r--r-- src/Persistent_cohomology/example/custom_persistence_sort.cpp | 31
-rw-r--r-- src/Persistent_cohomology/example/persistence_from_file.cpp | 43
-rw-r--r-- src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp | 48
-rw-r--r-- src/Persistent_cohomology/example/plain_homology.cpp | 11
-rw-r--r-- src/Persistent_cohomology/example/rips_multifield_persistence.cpp | 39
-rw-r--r-- src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp | 31
-rw-r--r-- src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp | 41
-rw-r--r-- src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 38
-rw-r--r-- src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h | 18
-rw-r--r-- src/Persistent_cohomology/test/betti_numbers_unit_test.cpp | 30
-rw-r--r-- src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp | 196
-rw-r--r-- src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp | 88
-rw-r--r-- src/Rips_complex/doc/Intro_rips_complex.h | 27
-rw-r--r-- src/Rips_complex/example/CMakeLists.txt | 13
-rw-r--r-- src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp | 14
-rw-r--r-- src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp | 12
-rw-r--r-- src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp | 12
-rw-r--r-- src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp | 2
-rw-r--r-- src/Rips_complex/example/example_rips_complex_from_off_file.cpp | 2
-rw-r--r-- src/Rips_complex/example/example_sparse_rips.cpp | 2
-rw-r--r-- src/Rips_complex/include/gudhi/Sparse_rips_complex.h | 129
-rw-r--r-- src/Rips_complex/test/test_rips_complex.cpp | 108
-rw-r--r-- src/Rips_complex/utilities/CMakeLists.txt | 67
-rw-r--r-- src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp | 31
-rw-r--r-- src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp | 31
-rw-r--r-- src/Rips_complex/utilities/rips_persistence.cpp | 31
-rw-r--r-- src/Rips_complex/utilities/sparse_rips_persistence.cpp | 31
-rw-r--r-- src/Simplex_tree/doc/Intro_simplex_tree.h | 12
-rw-r--r-- src/Simplex_tree/example/CMakeLists.txt | 16
-rw-r--r-- src/Simplex_tree/example/README | 73
-rw-r--r-- src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp | 30
-rw-r--r-- src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp | 68
-rw-r--r-- src/Simplex_tree/example/graph_expansion_with_blocker.cpp | 24
-rw-r--r-- src/Simplex_tree/example/mini_simplex_tree.cpp | 4
-rw-r--r-- src/Simplex_tree/example/simple_simplex_tree.cpp | 141
-rw-r--r-- src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp | 56
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree.h | 429
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h | 148
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h | 10
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h | 13
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h | 2
-rw-r--r-- src/Simplex_tree/test/CMakeLists.txt | 12
-rw-r--r-- src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp | 42
-rw-r--r-- src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp | 264
-rw-r--r-- src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp | 46
-rw-r--r-- src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp | 148
-rw-r--r-- src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp | 154
-rw-r--r-- src/Simplex_tree/test/simplex_tree_unit_test.cpp | 468
-rw-r--r-- src/Skeleton_blocker/concept/SkeletonBlockerDS.h | 2
-rw-r--r-- src/Skeleton_blocker/example/CMakeLists.txt | 4
-rw-r--r-- src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp | 16
-rw-r--r-- src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp | 8
-rw-r--r-- src/Skeleton_blocker/example/Skeleton_blocker_link.cpp | 10
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h | 29
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h | 2
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h | 2
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h | 14
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h | 4
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h | 4
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h | 16
-rw-r--r-- src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h | 2
-rwxr-xr-x[-rw-r--r--] src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h | 4
-rw-r--r-- src/Skeleton_blocker/test/test_skeleton_blocker_complex.cpp | 88
-rw-r--r-- src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp | 16
-rw-r--r-- src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp | 92
-rw-r--r-- src/Spatial_searching/doc/Intro_spatial_searching.h | 2
-rw-r--r-- src/Spatial_searching/example/CMakeLists.txt | 1
-rw-r--r-- src/Spatial_searching/example/example_spatial_searching.cpp | 24
-rw-r--r-- src/Spatial_searching/include/gudhi/Kd_tree_search.h | 98
-rw-r--r-- src/Spatial_searching/test/test_Kd_tree_search.cpp | 4
-rw-r--r-- src/Subsampling/doc/Intro_subsampling.h | 6
-rw-r--r-- src/Subsampling/example/CMakeLists.txt | 9
-rw-r--r-- src/Subsampling/example/example_choose_n_farthest_points.cpp | 6
-rw-r--r-- src/Subsampling/example/example_custom_distance.cpp | 44
-rw-r--r-- src/Subsampling/example/example_custom_kernel.cpp | 63
-rw-r--r-- src/Subsampling/example/example_pick_n_random_points.cpp | 4
-rw-r--r-- src/Subsampling/example/example_sparsify_point_set.cpp | 4
-rw-r--r-- src/Subsampling/include/gudhi/choose_n_farthest_points.h | 84
-rw-r--r-- src/Subsampling/include/gudhi/pick_n_random_points.h | 14
-rw-r--r-- src/Subsampling/include/gudhi/sparsify_point_set.h | 28
-rw-r--r-- src/Subsampling/test/test_choose_n_farthest_points.cpp | 35
-rw-r--r-- src/Subsampling/test/test_pick_n_random_points.cpp | 4
-rw-r--r-- src/Subsampling/test/test_sparsify_point_set.cpp | 6
-rw-r--r-- src/Tangential_complex/benchmark/XML_exporter.h | 2
-rw-r--r-- src/Tangential_complex/benchmark/benchmark_tc.cpp | 3
-rw-r--r-- src/Tangential_complex/doc/Intro_tangential_complex.h | 4
-rw-r--r-- src/Tangential_complex/example/CMakeLists.txt | 2
-rw-r--r-- src/Tangential_complex/example/example_basic.cpp | 4
-rw-r--r-- src/Tangential_complex/example/example_with_perturb.cpp | 1
-rw-r--r-- src/Tangential_complex/include/gudhi/Tangential_complex.h | 31
-rw-r--r-- src/Tangential_complex/test/test_tangential_complex.cpp | 11
-rw-r--r-- src/Toplex_map/benchmark/CMakeLists.txt | 4
-rw-r--r-- src/Toplex_map/benchmark/benchmark_tm.cpp | 20
-rw-r--r-- src/Toplex_map/example/simple_toplex_map.cpp | 54
-rw-r--r-- src/Toplex_map/test/lazy_toplex_map_unit_test.cpp | 76
-rw-r--r-- src/Toplex_map/test/toplex_map_unit_test.cpp | 50
-rw-r--r-- src/Witness_complex/doc/Witness_complex_doc.h | 10
-rw-r--r-- src/Witness_complex/example/CMakeLists.txt | 7
-rw-r--r-- src/Witness_complex/example/example_nearest_landmark_table.cpp | 2
-rw-r--r-- src/Witness_complex/example/example_strong_witness_complex_off.cpp | 11
-rw-r--r-- src/Witness_complex/example/example_witness_complex_off.cpp | 11
-rw-r--r-- src/Witness_complex/example/example_witness_complex_sphere.cpp | 10
-rw-r--r-- src/Witness_complex/include/gudhi/Active_witness/Active_witness.h | 2
-rw-r--r-- src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h | 2
-rw-r--r-- src/Witness_complex/include/gudhi/Strong_witness_complex.h | 2
-rw-r--r-- src/Witness_complex/include/gudhi/Witness_complex.h | 2
-rw-r--r-- src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h | 2
-rw-r--r-- src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp | 16
-rw-r--r-- src/Witness_complex/test/test_simple_witness_complex.cpp | 4
-rw-r--r-- src/Witness_complex/utilities/CMakeLists.txt | 36
-rw-r--r-- src/Witness_complex/utilities/strong_witness_persistence.cpp | 35
-rw-r--r-- src/Witness_complex/utilities/weak_witness_persistence.cpp | 35
-rw-r--r-- src/Witness_complex/utilities/witnesscomplex.md | 4
-rw-r--r-- src/cmake/modules/FindTBB.cmake | 6
-rw-r--r-- src/cmake/modules/GUDHI_boost_test.cmake | 7
-rw-r--r-- src/cmake/modules/GUDHI_compilation_flags.cmake | 40
-rw-r--r-- src/cmake/modules/GUDHI_doxygen_target.cmake | 59
-rw-r--r-- src/cmake/modules/GUDHI_modules.cmake | 17
-rw-r--r-- src/cmake/modules/GUDHI_options.cmake | 15
-rw-r--r-- src/cmake/modules/GUDHI_submodules.cmake | 5
-rw-r--r-- src/cmake/modules/GUDHI_third_party_libraries.cmake | 173
-rw-r--r-- src/cmake/modules/GUDHI_user_version_target.cmake | 44
-rw-r--r-- src/common/benchmark/CMakeLists.txt | 4
-rw-r--r-- src/common/benchmark/Graph_simplicial_complex_benchmark.cpp | 8
-rw-r--r-- src/common/doc/examples.h | 225
-rw-r--r-- src/common/doc/footer.html | 13
-rw-r--r-- src/common/doc/header.html | 124
-rw-r--r-- src/common/doc/installation.h | 295
-rw-r--r-- src/common/doc/main_page.md | 65
-rwxr-xr-x[-rw-r--r--] src/common/doc/stylesheet.css | 1371
-rw-r--r-- src/common/example/CMakeLists.txt | 1
-rw-r--r-- src/common/example/example_CGAL_3D_points_off_reader.cpp | 2
-rw-r--r-- src/common/example/example_CGAL_points_off_reader.cpp | 6
-rw-r--r-- src/common/include/gudhi/Clock.h | 4
-rw-r--r-- src/common/include/gudhi/Debug_utils.h | 10
-rw-r--r-- src/common/include/gudhi/Points_3D_off_io.h | 12
-rw-r--r-- src/common/include/gudhi/Points_off_io.h | 12
-rw-r--r-- src/common/include/gudhi/Unitary_tests_utils.h | 4
-rw-r--r-- src/common/include/gudhi/distance_functions.h | 49
-rw-r--r-- src/common/include/gudhi/graph_simplicial_complex.h | 5
-rw-r--r-- src/common/include/gudhi/random_point_generators.h | 65
-rw-r--r-- src/common/include/gudhi/reader_utils.h | 20
-rw-r--r-- src/common/include/gudhi/writing_persistence_to_file.h | 4
-rw-r--r-- src/common/test/test_distance_matrix_reader.cpp | 18
-rw-r--r-- src/common/test/test_persistence_intervals_reader.cpp | 124
-rw-r--r-- src/common/utilities/off_file_from_shape_generator.cpp | 2
-rw-r--r-- src/python/CMakeLists.txt | 545
-rw-r--r-- src/python/doc/_templates/layout.html | 54
-rw-r--r-- src/python/doc/alpha_complex_ref.rst | 1
-rw-r--r-- src/python/doc/alpha_complex_sum.inc | 28
-rw-r--r-- src/python/doc/alpha_complex_user.rst | 202
-rw-r--r-- src/python/doc/bottleneck_distance_sum.inc | 24
-rw-r--r-- src/python/doc/bottleneck_distance_user.rst | 24
-rw-r--r-- src/python/doc/clustering.inc | 12
-rw-r--r-- src/python/doc/clustering.rst | 72
-rwxr-xr-x src/python/doc/conf.py | 7
-rw-r--r-- src/python/doc/cubical_complex_sklearn_itf_ref.rst | 102
-rw-r--r-- src/python/doc/cubical_complex_sum.inc | 32
-rw-r--r-- src/python/doc/cubical_complex_tflow_itf_ref.rst | 40
-rw-r--r-- src/python/doc/cubical_complex_user.rst | 38
-rw-r--r-- src/python/doc/datasets.inc | 14
-rw-r--r-- src/python/doc/datasets.rst | 133
-rw-r--r-- src/python/doc/differentiation_sum.inc | 12
-rw-r--r-- src/python/doc/examples.rst | 34
-rw-r--r-- src/python/doc/fileformats.rst | 2
-rw-r--r-- src/python/doc/img/barycenter.png | Bin 0 -> 12433 bytes
-rw-r--r-- src/python/doc/img/sklearn.png | Bin 0 -> 9368 bytes
-rw-r--r-- src/python/doc/img/sphere_3d.png | Bin 0 -> 529148 bytes
-rw-r--r-- src/python/doc/img/spiral-color.png | Bin 0 -> 222425 bytes
-rw-r--r-- src/python/doc/img/spiral_2d.png | Bin 0 -> 279276 bytes
-rw-r--r-- src/python/doc/index.rst | 18
-rw-r--r-- src/python/doc/installation.rst | 295
-rw-r--r-- src/python/doc/ls_simplex_tree_tflow_itf_ref.rst | 53
-rw-r--r-- src/python/doc/nerve_gic_complex_sum.inc | 28
-rw-r--r-- src/python/doc/nerve_gic_complex_user.rst | 9
-rw-r--r-- src/python/doc/persistence_graphical_tools_sum.inc | 24
-rw-r--r-- src/python/doc/persistence_graphical_tools_user.rst | 52
-rw-r--r-- src/python/doc/persistent_cohomology_sum.inc | 10
-rw-r--r-- src/python/doc/persistent_cohomology_user.rst | 36
-rw-r--r-- src/python/doc/point_cloud.rst | 30
-rw-r--r-- src/python/doc/point_cloud_sum.inc | 23
-rwxr-xr-x src/python/doc/python3-sphinx-build.py | 11
-rw-r--r-- src/python/doc/representations.rst | 66
-rw-r--r-- src/python/doc/representations_sum.inc | 24
-rw-r--r-- src/python/doc/rips_complex_ref.rst | 22
-rw-r--r-- src/python/doc/rips_complex_sum.inc | 31
-rw-r--r-- src/python/doc/rips_complex_tflow_itf_ref.rst | 48
-rw-r--r-- src/python/doc/rips_complex_user.rst | 208
-rw-r--r-- src/python/doc/simplex_tree_ref.rst | 1
-rw-r--r-- src/python/doc/simplex_tree_sum.inc | 27
-rw-r--r-- src/python/doc/tangential_complex_sum.inc | 24
-rw-r--r-- src/python/doc/tangential_complex_user.rst | 8
-rw-r--r-- src/python/doc/wasserstein_distance_sum.inc | 22
-rw-r--r-- src/python/doc/wasserstein_distance_user.rst | 179
-rw-r--r-- src/python/doc/witness_complex_sum.inc | 30
-rw-r--r-- src/python/doc/witness_complex_user.rst | 7
-rw-r--r-- src/python/doc/zbibliography.rst | 10
-rwxr-xr-x src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py | 63
-rw-r--r-- src/python/example/alpha_complex_from_generated_points_on_sphere_example.py | 35
-rwxr-xr-x src/python/example/alpha_complex_from_points_example.py | 7
-rwxr-xr-x src/python/example/alpha_rips_persistence_bottleneck_distance.py | 137
-rwxr-xr-x src/python/example/diagram_vectorizations_distances_kernels.py | 109
-rwxr-xr-x src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py | 21
-rwxr-xr-x src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py | 13
-rwxr-xr-x src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py | 18
-rwxr-xr-x src/python/example/plot_alpha_complex.py | 5
-rwxr-xr-x src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py | 4
-rwxr-xr-x src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py | 7
-rwxr-xr-x src/python/example/rips_complex_diagram_persistence_from_off_file_example.py | 18
-rwxr-xr-x src/python/example/rips_complex_edge_collapse_example.py | 62
-rwxr-xr-x src/python/example/rips_complex_from_points_example.py | 5
-rwxr-xr-x src/python/example/simplex_tree_example.py | 10
-rwxr-xr-x src/python/example/tangential_complex_plain_homology_from_off_file_example.py | 20
-rw-r--r-- src/python/gudhi/alpha_complex.pyx | 144
-rw-r--r-- src/python/gudhi/bottleneck.cc | 55
-rw-r--r-- src/python/gudhi/bottleneck.pyx | 48
-rw-r--r-- src/python/gudhi/clustering/__init__.py | 0
-rw-r--r-- src/python/gudhi/clustering/_tomato.cc | 277
-rw-r--r-- src/python/gudhi/clustering/tomato.py | 321
-rw-r--r-- src/python/gudhi/cubical_complex.pyx | 173
-rw-r--r-- src/python/gudhi/datasets/__init__.py | 0
-rw-r--r-- src/python/gudhi/datasets/generators/__init__.py | 0
-rw-r--r-- src/python/gudhi/datasets/generators/_points.cc | 121
-rw-r--r-- src/python/gudhi/datasets/generators/points.py | 59
-rw-r--r-- src/python/gudhi/datasets/remote.py | 223
-rw-r--r-- src/python/gudhi/dtm_rips_complex.py | 51
-rw-r--r-- src/python/gudhi/hera/__init__.py | 7
-rw-r--r-- src/python/gudhi/hera/bottleneck.cc | 54
-rw-r--r-- src/python/gudhi/hera/wasserstein.cc | 62
-rw-r--r-- src/python/gudhi/nerve_gic.pyx | 41
-rw-r--r-- src/python/gudhi/off_reader.pyx | 37
-rw-r--r-- src/python/gudhi/off_utils.pyx | 62
-rw-r--r-- src/python/gudhi/periodic_cubical_complex.pyx | 168
-rw-r--r-- src/python/gudhi/persistence_graphical_tools.py | 448
-rw-r--r-- src/python/gudhi/point_cloud/__init__.py | 0
-rw-r--r-- src/python/gudhi/point_cloud/dtm.py | 179
-rw-r--r-- src/python/gudhi/point_cloud/knn.py | 344
-rw-r--r-- src/python/gudhi/point_cloud/timedelay.py | 94
-rw-r--r-- src/python/gudhi/representations/kernel_methods.py | 200
-rw-r--r-- src/python/gudhi/representations/metrics.py | 422
-rw-r--r-- src/python/gudhi/representations/preprocessing.py | 117
-rw-r--r-- src/python/gudhi/representations/vector_methods.py | 613
-rw-r--r-- src/python/gudhi/rips_complex.pyx | 34
-rw-r--r-- src/python/gudhi/simplex_tree.pxd | 99
-rw-r--r-- src/python/gudhi/simplex_tree.pyx | 600
-rw-r--r-- src/python/gudhi/sklearn/__init__.py | 0
-rw-r--r-- src/python/gudhi/sklearn/cubical_persistence.py | 110
-rw-r--r-- src/python/gudhi/subsampling.pyx | 23
-rw-r--r-- src/python/gudhi/tensorflow/__init__.py | 5
-rw-r--r-- src/python/gudhi/tensorflow/cubical_layer.py | 82
-rw-r--r-- src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py | 87
-rw-r--r-- src/python/gudhi/tensorflow/rips_layer.py | 93
-rw-r--r-- src/python/gudhi/wasserstein.py | 97
-rw-r--r-- src/python/gudhi/wasserstein/__init__.py | 1
-rw-r--r-- src/python/gudhi/wasserstein/barycenter.py | 146
-rw-r--r-- src/python/gudhi/wasserstein/wasserstein.py | 355
-rw-r--r-- src/python/gudhi/weighted_rips_complex.py | 61
-rw-r--r-- src/python/include/Alpha_complex_factory.h | 156
-rw-r--r-- src/python/include/Alpha_complex_interface.h | 62
-rw-r--r-- src/python/include/Euclidean_strong_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Euclidean_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Nerve_gic_interface.h | 1
-rw-r--r-- src/python/include/Persistent_cohomology_interface.h | 234
-rw-r--r-- src/python/include/Rips_complex_interface.h | 1
-rw-r--r-- src/python/include/Simplex_tree_interface.h | 164
-rw-r--r-- src/python/include/Strong_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Subsampling_interface.h | 10
-rw-r--r-- src/python/include/Tangential_complex_interface.h | 1
-rw-r--r-- src/python/include/Witness_complex_interface.h | 2
-rw-r--r-- src/python/include/pybind11_diagram_utils.h | 39
-rw-r--r-- src/python/introduction.rst | 24
-rw-r--r-- src/python/pyproject.toml | 3
-rw-r--r-- src/python/setup.py.in | 54
-rwxr-xr-x src/python/test/test_alpha_complex.py | 251
-rwxr-xr-x src/python/test/test_betti_curve_representations.py | 59
-rwxr-xr-x src/python/test/test_bottleneck_distance.py | 18
-rwxr-xr-x src/python/test/test_cover_complex.py | 4
-rwxr-xr-x src/python/test/test_cubical_complex.py | 58
-rwxr-xr-x src/python/test/test_datasets_generators.py | 39
-rw-r--r-- src/python/test/test_diff.py | 78
-rwxr-xr-x src/python/test/test_dtm.py | 101
-rw-r--r-- src/python/test/test_dtm_rips_complex.py | 32
-rwxr-xr-x src/python/test/test_euclidean_witness_complex.py | 6
-rwxr-xr-x src/python/test/test_knn.py | 130
-rw-r--r-- src/python/test/test_off.py | 21
-rw-r--r-- src/python/test/test_persistence_graphical_tools.py | 122
-rwxr-xr-x src/python/test/test_reader_utils.py | 35
-rw-r--r-- src/python/test/test_remote_datasets.py | 87
-rwxr-xr-x src/python/test/test_representations.py | 259
-rw-r--r-- src/python/test/test_representations_preprocessing.py | 39
-rwxr-xr-x src/python/test/test_rips_complex.py | 27
-rwxr-xr-x src/python/test/test_simplex_generators.py | 64
-rwxr-xr-x src/python/test/test_simplex_tree.py | 414
-rw-r--r-- src/python/test/test_sklearn_cubical_persistence.py | 59
-rwxr-xr-x src/python/test/test_subsampling.py | 120
-rwxr-xr-x src/python/test/test_tangential_complex.py | 3
-rwxr-xr-x src/python/test/test_time_delay.py | 43
-rwxr-xr-x src/python/test/test_tomato.py | 65
-rwxr-xr-x src/python/test/test_wasserstein_barycenter.py | 46
-rwxr-xr-x src/python/test/test_wasserstein_distance.py | 187
-rwxr-xr-x src/python/test/test_wasserstein_with_tensors.py | 47
-rw-r--r-- src/python/test/test_weighted_rips_complex.py | 63
530 files changed, 24678 insertions, 9002 deletions
diff --git a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
index 99ad94b9..e7d85686 100644
--- a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
+++ b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
@@ -19,7 +19,7 @@ std::ofstream results_csv("results.csv");
template <typename Kernel>
void benchmark_points_on_torus_dD(const std::string& msg) {
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"" << msg << "\";" << std::endl;
results_csv << "\"nb_points\";"
@@ -29,7 +29,7 @@ void benchmark_points_on_torus_dD(const std::string& msg) {
using K = CGAL::Epick_d<CGAL::Dimension_tag<3>>;
for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) {
- std::cout << " Alpha complex dD on torus with " << nb_points << " points." << std::endl;
+ std::clog << " Alpha complex dD on torus with " << nb_points << " points." << std::endl;
std::vector<K::Point_d> points_on_torus = Gudhi::generate_points_on_torus_3D<K>(nb_points, 1.0, 0.5);
std::vector<typename Kernel::Point_d> points;
@@ -41,26 +41,26 @@ void benchmark_points_on_torus_dD(const std::string& msg) {
ac_create_clock.begin();
Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_points(points);
ac_create_clock.end();
- std::cout << ac_create_clock;
+ std::clog << ac_create_clock;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_points_on_torus_dD - complex creation");
st_create_clock.begin();
alpha_complex_from_points.create_complex(complex);
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";"
<< st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_points_on_torus_dD - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_points_on_torus_dD - nb simplices = " << complex.num_simplices() << std::endl;
}
}
template <typename Alpha_complex_3d>
void benchmark_points_on_torus_3D(const std::string& msg) {
using K = CGAL::Epick_d<CGAL::Dimension_tag<3>>;
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"" << msg << "\";" << std::endl;
results_csv << "\"nb_points\";"
@@ -69,7 +69,7 @@ void benchmark_points_on_torus_3D(const std::string& msg) {
<< "\"complex_creation_time(sec.)\";" << std::endl;
for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) {
- std::cout << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl;
+ std::clog << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl;
std::vector<K::Point_d> points_on_torus = Gudhi::generate_points_on_torus_3D<K>(nb_points, 1.0, 0.5);
std::vector<typename Alpha_complex_3d::Point_3> points;
@@ -81,19 +81,19 @@ void benchmark_points_on_torus_3D(const std::string& msg) {
ac_create_clock.begin();
Alpha_complex_3d alpha_complex_from_points(points);
ac_create_clock.end();
- std::cout << ac_create_clock;
+ std::clog << ac_create_clock;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_points_on_torus_3D - complex creation");
st_create_clock.begin();
alpha_complex_from_points.create_complex(complex);
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";"
<< st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl;
}
}
@@ -101,7 +101,7 @@ template <typename Weighted_alpha_complex_3d>
void benchmark_weighted_points_on_torus_3D(const std::string& msg) {
using K = CGAL::Epick_d<CGAL::Dimension_tag<3>>;
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"" << msg << "\";" << std::endl;
results_csv << "\"nb_points\";"
@@ -112,7 +112,7 @@ void benchmark_weighted_points_on_torus_3D(const std::string& msg) {
CGAL::Random random(8);
for (int nb_points = 1000; nb_points <= 125000; nb_points *= 5) {
- std::cout << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl;
+ std::clog << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl;
std::vector<K::Point_d> points_on_torus = Gudhi::generate_points_on_torus_3D<K>(nb_points, 1.0, 0.5);
using Point = typename Weighted_alpha_complex_3d::Bare_point_3;
@@ -128,25 +128,25 @@ void benchmark_weighted_points_on_torus_3D(const std::string& msg) {
ac_create_clock.begin();
Weighted_alpha_complex_3d alpha_complex_from_points(points);
ac_create_clock.end();
- std::cout << ac_create_clock;
+ std::clog << ac_create_clock;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_weighted_points_on_torus_3D - complex creation");
st_create_clock.begin();
alpha_complex_from_points.create_complex(complex);
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << nb_points << ";" << complex.num_simplices() << ";" << ac_create_clock.num_seconds() << ";"
<< st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_weighted_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_weighted_points_on_torus_3D - nb simplices = " << complex.num_simplices() << std::endl;
}
}
template <typename Periodic_alpha_complex_3d>
void benchmark_periodic_points(const std::string& msg) {
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"" << msg << "\";" << std::endl;
results_csv << "\"nb_points\";"
@@ -157,7 +157,7 @@ void benchmark_periodic_points(const std::string& msg) {
CGAL::Random random(8);
for (double nb_points = 10.; nb_points <= 40.; nb_points += 10.) {
- std::cout << " Periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." << std::endl;
+ std::clog << " Periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points." << std::endl;
using Point = typename Periodic_alpha_complex_3d::Point_3;
std::vector<Point> points;
@@ -174,25 +174,25 @@ void benchmark_periodic_points(const std::string& msg) {
ac_create_clock.begin();
Periodic_alpha_complex_3d alpha_complex_from_points(points, 0., 0., 0., nb_points, nb_points, nb_points);
ac_create_clock.end();
- std::cout << ac_create_clock;
+ std::clog << ac_create_clock;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_periodic_points - complex creation");
st_create_clock.begin();
alpha_complex_from_points.create_complex(complex);
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << nb_points * nb_points * nb_points << ";" << complex.num_simplices() << ";"
<< ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_periodic_points - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_periodic_points - nb simplices = " << complex.num_simplices() << std::endl;
}
}
template <typename Weighted_periodic_alpha_complex_3d>
void benchmark_weighted_periodic_points(const std::string& msg) {
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"" << msg << "\";" << std::endl;
results_csv << "\"nb_points\";"
@@ -203,7 +203,7 @@ void benchmark_weighted_periodic_points(const std::string& msg) {
CGAL::Random random(8);
for (double nb_points = 10.; nb_points <= 40.; nb_points += 10.) {
- std::cout << " Weighted periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points."
+ std::clog << " Weighted periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points."
<< std::endl;
using Point = typename Weighted_periodic_alpha_complex_3d::Bare_point_3;
@@ -224,19 +224,19 @@ void benchmark_weighted_periodic_points(const std::string& msg) {
ac_create_clock.begin();
Weighted_periodic_alpha_complex_3d alpha_complex_from_points(points, 0., 0., 0., nb_points, nb_points, nb_points);
ac_create_clock.end();
- std::cout << ac_create_clock;
+ std::clog << ac_create_clock;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_weighted_periodic_points - complex creation");
st_create_clock.begin();
alpha_complex_from_points.create_complex(complex);
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << nb_points * nb_points * nb_points << ";" << complex.num_simplices() << ";"
<< ac_create_clock.num_seconds() << ";" << st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_weighted_periodic_points - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_weighted_periodic_points - nb simplices = " << complex.num_simplices() << std::endl;
}
}
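
Throughout this patch, informational messages move from std::cout to std::clog. The practical benefit is that log output can then be redirected independently of data output (here, the CSV results). A minimal sketch of that separation, not part of the patch, using only standard iostreams:

    // Minimal illustration: std::clog shares the terminal with std::cout by
    // default, but its buffer can be swapped so that log lines and data
    // lines end up in different places.
    #include <fstream>
    #include <iostream>

    int main() {
      std::ofstream log_file("benchmark.log");
      std::streambuf* previous = std::clog.rdbuf(log_file.rdbuf());  // redirect logs
      std::clog << "+ benchmark starting" << std::endl;  // goes to benchmark.log
      std::cout << "nb_points;seconds;" << std::endl;    // still goes to stdout
      std::clog.rdbuf(previous);  // restore before log_file goes out of scope
      return 0;
    }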
diff --git a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
index 1c6c3b0c..c20c3201 100644
--- a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
+++ b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
@@ -72,6 +72,24 @@ struct SimplicialComplexForAlpha {
/** \brief Return type of an insertion of a simplex
*/
typedef unspecified Insertion_result_type;
+
+ /** \name Map interface
+ * Conceptually a `std::unordered_map<Simplex_handle,std::size_t>`.
+ * @{ */
+ /** \brief Data stored for each simplex.
+ *
+ * Must be an integer type. */
+ typedef unspecified Simplex_key;
+ /** \brief Returns a constant dummy number that is either negative,
+ * or at least as large as the number of simplices. Suggested value: -1. */
+ Simplex_key null_key ();
+ /** \brief Returns the number stored for a simplex by `assign_key()`.
+ *
+ * If `assign_key()` has not been called, it must return `null_key()`. */
+ Simplex_key key ( Simplex_handle sh );
+ /** \brief Store a number for a simplex, which can later be retrieved with `key()`. */
+ void assign_key(Simplex_handle sh, Simplex_key n);
+ /** @} */
};
} // namespace alpha_complex
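
The map interface added above asks the complex to behave like a std::unordered_map<Simplex_handle,std::size_t>. A toy sketch of that contract, not taken from the patch, using Gudhi::Simplex_tree<> (which models this concept with its default options):

    // Illustration only: exercising key()/assign_key()/null_key() the way a
    // consumer of SimplicialComplexForAlpha might.
    #include <gudhi/Simplex_tree.h>
    #include <iostream>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<> st;
      std::vector<int> triangle = {0, 1, 2};
      st.insert_simplex_and_subfaces(triangle);
      // Per the concept, key() must return null_key() until assign_key() is
      // called. Number the simplices consecutively, as the filtration value
      // computation might.
      Gudhi::Simplex_tree<>::Simplex_key index = 0;
      for (auto sh : st.complex_simplex_range()) st.assign_key(sh, index++);
      for (auto sh : st.complex_simplex_range())
        std::clog << "simplex key = " << st.key(sh) << std::endl;
      return 0;
    }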
diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index a8b1a106..41e5e16d 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -22,6 +22,18 @@ namespace alpha_complex {
*
* @{
*
+<div class="toc">
+Table of Contents
+<ul>
+<li class="level1"><a href="#definition">Definition</a></li>
+<li class="level1"><a href="#pointsexample">Example from points</a></li>
+<li class="level1"><a href="#createcomplexalgorithm">Create complex algorithm</a></li>
+<li class="level1"><a href="#weightedversion">Weighted specific version</a></li>
+<li class="level1"><a href="#offexample">Example from OFF file</a></li>
+<li class="level1"><a href="#weighted3dexample">3d specific version</a></li>
+</ul>
+</div>
+
* \section definition Definition
*
* Alpha_complex is a <a target="_blank" href="https://en.wikipedia.org/wiki/Simplicial_complex">simplicial complex</a>
@@ -46,22 +58,23 @@ namespace alpha_complex {
* \cite cgal:s-gkd-19b from CGAL as template parameter.
*
* \remark
- * - When an \f$\alpha\f$-complex is constructed with an infinite value of \f$ \alpha^2 \f$, the complex is a Delaunay
- * complex (with special filtration values).
+ * - When the simplicial complex is constructed with an infinite value of \f$ \alpha^2 \f$, the complex is a Delaunay
+ * complex with special filtration values. The Delaunay complex without filtration values is also available by passing
+ * `default_filtration_value=true` to `Alpha_complex::create_complex`.
* - For people only interested in the topology of the \ref alpha_complex (for instance persistence),
* \ref alpha_complex is equivalent to the \ref cech_complex and much smaller if you do not bound the radii.
* \ref cech_complex can still make sense in higher dimension precisely because you can bound the radii.
- * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass exact=true to create_complex, the
- * filtration values are the exact ones converted to the filtration value type of the simplicial complex. This can be
- * very slow. If you pass exact=false (the default), the filtration values are only guaranteed to have a small
- * multiplicative error compared to the exact value, see <code><a class="el" target="_blank"
- * href="https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html">
+ * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass `exact=true` to
+ * `Alpha_complex::create_complex`, the filtration values are the exact ones converted to the filtration value type of
+ * the simplicial complex. This can be very slow. If you pass `exact=false` (the default), the filtration values are
+ * only guaranteed to have a small multiplicative error compared to the exact value, see <code>
+ * <a class="el" target="_blank" href="https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html">
* CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double</a></code> for details. A drawback, when computing
* persistence, is that an empty exact interval [10^12,10^12] may become a non-empty approximate interval
* [10^12,10^12+10^6]. Using `CGAL::Epick_d` makes the computations slightly faster, and the combinatorics are still
* exact, but the computation of filtration values can exceptionally be arbitrarily bad. In all cases, we still
* guarantee that the output is a valid filtration (faces have a filtration value no larger than their cofaces).
- * - For performances reasons, it is advised to use `Alpha_complex` with \ref cgal &ge; 5.0.0.
+ * - For performance reasons, it is advised to use \ref eigen &ge; 3.3.5 and \ref cgal &ge; 5.2.0.
*
* \section pointsexample Example from points
*
@@ -70,7 +83,7 @@ namespace alpha_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Alpha_complex/Alpha_complex_from_points.cpp
+ * \include Alpha_complex_from_points.cpp
*
* When launching:
*
@@ -79,7 +92,7 @@ namespace alpha_complex {
*
* the program output is:
*
- * \include Alpha_complex/alphaoffreader_for_doc_60.txt
+ * \include alphaoffreader_for_doc_60.txt
*
* \section createcomplexalgorithm Create complex algorithm
*
@@ -94,6 +107,7 @@ namespace alpha_complex {
* \subsection filtrationcomputation Filtration value computation algorithm
* <br>
* \f$
+ * \begin{array}{l}
* \textbf{for } \text{i : dimension } \rightarrow 0 \textbf{ do}\\
* \quad \textbf{for all } \sigma \text{ of dimension i}\\
* \quad\quad \textbf{if } \text{filtration(} \sigma ) \text{ is NaN} \textbf{ then}\\
@@ -114,6 +128,7 @@ namespace alpha_complex {
* \textbf{end for}\\
* \text{make_filtration_non_decreasing()}\\
* \text{prune_above_filtration()}\\
+ * \end{array}
* \f$
*
* \subsubsection dimension2 Dimension 2
@@ -139,12 +154,38 @@ namespace alpha_complex {
* not quite define a proper filtration (i.e. non-decreasing with respect to inclusion).
* We fix that up by calling `SimplicialComplexForAlpha::make_filtration_non_decreasing()`.
*
+ * \note This is not the case in the `exact` version, which is why it is not called in that case.
+ *
* \subsubsection pruneabove Prune above given filtration value
*
* The simplex tree is pruned from the given maximum \f$ \alpha^2 \f$ value (cf.
* `SimplicialComplexForAlpha::prune_above_filtration()`).
* In the following example, the value is given by the user as argument of the program.
*
+ * \section weightedversion Weighted specific version
+ * <b>Requires:</b> \ref eigen &ge; 3.1.0 and \ref cgal &ge; 5.1.0.
+ *
+ * A weighted version of the Alpha complex is available (cf. Alpha_complex). It is like a usual Alpha complex, but based
+ * on a <a href="https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT">CGAL regular triangulation</a> instead
+ * of a Delaunay triangulation.
+ *
+ * This example builds the CGAL weighted alpha shapes from a small molecule, and initializes the alpha complex with
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
+ * weighted alpha shapes</a>.
+ *
+ * Then, it is asked to display information about the alpha complex.
+ *
+ * \include Weighted_alpha_complex_from_points.cpp
+ *
+ * When launching:
+ *
+ * \code $> ./Weighted_alpha_complex_example_from_points
+ * \endcode
+ *
+ * the program output is:
+ *
+ * \include weightedalpha3dfrompoints_for_doc.txt
+ *
*
* \section offexample Example from OFF file
*
@@ -153,7 +194,7 @@ namespace alpha_complex {
*
* Then, it is asked to display information about the alpha complex.
*
- * \include Alpha_complex/Alpha_complex_from_off.cpp
+ * \include Alpha_complex_from_off.cpp
*
* When launching:
*
@@ -162,10 +203,10 @@ namespace alpha_complex {
*
* the program output is:
*
- * \include Alpha_complex/alphaoffreader_for_doc_32.txt
+ * \include alphaoffreader_for_doc_32.txt
*
*
- * \section weighted3dexample 3d specific example
+ * \section weighted3dexample 3d specific version
*
* A specific module for Alpha complex is available in 3d (cf. Alpha_complex_3d) and allows to construct standard,
* weighted, periodic or weighted and periodic versions of alpha complexes. Alpha values computation can be
@@ -173,21 +214,14 @@ namespace alpha_complex {
* Gudhi::alpha_complex::complexity::EXACT.
*
* This example builds the CGAL 3d weighted alpha shapes from a small molecule, and initializes the alpha complex with
- * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13">CGAL 3d
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
* weighted alpha shapes</a>.
*
* Then, it is asked to display information about the alpha complex.
*
- * \include Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp
- *
- * When launching:
- *
- * \code $> ./Alpha_complex_example_weighted_3d_from_points
- * \endcode
- *
- * the program output is:
+ * \include Weighted_alpha_complex_3d_from_points.cpp
*
- * \include Alpha_complex/weightedalpha3dfrompoints_for_doc.txt
+ * The results will be the same as in \ref weightedversion .
*
*/
/** @} */ // end defgroup alpha_complex
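
The remarks above mention the exact and default_filtration_value arguments of Alpha_complex::create_complex. A short sketch of how those options combine, assuming the signature create_complex(complex, max_alpha_square, exact, default_filtration_value) suggested by this documentation (the defaults shown are an assumption):

    // Sketch based on the documented options; not taken from the patch.
    #include <gudhi/Alpha_complex.h>
    #include <gudhi/Simplex_tree.h>
    #include <CGAL/Epeck_d.h>
    #include <limits>
    #include <vector>

    int main() {
      using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
      using Point = Kernel::Point_d;
      std::vector<Point> points{Point(0., 0.), Point(1., 0.), Point(0., 1.)};
      Gudhi::alpha_complex::Alpha_complex<Kernel> alpha(points);
      constexpr double inf = std::numeric_limits<double>::infinity();

      // exact = false (the default) is fast; filtration values may carry a
      // small multiplicative error. exact = true is slow but exact.
      Gudhi::Simplex_tree<> alpha_stree;
      alpha.create_complex(alpha_stree, inf, /*exact=*/false,
                           /*default_filtration_value=*/false);

      // default_filtration_value = true yields the Delaunay complex, with no
      // filtration values computed.
      Gudhi::Simplex_tree<> delaunay_stree;
      alpha.create_complex(delaunay_stree, inf, /*exact=*/false,
                           /*default_filtration_value=*/true);
      return 0;
    }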
diff --git a/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp b/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp
index 0e359a27..a2c85138 100644
--- a/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp
+++ b/src/Alpha_complex/example/Alpha_complex_3d_from_points.cpp
@@ -38,18 +38,18 @@ int main(int argc, char **argv) {
// ----------------------------------------------------------------------------
// Display information about the alpha complex
// ----------------------------------------------------------------------------
- std::cout << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
+ std::clog << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
<< " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
for (auto f_simplex : simplex.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << simplex.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
return 0;
diff --git a/src/Alpha_complex/example/Alpha_complex_from_off.cpp b/src/Alpha_complex/example/Alpha_complex_from_off.cpp
index 220a66de..dba1710e 100644
--- a/src/Alpha_complex/example/Alpha_complex_from_off.cpp
+++ b/src/Alpha_complex/example/Alpha_complex_from_off.cpp
@@ -30,7 +30,7 @@ int main(int argc, char **argv) {
ouput_file_stream.open(std::string(argv[3]));
streambuffer = ouput_file_stream.rdbuf();
} else {
- streambuffer = std::cout.rdbuf();
+ streambuffer = std::clog.rdbuf();
}
Gudhi::Simplex_tree<> simplex;
diff --git a/src/Alpha_complex/example/Alpha_complex_from_points.cpp b/src/Alpha_complex/example/Alpha_complex_from_points.cpp
index 6526ca3a..c79535bf 100644
--- a/src/Alpha_complex/example/Alpha_complex_from_points.cpp
+++ b/src/Alpha_complex/example/Alpha_complex_from_points.cpp
@@ -35,18 +35,18 @@ int main() {
// ----------------------------------------------------------------------------
// Display information about the alpha complex
// ----------------------------------------------------------------------------
- std::cout << "Alpha complex is of dimension " << simplex.dimension() <<
+ std::clog << "Alpha complex is of dimension " << simplex.dimension() <<
" - " << simplex.num_simplices() << " simplices - " <<
simplex.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
for (auto f_simplex : simplex.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << simplex.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
}
return 0;
diff --git a/src/Alpha_complex/example/CMakeLists.txt b/src/Alpha_complex/example/CMakeLists.txt
index b0337934..1fc2330a 100644
--- a/src/Alpha_complex/example/CMakeLists.txt
+++ b/src/Alpha_complex/example/CMakeLists.txt
@@ -25,22 +25,28 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_test(NAME Alpha_complex_example_fast_from_off_32 COMMAND $<TARGET_FILE:Alpha_complex_example_fast_from_off>
"${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" "32.0" "${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt")
-if (DIFF_PATH)
+ if (DIFF_PATH)
# Do not forget to copy test results files in current binary dir
file(COPY "alphaoffreader_for_doc_32.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY "alphaoffreader_for_doc_60.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_test(Alpha_complex_example_from_off_60_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
+ set_tests_properties(Alpha_complex_example_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_60)
add_test(Alpha_complex_example_from_off_32_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
+ set_tests_properties(Alpha_complex_example_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_32)
add_test(Alpha_complex_example_fast_from_off_60_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
+ set_tests_properties(Alpha_complex_example_fast_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_60)
add_test(Alpha_complex_example_fast_from_off_32_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
+ set_tests_properties(Alpha_complex_example_fast_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_32)
endif()
+endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
add_executable ( Alpha_complex_example_weighted_3d_from_points Weighted_alpha_complex_3d_from_points.cpp )
target_link_libraries(Alpha_complex_example_weighted_3d_from_points ${CGAL_LIBRARY})
if (TBB_FOUND)
@@ -57,4 +63,13 @@ if (DIFF_PATH)
add_test(NAME Alpha_complex_example_3d_from_points
COMMAND $<TARGET_FILE:Alpha_complex_example_3d_from_points>)
-endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+endif(NOT CGAL_VERSION VERSION_LESS 4.11.0)
+
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ add_executable ( Weighted_alpha_complex_example_from_points Weighted_alpha_complex_from_points.cpp )
+ target_link_libraries(Weighted_alpha_complex_example_from_points ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Weighted_alpha_complex_example_from_points ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Weighted_alpha_complex_example_from_points COMMAND $<TARGET_FILE:Weighted_alpha_complex_example_from_points>)
+endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
diff --git a/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp
index f181005a..64728470 100644
--- a/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp
+++ b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp
@@ -35,7 +35,7 @@ int main(int argc, char **argv) {
ouput_file_stream.open(std::string(argv[3]));
streambuffer = ouput_file_stream.rdbuf();
} else {
- streambuffer = std::cout.rdbuf();
+ streambuffer = std::clog.rdbuf();
}
Gudhi::Simplex_tree<> simplex;
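[Annotation, not part of the diff] Both `_from_off` examples use the same stream-buffer switch that this commit retargets from std::cout to std::clog. A self-contained sketch of that idiom, assuming an optional output path argument (names are illustrative):

#include <fstream>
#include <iostream>

int main(int argc, char **argv) {
  std::ofstream output_file_stream;
  std::streambuf* streambuffer;
  if (argc > 1) {
    // A path was given: write into the file.
    output_file_stream.open(argv[1]);
    streambuffer = output_file_stream.rdbuf();
  } else {
    // No path: fall back to std::clog's buffer.
    streambuffer = std::clog.rdbuf();
  }
  std::ostream output_stream(streambuffer);
  output_stream << "goes to the file if a path was given, to std::clog otherwise\n";
  return 0;
}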
diff --git a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
index fcf80802..ee12d418 100644
--- a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
+++ b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
@@ -7,7 +7,7 @@
#include <vector>
#include <limits> // for numeric limits
-// Complexity = FAST, weighted = true, periodic = false
+// Complexity = SAFE, weighted = true, periodic = false
using Weighted_alpha_complex_3d =
Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::SAFE, true, false>;
using Bare_point = Weighted_alpha_complex_3d::Bare_point_3;
@@ -18,11 +18,11 @@ int main(int argc, char **argv) {
// Init of a list of points and weights from a small molecule
// ----------------------------------------------------------------------------
std::vector<Weighted_point> weighted_points;
- weighted_points.push_back(Weighted_point(Bare_point(1, -1, -1), 4.));
- weighted_points.push_back(Weighted_point(Bare_point(-1, 1, -1), 4.));
- weighted_points.push_back(Weighted_point(Bare_point(-1, -1, 1), 4.));
- weighted_points.push_back(Weighted_point(Bare_point(1, 1, 1), 4.));
- weighted_points.push_back(Weighted_point(Bare_point(2, 2, 2), 1.));
+ weighted_points.emplace_back(Bare_point(1, -1, -1), 4.);
+ weighted_points.emplace_back(Bare_point(-1, 1, -1), 4.);
+ weighted_points.emplace_back(Bare_point(-1, -1, 1), 4.);
+ weighted_points.emplace_back(Bare_point(1, 1, 1), 4.);
+ weighted_points.emplace_back(Bare_point(2, 2, 2), 1.);
// ----------------------------------------------------------------------------
// Init of an alpha complex from the list of points
@@ -34,18 +34,18 @@ int main(int argc, char **argv) {
// ----------------------------------------------------------------------------
// Display information about the alpha complex
// ----------------------------------------------------------------------------
- std::cout << "Alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
+ std::clog << "Weighted alpha complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
<< " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
for (auto f_simplex : simplex.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << simplex.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
return 0;
diff --git a/src/Alpha_complex/example/Weighted_alpha_complex_from_points.cpp b/src/Alpha_complex/example/Weighted_alpha_complex_from_points.cpp
new file mode 100644
index 00000000..d1f3e436
--- /dev/null
+++ b/src/Alpha_complex/example/Weighted_alpha_complex_from_points.cpp
@@ -0,0 +1,52 @@
+#include <gudhi/Alpha_complex.h>
+// to construct a simplex_tree from alpha complex
+#include <gudhi/Simplex_tree.h>
+
+#include <CGAL/Epeck_d.h>
+
+#include <iostream>
+#include <vector>
+
+// Explicit dimension 3 Epeck_d kernel
+using Kernel = CGAL::Epeck_d< CGAL::Dimension_tag<3> >;
+using Bare_point = Kernel::Point_d;
+using Weighted_point = Kernel::Weighted_point_d;
+using Vector_of_points = std::vector<Weighted_point>;
+
+int main() {
+ // ----------------------------------------------------------------------------
+ // Init of a list of points and weights from a small molecule
+ // ----------------------------------------------------------------------------
+ Vector_of_points points;
+ points.emplace_back(Bare_point(1, -1, -1), 4.);
+ points.emplace_back(Bare_point(-1, 1, -1), 4.);
+ points.emplace_back(Bare_point(-1, -1, 1), 4.);
+ points.emplace_back(Bare_point(1, 1, 1), 4.);
+ points.emplace_back(Bare_point(2, 2, 2), 1.);
+
+ // ----------------------------------------------------------------------------
+ // Init of an alpha complex from the list of points
+ // ----------------------------------------------------------------------------
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_weighted_points(points);
+
+ Gudhi::Simplex_tree<> simplex;
+ if (alpha_complex_from_weighted_points.create_complex(simplex)) {
+ // ----------------------------------------------------------------------------
+ // Display information about the alpha complex
+ // ----------------------------------------------------------------------------
+ std::clog << "Weighted alpha complex is of dimension " << simplex.dimension() <<
+ " - " << simplex.num_simplices() << " simplices - " <<
+ simplex.num_vertices() << " vertices." << std::endl;
+
+ std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ for (auto f_simplex : simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
+ }
+ }
+ return 0;
+}
diff --git a/src/Alpha_complex/example/weightedalpha3dfrompoints_for_doc.txt b/src/Alpha_complex/example/weightedalpha3dfrompoints_for_doc.txt
index 7a09998d..f0695f1a 100644
--- a/src/Alpha_complex/example/weightedalpha3dfrompoints_for_doc.txt
+++ b/src/Alpha_complex/example/weightedalpha3dfrompoints_for_doc.txt
@@ -1,5 +1,5 @@
-Alpha complex is of dimension 3 - 29 simplices - 5 vertices.
-Iterator on alpha complex simplices in the filtration order, with [filtration value]:
+Weighted alpha complex is of dimension 3 - 29 simplices - 5 vertices.
+Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:
( 0 ) -> [-4]
( 1 ) -> [-4]
( 2 ) -> [-4]
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index f2a05e95..a7372f19 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -12,14 +12,17 @@
#ifndef ALPHA_COMPLEX_H_
#define ALPHA_COMPLEX_H_
+#include <gudhi/Alpha_complex/Alpha_kernel_d.h>
#include <gudhi/Debug_utils.h>
// to construct Alpha_complex from a OFF file of points
#include <gudhi/Points_off_io.h>
-#include <stdlib.h>
-#include <math.h> // isnan, fmax
+#include <cmath> // isnan, fmax
+#include <memory> // for std::unique_ptr
+#include <cstddef> // for std::size_t
#include <CGAL/Delaunay_triangulation.h>
+#include <CGAL/Regular_triangulation.h> // aka. Weighted Delaunay triangulation
#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
#include <CGAL/Epick_d.h> // For FAST version
#include <CGAL/Spatial_sort_traits_adapter_d.h>
@@ -29,6 +32,10 @@
#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
+#include <boost/range/size.hpp>
+#include <boost/range/combine.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
#include <iostream>
#include <vector>
#include <string>
@@ -37,6 +44,7 @@
#include <utility> // std::pair
#include <stdexcept>
#include <numeric> // for std::iota
+#include <algorithm> // for std::sort
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
@@ -61,7 +69,7 @@ template<typename D> struct Is_Epeck_D<CGAL::Epeck_d<D>> { static const bool val
* \ingroup alpha_complex
*
* \details
- * The data structure is constructing a CGAL Delaunay triangulation (for more informations on CGAL Delaunay
+ * The data structure is constructing a CGAL Delaunay triangulation (for more information on CGAL Delaunay
* triangulation, please refer to the corresponding chapter in page http://doc.cgal.org/latest/Triangulation/) from a
* range of points or from an OFF file (cf. Points_off_reader).
*
@@ -91,47 +99,61 @@ template<typename D> struct Is_Epeck_D<CGAL::Epeck_d<D>> { static const bool val
* guarantee that the output is a valid filtration (faces have a filtration value no larger than their cofaces).
 * - For performance reasons, it is advised to use `Alpha_complex` with \ref cgal &ge; 5.0.0.
*/
-template<class Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>
+template<class Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>, bool Weighted = false>
class Alpha_complex {
+ private:
+ // Vertex_handle internal type (required by triangulation_ and vertices_).
+ using Internal_vertex_handle = std::ptrdiff_t;
+
public:
+ /** \brief Geometric traits class that provides the geometric types and predicates needed by the triangulations.*/
+ using Geom_traits = std::conditional_t<Weighted, CGAL::Regular_triangulation_traits_adapter<Kernel>, Kernel>;
+
// Add an int in TDS to save point index in the structure
- typedef CGAL::Triangulation_data_structure<typename Kernel::Dimension,
- CGAL::Triangulation_vertex<Kernel, std::ptrdiff_t>,
- CGAL::Triangulation_full_cell<Kernel> > TDS;
- /** \brief A Delaunay triangulation of a set of points in \f$ \mathbb{R}^D\f$.*/
- typedef CGAL::Delaunay_triangulation<Kernel, TDS> Delaunay_triangulation;
-
- /** \brief A point in Euclidean space.*/
- typedef typename Kernel::Point_d Point_d;
- /** \brief Geometric traits class that provides the geometric types and predicates needed by Delaunay
- * triangulations.*/
- typedef Kernel Geom_traits;
+ using TDS = CGAL::Triangulation_data_structure<typename Geom_traits::Dimension,
+ CGAL::Triangulation_vertex<Geom_traits, Internal_vertex_handle>,
+ CGAL::Triangulation_full_cell<Geom_traits> >;
- private:
- typedef typename Kernel::Compute_squared_radius_d Squared_Radius;
- typedef typename Kernel::Side_of_bounded_sphere_d Is_Gabriel;
- typedef typename Kernel::Point_dimension_d Point_Dimension;
+  /** \brief A (weighted or unweighted) Delaunay triangulation of a set of points in \f$ \mathbb{R}^D\f$.*/
+ using Triangulation = std::conditional_t<Weighted, CGAL::Regular_triangulation<Kernel, TDS>,
+ CGAL::Delaunay_triangulation<Kernel, TDS>>;
- // Type required to compute squared radius, or side of bounded sphere on a vector of points.
- typedef typename std::vector<Point_d> Vector_of_CGAL_points;
+  /** \brief CGAL kernel container that dispatches computations depending on whether the complex is weighted or not.*/
+ using A_kernel_d = Alpha_kernel_d<Kernel, Weighted>;
- // Vertex_iterator type from CGAL.
- typedef typename Delaunay_triangulation::Vertex_iterator CGAL_vertex_iterator;
+ // Numeric type of coordinates in the kernel
+ using FT = typename A_kernel_d::FT;
+
+  /** \brief Sphere is a std::pair<Kernel::Point_d, Kernel::FT> (aka. circumcenter and squared radius).
+   * If Weighted, Sphere is a Kernel::Weighted_point_d (aka. circumcenter, and the weight value is the squared radius).
+ */
+ using Sphere = typename A_kernel_d::Sphere;
- // size_type type from CGAL.
- typedef typename Delaunay_triangulation::size_type size_type;
+ /** \brief A point, or a weighted point in Euclidean space.*/
+ using Point_d = typename Geom_traits::Point_d;
+
+ private:
+ // Vertex_iterator type from CGAL.
+ using CGAL_vertex_iterator = typename Triangulation::Vertex_iterator;
// Structure to switch from simplex tree vertex handle to CGAL vertex iterator.
- typedef typename std::vector< CGAL_vertex_iterator > Vector_vertex_iterator;
+ using Vector_vertex_iterator = std::vector< CGAL_vertex_iterator >;
private:
/** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator.
* Vertex handles are inserted sequentially, starting at 0.*/
Vector_vertex_iterator vertex_handle_to_iterator_;
/** \brief Pointer on the CGAL Delaunay triangulation.*/
- Delaunay_triangulation* triangulation_;
+ std::unique_ptr<Triangulation> triangulation_;
/** \brief Kernel for triangulation_ functions access.*/
- Kernel kernel_;
+ A_kernel_d kernel_;
+ /** \brief Vertices to be inserted first by the create_complex method to avoid quadratic complexity.
+ * It isn't just [0, n) if some points have multiplicity (only one copy appears in the complex).
+ */
+ std::vector<Internal_vertex_handle> vertices_;
+
+ /** \brief Cache for geometric constructions: circumcenter and squared radius of a simplex.*/
+ std::vector<Sphere> cache_, old_cache_;
public:
/** \brief Alpha_complex constructor from an OFF file name.
@@ -143,8 +165,7 @@ class Alpha_complex {
*
* @param[in] off_file_name OFF file [path and] name.
*/
- Alpha_complex(const std::string& off_file_name)
- : triangulation_(nullptr) {
+ Alpha_complex(const std::string& off_file_name) {
Gudhi::Points_off_reader<Point_d> off_reader(off_file_name);
if (!off_reader.is_valid()) {
std::cerr << "Alpha_complex - Unable to read file " << off_file_name << "\n";
@@ -156,23 +177,40 @@ class Alpha_complex {
/** \brief Alpha_complex constructor from a list of points.
*
- * Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
+ * The vertices may not be contiguous, as some points may be discarded by the triangulation (duplicate points,
+ * weighted hidden points, ...).
*
- * @param[in] points Range of points to triangulate. Points must be in Kernel::Point_d
+ * @param[in] points Range of points to triangulate. Points must be in Kernel::Point_d or Kernel::Weighted_point_d.
*
- * The type InputPointRange must be a range for which std::begin and
- * std::end return input iterators on a Kernel::Point_d.
+ * The type InputPointRange must be a range for which std::begin and std::end return input iterators on a
+ * Kernel::Point_d or Kernel::Weighted_point_d.
*/
template<typename InputPointRange >
- Alpha_complex(const InputPointRange& points)
- : triangulation_(nullptr) {
+ Alpha_complex(const InputPointRange& points) {
init_from_range(points);
}
- /** \brief Alpha_complex destructor deletes the Delaunay triangulation.
+ /** \brief Alpha_complex constructor from a list of points and weights.
+ *
+ * The vertices may not be contiguous, as some points may be discarded by the triangulation (duplicate points,
+ * weighted hidden points, ...).
+ *
+ * @param[in] points Range of points to triangulate. Points must be in Kernel::Point_d.
+ *
+ * @param[in] weights Range of points weights. Weights must be in Kernel::FT.
+ *
+ * The type InputPointRange must be a range for which std::begin and std::end return input iterators on a
+ * Kernel::Point_d.
*/
- ~Alpha_complex() {
- delete triangulation_;
+ template <typename InputPointRange, typename WeightRange>
+ Alpha_complex(const InputPointRange& points, WeightRange weights) {
+ static_assert(Weighted, "This constructor is not available for non-weighted versions of Alpha_complex");
+ // FIXME: this test is only valid if we have a forward range
+ GUDHI_CHECK(boost::size(weights) == boost::size(points),
+ std::invalid_argument("Points number in range different from weights range number"));
+ auto weighted_points = boost::range::combine(points, weights)
+ | boost::adaptors::transformed([](auto const&t){return Point_d(boost::get<0>(t), boost::get<1>(t));});
+ init_from_range(weighted_points);
}
// Forbid copy/move constructor/assignment operator
@@ -181,6 +219,15 @@ class Alpha_complex {
Alpha_complex (Alpha_complex&& other) = delete;
Alpha_complex& operator= (Alpha_complex&& other) = delete;
+ /** \brief Returns the number of finite vertices in the triangulation.
+ */
+ std::size_t num_vertices() const {
+ if (triangulation_ == nullptr)
+ return 0;
+ else
+ return triangulation_->number_of_vertices();
+ }
+
/** \brief get_point returns the point corresponding to the vertex given as parameter.
*
* @param[in] vertex Vertex handle of the point to retrieve.
@@ -200,64 +247,119 @@ class Alpha_complex {
<< std::endl;
#endif
+#if CGAL_VERSION_NR < 1050101000
+ // Make compilation fail if weighted and CGAL < 5.1
+ static_assert(!Weighted, "Weighted Alpha_complex is only available for CGAL >= 5.1");
+#endif
+
auto first = std::begin(points);
auto last = std::end(points);
if (first != last) {
- // point_dimension function initialization
- Point_Dimension point_dimension = kernel_.point_dimension_d_object();
-
- // Delaunay triangulation is point dimension.
- triangulation_ = new Delaunay_triangulation(point_dimension(*first));
+ // Delaunay triangulation init with point dimension.
+ triangulation_ = std::make_unique<Triangulation>(kernel_.get_dimension(*first));
std::vector<Point_d> point_cloud(first, last);
// Creates a vector {0, 1, ..., N-1}
- std::vector<std::ptrdiff_t> indices(boost::counting_iterator<std::ptrdiff_t>(0),
- boost::counting_iterator<std::ptrdiff_t>(point_cloud.size()));
+ std::vector<Internal_vertex_handle> indices(boost::counting_iterator<Internal_vertex_handle>(0),
+ boost::counting_iterator<Internal_vertex_handle>(point_cloud.size()));
- typedef boost::iterator_property_map<typename std::vector<Point_d>::iterator,
- CGAL::Identity_property_map<std::ptrdiff_t>> Point_property_map;
- typedef CGAL::Spatial_sort_traits_adapter_d<Kernel, Point_property_map> Search_traits_d;
+ using Point_property_map = boost::iterator_property_map<typename std::vector<Point_d>::iterator,
+ CGAL::Identity_property_map<Internal_vertex_handle>>;
+ using Search_traits_d = CGAL::Spatial_sort_traits_adapter_d<Geom_traits, Point_property_map>;
CGAL::spatial_sort(indices.begin(), indices.end(), Search_traits_d(std::begin(point_cloud)));
- typename Delaunay_triangulation::Full_cell_handle hint;
+ typename Triangulation::Full_cell_handle hint;
for (auto index : indices) {
- typename Delaunay_triangulation::Vertex_handle pos = triangulation_->insert(point_cloud[index], hint);
- // Save index value as data to retrieve it after insertion
- pos->data() = index;
- hint = pos->full_cell();
+ typename Triangulation::Vertex_handle pos = triangulation_->insert(point_cloud[index], hint);
+ if (pos != nullptr) {
+ // Save index value as data to retrieve it after insertion
+ pos->data() = index;
+ hint = pos->full_cell();
+ }
}
// --------------------------------------------------------------------------------------------
// structure to retrieve CGAL points from vertex handle - one vertex handle per point.
// Needs to be constructed before as vertex handles arrives in no particular order.
vertex_handle_to_iterator_.resize(point_cloud.size());
+      // List of sorted unique vertices in the triangulation. We take advantage of the existing loop to construct it.
+      // This list avoids quadratic complexity with the Simplex_tree; it would not be needed with e.g. a Toplex_map.
+ vertices_.reserve(triangulation_->number_of_vertices());
// Loop on triangulation vertices list
for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) {
if (!triangulation_->is_infinite(*vit)) {
#ifdef DEBUG_TRACES
- std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
+ std::clog << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
#endif // DEBUG_TRACES
vertex_handle_to_iterator_[vit->data()] = vit;
+ vertices_.push_back(vit->data());
}
}
+ std::sort(vertices_.begin(), vertices_.end());
// --------------------------------------------------------------------------------------------
}
}
+ /** \brief get_point_ returns the point corresponding to the vertex given as parameter.
+ * Only for internal use for faster access.
+ *
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ * @return The point found.
+ */
+ const Point_d& get_point_(std::size_t vertex) const {
+ return vertex_handle_to_iterator_[vertex]->point();
+ }
+
+  /// Return a reference to the circumcenter and squared circumradius, writing them in the cache if necessary.
+ template<class SimplicialComplexForAlpha>
+ auto& get_cache(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) {
+ auto k = cplx.key(s);
+ if(k==cplx.null_key()){
+ k = cache_.size();
+ cplx.assign_key(s, k);
+ // Using a transform_range is slower, currently.
+ thread_local std::vector<Point_d> v;
+ v.clear();
+ for (auto vertex : cplx.simplex_vertex_range(s))
+ v.push_back(get_point_(vertex));
+ cache_.emplace_back(kernel_.get_sphere(v.cbegin(), v.cend()));
+ }
+ return cache_[k];
+ }
+
+  /// Return the squared circumradius, either from the old cache or computed, without writing to the cache.
+ template<class SimplicialComplexForAlpha>
+ auto radius(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) {
+ auto k = cplx.key(s);
+ if(k!=cplx.null_key())
+ return kernel_.get_squared_radius(old_cache_[k]);
+ // Using a transform_range is slower, currently.
+ thread_local std::vector<Point_d> v;
+ v.clear();
+ for (auto vertex : cplx.simplex_vertex_range(s))
+ v.push_back(get_point_(vertex));
+ return kernel_.get_squared_radius(v.cbegin(), v.cend());
+ }
+
public:
  /** \brief Inserts the whole Delaunay triangulation into the simplicial complex.
- * It also computes the filtration values accordingly to the \ref createcomplexalgorithm
+  * It also computes the filtration values according to the \ref createcomplexalgorithm if default_filtration_value
+  * is not set.
*
* \tparam SimplicialComplexForAlpha must meet `SimplicialComplexForAlpha` concept.
*
* @param[in] complex SimplicialComplexForAlpha to be created.
* @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$, and there is very
- * little point using anything else since it does not save time.
+  * little point using anything else since it does not save time. This parameter is ignored if
+  * `default_filtration_value` is set to `true`.
* @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
* href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
- *
+  * @param[in] default_filtration_value Set this value to `true` if the filtration values do not need to be computed
+  * (they will all be set to `NaN`).
+  * Default value is `false` (which means the filtration values are computed).
+ *
* @return true if creation succeeds, false otherwise.
*
* @pre Delaunay triangulation must be already constructed with dimension strictly greater than 0.
@@ -269,11 +371,12 @@ class Alpha_complex {
typename Filtration_value = typename SimplicialComplexForAlpha::Filtration_value>
bool create_complex(SimplicialComplexForAlpha& complex,
Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity(),
- bool exact = false) {
+ bool exact = false,
+ bool default_filtration_value = false) {
// From SimplicialComplexForAlpha type required to insert into a simplicial complex (with or without subfaces).
- typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
- typedef typename SimplicialComplexForAlpha::Simplex_handle Simplex_handle;
- typedef std::vector<Vertex_handle> Vector_vertex;
+ using Vertex_handle = typename SimplicialComplexForAlpha::Vertex_handle;
+ using Simplex_handle = typename SimplicialComplexForAlpha::Simplex_handle;
+ using Vector_vertex = std::vector<Vertex_handle>;
if (triangulation_ == nullptr) {
std::cerr << "Alpha_complex cannot create_complex from a NULL triangulation\n";
@@ -290,25 +393,34 @@ class Alpha_complex {
// --------------------------------------------------------------------------------------------
// Simplex_tree construction from loop on triangulation finite full cells list
- if (triangulation_->number_of_vertices() > 0) {
+ if (num_vertices() > 0) {
+ std::vector<Vertex_handle> one_vertex(1);
+ for (auto vertex : vertices_) {
+#ifdef DEBUG_TRACES
+ std::clog << "SimplicialComplex insertion " << vertex << std::endl;
+#endif // DEBUG_TRACES
+ one_vertex[0] = vertex;
+ complex.insert_simplex_and_subfaces(one_vertex, std::numeric_limits<double>::quiet_NaN());
+ }
+
for (auto cit = triangulation_->finite_full_cells_begin();
cit != triangulation_->finite_full_cells_end();
++cit) {
Vector_vertex vertexVector;
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree insertion ";
+ std::clog << "SimplicialComplex insertion ";
#endif // DEBUG_TRACES
for (auto vit = cit->vertices_begin(); vit != cit->vertices_end(); ++vit) {
if (*vit != nullptr) {
#ifdef DEBUG_TRACES
- std::cout << " " << (*vit)->data();
+ std::clog << " " << (*vit)->data();
#endif // DEBUG_TRACES
// Vector of vertex construction for simplex_tree structure
vertexVector.push_back((*vit)->data());
}
}
#ifdef DEBUG_TRACES
- std::cout << std::endl;
+ std::clog << std::endl;
#endif // DEBUG_TRACES
// Insert each simplex and its subfaces in the simplex tree - filtration is NaN
complex.insert_simplex_and_subfaces(vertexVector, std::numeric_limits<double>::quiet_NaN());
@@ -316,62 +428,50 @@ class Alpha_complex {
}
// --------------------------------------------------------------------------------------------
- // --------------------------------------------------------------------------------------------
- // Will be re-used many times
- Vector_of_CGAL_points pointVector;
- // ### For i : d -> 0
- for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
- // ### Foreach Sigma of dim i
- for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
- int f_simplex_dim = complex.dimension(f_simplex);
- if (decr_dim == f_simplex_dim) {
- pointVector.clear();
-#ifdef DEBUG_TRACES
- std::cout << "Sigma of dim " << decr_dim << " is";
-#endif // DEBUG_TRACES
- for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
- pointVector.push_back(get_point(vertex));
-#ifdef DEBUG_TRACES
- std::cout << " " << vertex;
-#endif // DEBUG_TRACES
- }
-#ifdef DEBUG_TRACES
- std::cout << std::endl;
-#endif // DEBUG_TRACES
- // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
- if (std::isnan(complex.filtration(f_simplex))) {
- Filtration_value alpha_complex_filtration = 0.0;
- // No need to compute squared_radius on a single point - alpha is 0.0
- if (f_simplex_dim > 0) {
- // squared_radius function initialization
- Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
-
- CGAL::NT_converter<typename Geom_traits::FT, Filtration_value> cv;
- auto sqrad = squared_radius(pointVector.begin(), pointVector.end());
+ if (!default_filtration_value) {
+ CGAL::NT_converter<FT, Filtration_value> cgal_converter;
+ // --------------------------------------------------------------------------------------------
+ // ### For i : d -> 0
+ for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
+ // ### Foreach Sigma of dim i
+ for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
+ int f_simplex_dim = complex.dimension(f_simplex);
+ if (decr_dim == f_simplex_dim) {
+ // ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
+ if (std::isnan(complex.filtration(f_simplex))) {
+ Filtration_value alpha_complex_filtration = 0.0;
+ // No need to compute squared_radius on a non-weighted single point - alpha is 0.0
+ if (Weighted || f_simplex_dim > 0) {
+ auto const& sqrad = radius(complex, f_simplex);
#if CGAL_VERSION_NR >= 1050000000
- if(exact) CGAL::exact(sqrad);
+ if(exact) CGAL::exact(sqrad);
#endif
- alpha_complex_filtration = cv(sqrad);
- }
- complex.assign_filtration(f_simplex, alpha_complex_filtration);
+ alpha_complex_filtration = cgal_converter(sqrad);
+ }
+ complex.assign_filtration(f_simplex, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
+ std::clog << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
#endif // DEBUG_TRACES
+ }
+ // No need to propagate further, unweighted points all have value 0
+ if (decr_dim > !Weighted)
+ propagate_alpha_filtration(complex, f_simplex);
}
- // No need to propagate further, unweighted points all have value 0
- if (decr_dim > 1)
- propagate_alpha_filtration(complex, f_simplex);
}
+ old_cache_ = std::move(cache_);
+ cache_.clear();
}
+ // --------------------------------------------------------------------------------------------
+
+ // --------------------------------------------------------------------------------------------
+ if (!exact)
+ // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
+        // Only in the non-exact version, cf. https://github.com/GUDHI/gudhi-devel/issues/57
+ complex.make_filtration_non_decreasing();
+ // Remove all simplices that have a filtration value greater than max_alpha_square
+ complex.prune_above_filtration(max_alpha_square);
+ // --------------------------------------------------------------------------------------------
}
- // --------------------------------------------------------------------------------------------
-
- // --------------------------------------------------------------------------------------------
- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- complex.make_filtration_non_decreasing();
- // Remove all simplices that have a filtration value greater than max_alpha_square
- complex.prune_above_filtration(max_alpha_square);
- // --------------------------------------------------------------------------------------------
return true;
}
@@ -379,21 +479,19 @@ class Alpha_complex {
template <typename SimplicialComplexForAlpha, typename Simplex_handle>
void propagate_alpha_filtration(SimplicialComplexForAlpha& complex, Simplex_handle f_simplex) {
// From SimplicialComplexForAlpha type required to assign filtration values.
- typedef typename SimplicialComplexForAlpha::Filtration_value Filtration_value;
-#ifdef DEBUG_TRACES
- typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
-#endif // DEBUG_TRACES
+ using Filtration_value = typename SimplicialComplexForAlpha::Filtration_value;
// ### Foreach Tau face of Sigma
- for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) {
+ for (auto face_opposite_vertex : complex.boundary_opposite_vertex_simplex_range(f_simplex)) {
+ auto f_boundary = face_opposite_vertex.first;
#ifdef DEBUG_TRACES
- std::cout << " | --------------------------------------------------\n";
- std::cout << " | Tau ";
+ std::clog << " | --------------------------------------------------\n";
+ std::clog << " | Tau ";
for (auto vertex : complex.simplex_vertex_range(f_boundary)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << "is a face of Sigma\n";
- std::cout << " | isnan(complex.filtration(Tau)=" << std::isnan(complex.filtration(f_boundary)) << std::endl;
+ std::clog << "is a face of Sigma\n";
+ std::clog << " | isnan(complex.filtration(Tau)=" << std::isnan(complex.filtration(f_boundary)) << std::endl;
#endif // DEBUG_TRACES
// ### If filt(Tau) is not NaN
if (!std::isnan(complex.filtration(f_boundary))) {
@@ -402,37 +500,14 @@ class Alpha_complex {
complex.filtration(f_simplex));
complex.assign_filtration(f_boundary, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << complex.filtration(f_boundary) << std::endl;
+ std::clog << " | filt(Tau) = fmin(filt(Tau), filt(Sigma)) = " << complex.filtration(f_boundary) << std::endl;
#endif // DEBUG_TRACES
// ### Else
} else {
- // insert the Tau points in a vector for is_gabriel function
- Vector_of_CGAL_points pointVector;
-#ifdef DEBUG_TRACES
- Vertex_handle vertexForGabriel = Vertex_handle();
-#endif // DEBUG_TRACES
- for (auto vertex : complex.simplex_vertex_range(f_boundary)) {
- pointVector.push_back(get_point(vertex));
- }
- // Retrieve the Sigma point that is not part of Tau - parameter for is_gabriel function
- Point_d point_for_gabriel;
- for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
- point_for_gabriel = get_point(vertex);
- if (std::find(pointVector.begin(), pointVector.end(), point_for_gabriel) == pointVector.end()) {
-#ifdef DEBUG_TRACES
- // vertex is not found in Tau
- vertexForGabriel = vertex;
-#endif // DEBUG_TRACES
- // No need to continue loop
- break;
- }
- }
- // is_gabriel function initialization
- Is_Gabriel is_gabriel = kernel_.side_of_bounded_sphere_d_object();
- bool is_gab = is_gabriel(pointVector.begin(), pointVector.end(), point_for_gabriel)
- != CGAL::ON_BOUNDED_SIDE;
+ auto const& cache=get_cache(complex, f_boundary);
+ bool is_gab = kernel_.is_gabriel(cache, get_point_(face_opposite_vertex.second));
#ifdef DEBUG_TRACES
- std::cout << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl;
+ std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << face_opposite_vertex.second << std::endl;
#endif // DEBUG_TRACES
// ### If Tau is not Gabriel of Sigma
if (false == is_gab) {
@@ -440,7 +515,7 @@ class Alpha_complex {
Filtration_value alpha_complex_filtration = complex.filtration(f_simplex);
complex.assign_filtration(f_boundary, alpha_complex_filtration);
#ifdef DEBUG_TRACES
- std::cout << " | filt(Tau) = filt(Sigma) = " << complex.filtration(f_boundary) << std::endl;
+ std::clog << " | filt(Tau) = filt(Sigma) = " << complex.filtration(f_boundary) << std::endl;
#endif // DEBUG_TRACES
}
}
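[Annotation, not part of the diff] Taken together, the Alpha_complex.h changes add two user-visible features: a weighted dD construction from separate point and weight ranges, and a `default_filtration_value` switch on `create_complex`. A minimal usage sketch, assuming CGAL >= 5.1 is available (the points and weights are illustrative, borrowed from the small-molecule example above):

#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>

#include <CGAL/Epeck_d.h>

#include <limits>
#include <vector>

using Kernel = CGAL::Epeck_d< CGAL::Dimension_tag<3> >;

int main() {
  std::vector<Kernel::Point_d> points;
  points.emplace_back(1., -1., -1.);
  points.emplace_back(-1., 1., -1.);
  points.emplace_back(-1., -1., 1.);
  points.emplace_back(1., 1., 1.);
  points.emplace_back(2., 2., 2.);
  std::vector<double> weights = {4., 4., 4., 4., 1.};

  // Weighted = true: points and weights are zipped into Kernel::Weighted_point_d internally.
  Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha(points, weights);

  Gudhi::Simplex_tree<> stree;
  // default_filtration_value = true skips the alpha value computation and keeps
  // the (weighted) Delaunay complex with NaN filtration values.
  alpha.create_complex(stree, std::numeric_limits<double>::infinity(), false, true);
  return 0;
}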
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex/Alpha_kernel_d.h b/src/Alpha_complex/include/gudhi/Alpha_complex/Alpha_kernel_d.h
new file mode 100644
index 00000000..28d6d7a9
--- /dev/null
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex/Alpha_kernel_d.h
@@ -0,0 +1,141 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef ALPHA_COMPLEX_ALPHA_KERNEL_D_H_
+#define ALPHA_COMPLEX_ALPHA_KERNEL_D_H_
+
+#include <CGAL/version.h> // for CGAL_VERSION_NR
+
+#include <Eigen/Core> // for EIGEN_VERSION_AT_LEAST
+
+#include <utility> // for std::make_pair
+
+// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
+#if CGAL_VERSION_NR < 1041101000
+# error Alpha_complex is only available for CGAL >= 4.11
+#endif
+
+#if !EIGEN_VERSION_AT_LEAST(3,1,0)
+# error Alpha_complex is only available for Eigen3 >= 3.1.0 installed with CGAL
+#endif
+
+namespace Gudhi {
+
+namespace alpha_complex {
+
+/**
+ * \class Alpha_kernel_d
+ * \brief Alpha complex kernel container.
+ *
+ * \details
+ * The Alpha complex kernel container stores the CGAL kernel and dispatches the basic computations depending on the
+ * weighted or unweighted version of the Alpha complex.
+ */
+template < typename Kernel, bool Weighted = false >
+class Alpha_kernel_d {
+};
+
+// Unweighted Kernel_d version
+template < typename Kernel >
+class Alpha_kernel_d<Kernel, false> {
+ private:
+ // Kernel for functions access.
+ Kernel kernel_;
+ public:
+ // Fake type for compilation to succeed (cf. std::conditional in Alpha_complex.h)
+ using Weighted_point_d = void;
+ using Point_d = typename Kernel::Point_d;
+ // Numeric type of coordinates in the kernel
+ using FT = typename Kernel::FT;
+ // Sphere is a pair of point and squared radius.
+ using Sphere = typename std::pair<Point_d, FT>;
+
+ int get_dimension(const Point_d& p0) const {
+ return kernel_.point_dimension_d_object()(p0);
+ }
+
+ template<class PointIterator>
+ Sphere get_sphere(PointIterator begin, PointIterator end) const {
+ Point_d c = kernel_.construct_circumcenter_d_object()(begin, end);
+ FT r = kernel_.squared_distance_d_object()(c, *begin);
+ return std::make_pair(std::move(c), std::move(r));
+ }
+
+ template<class PointIterator>
+ FT get_squared_radius(PointIterator begin, PointIterator end) const {
+ return kernel_.compute_squared_radius_d_object()(begin, end);
+ }
+
+ FT get_squared_radius(const Sphere& sph) const {
+ return sph.second;
+ }
+
+ bool is_gabriel(const Sphere& circumcenter, const Point_d& point) {
+ return kernel_.squared_distance_d_object()(circumcenter.first, point) >= circumcenter.second;
+ }
+};
+
+// Weighted Kernel_d version
+template < typename Kernel >
+class Alpha_kernel_d<Kernel, true> {
+ private:
+ // Kernel for functions access.
+ Kernel kernel_;
+
+ public:
+ // Fake type for compilation to succeed (cf. std::conditional in Alpha_complex.h)
+ using Point_d = void;
+ using Weighted_point_d = typename Kernel::Weighted_point_d;
+ using Bare_point_d = typename Kernel::Point_d;
+ // Numeric type of coordinates in the kernel
+ using FT = typename Kernel::FT;
+ // Sphere is a weighted point (point + weight [= squared radius]).
+ using Sphere = Weighted_point_d;
+
+ int get_dimension(const Weighted_point_d& p0) const {
+ return kernel_.point_dimension_d_object()(p0.point());
+ }
+
+ template<class PointIterator>
+ Sphere get_sphere(PointIterator begin, PointIterator end) const {
+ // power_center_d_object has been renamed between CGAL 5.1 and 5.2
+#if CGAL_VERSION_NR < 1050200000
+ return kernel_.power_center_d_object()(begin, end);
+#else
+ return kernel_.construct_power_sphere_d_object()(begin, end);
+#endif
+ }
+
+ template<class PointIterator>
+ FT get_squared_radius(PointIterator begin, PointIterator end) const {
+ return kernel_.compute_squared_radius_smallest_orthogonal_sphere_d_object()(begin, end);
+ }
+
+ FT get_squared_radius(const Sphere& sph) const {
+ return sph.weight();
+ }
+
+ bool is_gabriel(const Sphere& circumcenter, const Weighted_point_d& point) {
+ // power_center_d_object has been renamed between CGAL 5.1 and 5.2
+#if CGAL_VERSION_NR < 1050200000
+ return kernel_.power_distance_d_object()(circumcenter, point) >= 0;
+#else
+ return kernel_.compute_power_product_d_object()(circumcenter, point) >= 0;
+#endif
+ }
+};
+
+} // namespace alpha_complex
+
+namespace alphacomplex = alpha_complex;
+
+} // namespace Gudhi
+
+#endif  // ALPHA_COMPLEX_ALPHA_KERNEL_D_H_
\ No newline at end of file
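[Annotation, not part of the diff] A short sketch of the new Alpha_kernel_d API in its unweighted specialization. The kernel and points are illustrative; the header is internal to the Alpha complex module, so this is for reading the diff, not a supported entry point:

#include <gudhi/Alpha_complex/Alpha_kernel_d.h>

#include <CGAL/Epeck_d.h>

#include <iostream>
#include <vector>

using Kernel = CGAL::Epeck_d< CGAL::Dimension_tag<2> >;

int main() {
  Gudhi::alpha_complex::Alpha_kernel_d<Kernel, false> kernel;
  std::vector<Kernel::Point_d> triangle;
  triangle.emplace_back(0., 0.);
  triangle.emplace_back(1., 0.);
  triangle.emplace_back(0., 1.);
  // Sphere is std::pair<Point_d, FT>: here circumcenter (0.5, 0.5) and squared radius 0.5.
  auto sphere = kernel.get_sphere(triangle.begin(), triangle.end());
  std::clog << "squared circumradius = " << CGAL::to_double(kernel.get_squared_radius(sphere)) << std::endl;
  return 0;
}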
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index 7f96c94c..562ef139 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -12,8 +12,10 @@
#ifndef ALPHA_COMPLEX_3D_H_
#define ALPHA_COMPLEX_3D_H_
-#include <boost/version.hpp>
#include <boost/variant.hpp>
+#include <boost/range/size.hpp>
+#include <boost/range/combine.hpp>
+#include <boost/range/adaptor/transformed.hpp>
#include <gudhi/Debug_utils.h>
#include <gudhi/Alpha_complex_options.h>
@@ -35,8 +37,6 @@
#include <CGAL/iterator.h>
#include <CGAL/version.h> // for CGAL_VERSION_NR
-#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
-
#include <boost/container/static_vector.hpp>
#include <iostream>
@@ -53,19 +53,10 @@
# error Alpha_complex_3d is only available for CGAL >= 4.11
#endif
-#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
-#endif
-
namespace Gudhi {
namespace alpha_complex {
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
-thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001;
-
// Value_from_iterator returns the filtration value from an iterator on alpha shapes values
//
// FAST SAFE EXACT
@@ -107,7 +98,7 @@ struct Value_from_iterator<complexity::EXACT> {
* \tparam Periodic Boolean used to set/unset the periodic version of Alpha_complex_3d. Default value is false.
*
* For the weighted version, weights values are explained on CGAL
- * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0">Alpha shapes 3d</a> and
+ * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions">Alpha shapes 3d</a> and
* <a href="https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation">Regular
* triangulation</a> documentation.
*
@@ -160,8 +151,10 @@ class Alpha_complex_3d {
using Kernel = CGAL::Periodic_3_regular_triangulation_traits_3<Predicates>;
};
+ public:
using Kernel = typename Kernel_3<Predicates, Weighted, Periodic>::Kernel;
+ private:
using TdsVb = typename std::conditional<Periodic, CGAL::Periodic_3_triangulation_ds_vertex_base_3<>,
CGAL::Triangulation_ds_vertex_base_3<>>::type;
@@ -280,8 +273,8 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
Alpha_complex_3d(const InputPointRange& points) {
static_assert(!Periodic, "This constructor is not available for periodic versions of Alpha_complex_3d");
- alpha_shape_3_ptr_ = std::unique_ptr<Alpha_shape_3>(
- new Alpha_shape_3(std::begin(points), std::end(points), 0, Alpha_shape_3::GENERAL));
+ alpha_shape_3_ptr_ = std::make_unique<Alpha_shape_3>(
+ std::begin(points), std::end(points), 0, Alpha_shape_3::GENERAL);
}
/** \brief Alpha_complex constructor from a list of points and associated weights.
@@ -302,20 +295,15 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
Alpha_complex_3d(const InputPointRange& points, WeightRange weights) {
static_assert(Weighted, "This constructor is not available for non-weighted versions of Alpha_complex_3d");
static_assert(!Periodic, "This constructor is not available for periodic versions of Alpha_complex_3d");
- GUDHI_CHECK((weights.size() == points.size()),
+ // FIXME: this test is only valid if we have a forward range
+ GUDHI_CHECK(boost::size(weights) == boost::size(points),
std::invalid_argument("Points number in range different from weights range number"));
- std::vector<Weighted_point_3> weighted_points_3;
+ auto weighted_points_3 = boost::range::combine(points, weights)
+ | boost::adaptors::transformed([](auto const&t){return Weighted_point_3(boost::get<0>(t), boost::get<1>(t));});
- std::size_t index = 0;
- weighted_points_3.reserve(points.size());
- while ((index < weights.size()) && (index < points.size())) {
- weighted_points_3.push_back(Weighted_point_3(points[index], weights[index]));
- index++;
- }
-
- alpha_shape_3_ptr_ = std::unique_ptr<Alpha_shape_3>(
- new Alpha_shape_3(std::begin(weighted_points_3), std::end(weighted_points_3), 0, Alpha_shape_3::GENERAL));
+ alpha_shape_3_ptr_ = std::make_unique<Alpha_shape_3>(
+ std::begin(weighted_points_3), std::end(weighted_points_3), 0, Alpha_shape_3::GENERAL);
}
/** \brief Alpha_complex constructor from a list of points and an iso-cuboid coordinates.
@@ -359,7 +347,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
// alpha shape construction from points. CGAL has a strange behavior in REGULARIZED mode. This is the default mode
// Maybe need to set it to GENERAL mode
- alpha_shape_3_ptr_ = std::unique_ptr<Alpha_shape_3>(new Alpha_shape_3(pdt, 0, Alpha_shape_3::GENERAL));
+ alpha_shape_3_ptr_ = std::make_unique<Alpha_shape_3>(pdt, 0, Alpha_shape_3::GENERAL);
}
/** \brief Alpha_complex constructor from a list of points, associated weights and an iso-cuboid coordinates.
@@ -391,31 +379,27 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
FT z_min, FT x_max, FT y_max, FT z_max) {
static_assert(Weighted, "This constructor is not available for non-weighted versions of Alpha_complex_3d");
static_assert(Periodic, "This constructor is not available for non-periodic versions of Alpha_complex_3d");
- GUDHI_CHECK((weights.size() == points.size()),
+ // FIXME: this test is only valid if we have a forward range
+ GUDHI_CHECK(boost::size(weights) == boost::size(points),
std::invalid_argument("Points number in range different from weights range number"));
// Checking if the cuboid is the same in x,y and z direction. If not, CGAL will not process it.
GUDHI_CHECK(
(x_max - x_min == y_max - y_min) && (x_max - x_min == z_max - z_min) && (z_max - z_min == y_max - y_min),
std::invalid_argument("The size of the cuboid in every directions is not the same."));
- std::vector<Weighted_point_3> weighted_points_3;
-
- std::size_t index = 0;
- weighted_points_3.reserve(points.size());
-
#ifdef GUDHI_DEBUG
// Defined in GUDHI_DEBUG to avoid unused variable warning for GUDHI_CHECK
FT maximal_possible_weight = 0.015625 * (x_max - x_min) * (x_max - x_min);
#endif
- while ((index < weights.size()) && (index < points.size())) {
- GUDHI_CHECK((weights[index] < maximal_possible_weight) && (weights[index] >= 0),
- std::invalid_argument("Invalid weight at index " + std::to_string(index + 1) +
- ". Must be positive and less than maximal possible weight = 1/64*cuboid length "
- "squared, which is not an acceptable input."));
- weighted_points_3.push_back(Weighted_point_3(points[index], weights[index]));
- index++;
- }
+ auto weighted_points_3 = boost::range::combine(points, weights)
+ | boost::adaptors::transformed([=](auto const&t){
+ auto w = boost::get<1>(t);
+ GUDHI_CHECK((w < maximal_possible_weight) && (w >= 0),
+ std::invalid_argument("Invalid weight " + std::to_string(w) +
+ ". Must be non-negative and less than maximal possible weight = 1/64*cuboid length squared."));
+ return Weighted_point_3(boost::get<0>(t), w);
+ });
// Define the periodic cube
Dt pdt(typename Kernel::Iso_cuboid_3(x_min, y_min, z_min, x_max, y_max, z_max));
@@ -429,7 +413,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
// alpha shape construction from points. CGAL has a strange behavior in REGULARIZED mode. This is the default mode
// Maybe need to set it to GENERAL mode
- alpha_shape_3_ptr_ = std::unique_ptr<Alpha_shape_3>(new Alpha_shape_3(pdt, 0, Alpha_shape_3::GENERAL));
+ alpha_shape_3_ptr_ = std::make_unique<Alpha_shape_3>(pdt, 0, Alpha_shape_3::GENERAL);
}
  /** \brief Inserts the whole Delaunay triangulation into the simplicial complex.
@@ -472,8 +456,12 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
alpha_shape_3_ptr_->filtration_with_alpha_values(dispatcher);
#ifdef DEBUG_TRACES
- std::cout << "filtration_with_alpha_values returns : " << objects.size() << " objects" << std::endl;
+ std::clog << "filtration_with_alpha_values returns : " << objects.size() << " objects" << std::endl;
#endif // DEBUG_TRACES
+ if (objects.size() == 0) {
+ std::cerr << "Alpha_complex_3d create_complex - no triangulation as points are on a 2d plane\n";
+ return false; // ----- >>
+ }
using Alpha_value_iterator = typename std::vector<FT>::const_iterator;
Alpha_value_iterator alpha_value_iterator = alpha_values.begin();
@@ -484,7 +472,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
if (const Cell_handle* cell = CGAL::object_cast<Cell_handle>(&object_iterator)) {
for (auto i = 0; i < 4; i++) {
#ifdef DEBUG_TRACES
- std::cout << "from cell[" << i << "] - Point coordinates (" << (*cell)->vertex(i)->point() << ")"
+ std::clog << "from cell[" << i << "] - Point coordinates (" << (*cell)->vertex(i)->point() << ")"
<< std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*cell)->vertex(i));
@@ -496,7 +484,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
for (auto i = 0; i < 4; i++) {
if ((*facet).second != i) {
#ifdef DEBUG_TRACES
- std::cout << "from facet=[" << i << "] - Point coordinates (" << (*facet).first->vertex(i)->point() << ")"
+ std::clog << "from facet=[" << i << "] - Point coordinates (" << (*facet).first->vertex(i)->point() << ")"
<< std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*facet).first->vertex(i));
@@ -508,7 +496,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
} else if (const Edge* edge = CGAL::object_cast<Edge>(&object_iterator)) {
for (auto i : {(*edge).second, (*edge).third}) {
#ifdef DEBUG_TRACES
- std::cout << "from edge[" << i << "] - Point coordinates (" << (*edge).first->vertex(i)->point() << ")"
+ std::clog << "from edge[" << i << "] - Point coordinates (" << (*edge).first->vertex(i)->point() << ")"
<< std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*edge).first->vertex(i));
@@ -519,7 +507,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
} else if (const Alpha_vertex_handle* vertex = CGAL::object_cast<Alpha_vertex_handle>(&object_iterator)) {
#ifdef DEBUG_TRACES
count_vertices++;
- std::cout << "from vertex - Point coordinates (" << (*vertex)->point() << ")" << std::endl;
+ std::clog << "from vertex - Point coordinates (" << (*vertex)->point() << ")" << std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*vertex));
}
@@ -531,7 +519,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
// alpha shape not found
Complex_vertex_handle vertex = map_cgal_simplex_tree.size();
#ifdef DEBUG_TRACES
- std::cout << "Point (" << the_alpha_shape_vertex->point() << ") not found - insert new vertex id " << vertex
+ std::clog << "Point (" << the_alpha_shape_vertex->point() << ") not found - insert new vertex id " << vertex
<< std::endl;
#endif // DEBUG_TRACES
the_simplex.push_back(vertex);
@@ -540,7 +528,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
// alpha shape found
Complex_vertex_handle vertex = the_map_iterator->second;
#ifdef DEBUG_TRACES
- std::cout << "Point (" << the_alpha_shape_vertex->point() << ") found as vertex id " << vertex << std::endl;
+ std::clog << "Point (" << the_alpha_shape_vertex->point() << ") found as vertex id " << vertex << std::endl;
#endif // DEBUG_TRACES
the_simplex.push_back(vertex);
}
@@ -549,7 +537,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
Filtration_value filtr = Value_from_iterator<Complexity>::perform(alpha_value_iterator);
#ifdef DEBUG_TRACES
- std::cout << "filtration = " << filtr << std::endl;
+ std::clog << "filtration = " << filtr << std::endl;
#endif // DEBUG_TRACES
complex.insert_simplex(the_simplex, static_cast<Filtration_value>(filtr));
GUDHI_CHECK(alpha_value_iterator != alpha_values.end(), "CGAL provided more simplices than values");
@@ -557,14 +545,16 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
}
#ifdef DEBUG_TRACES
- std::cout << "vertices \t" << count_vertices << std::endl;
- std::cout << "edges \t\t" << count_edges << std::endl;
- std::cout << "facets \t\t" << count_facets << std::endl;
- std::cout << "cells \t\t" << count_cells << std::endl;
+ std::clog << "vertices \t" << count_vertices << std::endl;
+ std::clog << "edges \t\t" << count_edges << std::endl;
+ std::clog << "facets \t\t" << count_facets << std::endl;
+ std::clog << "cells \t\t" << count_cells << std::endl;
#endif // DEBUG_TRACES
// --------------------------------------------------------------------------------------------
- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- complex.make_filtration_non_decreasing();
+ if (Complexity == complexity::FAST)
+ // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
+    // Only in the FAST version, cf. https://github.com/GUDHI/gudhi-devel/issues/57
+ complex.make_filtration_non_decreasing();
// Remove all simplices that have a filtration value greater than max_alpha_square
complex.prune_above_filtration(max_alpha_square);
// --------------------------------------------------------------------------------------------
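[Annotation, not part of the diff] Both weighted constructors above now rely on the same lazy zip of points and weights via boost::combine and boost::adaptors::transformed, avoiding the intermediate std::vector the old code materialized. A standalone sketch of the idiom, with plain doubles standing in for points and weighted points:

#include <boost/range/combine.hpp>
#include <boost/range/adaptor/transformed.hpp>

#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<double> points = {1., 2., 3.};
  std::vector<double> weights = {4., 5., 6.};
  // Each (point, weight) pair is built on the fly by the transformed range;
  // boost::get<0>/get<1> unpack the zipped tuple, exactly as in the diff.
  auto weighted = boost::range::combine(points, weights)
      | boost::adaptors::transformed([](auto const& t) {
          return std::make_pair(boost::get<0>(t), boost::get<1>(t));
        });
  for (auto const& pw : weighted)
    std::clog << pw.first << " with weight " << pw.second << "\n";
  return 0;
}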
diff --git a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
index cd698a27..a4ecb6ad 100644
--- a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
@@ -54,7 +54,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// -----------------
// Fast version
// -----------------
- std::cout << "Fast alpha complex 3d" << std::endl;
+ std::clog << "Fast alpha complex 3d" << std::endl;
std::vector<Fast_alpha_complex_3d::Bare_point_3> points = get_points<Fast_alpha_complex_3d::Bare_point_3>();
Fast_alpha_complex_3d alpha_complex(points);
@@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// -----------------
// Exact version
// -----------------
- std::cout << "Exact alpha complex 3d" << std::endl;
+ std::clog << "Exact alpha complex 3d" << std::endl;
std::vector<Exact_alpha_complex_3d::Bare_point_3> exact_points = get_points<Exact_alpha_complex_3d::Bare_point_3>();
Exact_alpha_complex_3d exact_alpha_complex(exact_points);
@@ -105,13 +105,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// ---------------------
// Compare both versions
// ---------------------
- std::cout << "Exact Alpha complex 3d is of dimension " << exact_stree.dimension() << " - Fast is "
+ std::clog << "Exact Alpha complex 3d is of dimension " << exact_stree.dimension() << " - Fast is "
<< stree.dimension() << std::endl;
BOOST_CHECK(exact_stree.dimension() == stree.dimension());
- std::cout << "Exact Alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Fast is "
+ std::clog << "Exact Alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Fast is "
<< stree.num_simplices() << std::endl;
BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices());
- std::cout << "Exact Alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Fast is "
+ std::clog << "Exact Alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Fast is "
<< stree.num_vertices() << std::endl;
BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices());
@@ -119,18 +119,18 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
while (sh != stree.filtration_simplex_range().end()) {
std::vector<int> simplex;
std::vector<int> exact_simplex;
- std::cout << "Fast ( ";
+ std::clog << "Fast ( ";
for (auto vertex : stree.simplex_vertex_range(*sh)) {
simplex.push_back(vertex);
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> [" << stree.filtration(*sh) << "] ";
+ std::clog << ") -> [" << stree.filtration(*sh) << "] ";
// Find it in the exact structure
auto sh_exact = exact_stree.find(simplex);
BOOST_CHECK(sh_exact != exact_stree.null_simplex());
- std::cout << " versus [" << exact_stree.filtration(sh_exact) << "] " << std::endl;
+ std::clog << " versus [" << exact_stree.filtration(sh_exact) << "] " << std::endl;
// Exact and non-exact versions are not exactly the same due to float comparison
GUDHI_TEST_FLOAT_EQUALITY_CHECK(exact_stree.filtration(sh_exact), stree.filtration(*sh));
@@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// -----------------
// Safe version
// -----------------
- std::cout << "Safe alpha complex 3d" << std::endl;
+ std::clog << "Safe alpha complex 3d" << std::endl;
std::vector<Safe_alpha_complex_3d::Bare_point_3> safe_points = get_points<Safe_alpha_complex_3d::Bare_point_3>();
Safe_alpha_complex_3d safe_alpha_complex(safe_points);
@@ -165,13 +165,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// ---------------------
// Compare both versions
// ---------------------
- std::cout << "Safe Alpha complex 3d is of dimension " << safe_stree.dimension() << " - Fast is "
+ std::clog << "Safe Alpha complex 3d is of dimension " << safe_stree.dimension() << " - Fast is "
<< stree.dimension() << std::endl;
BOOST_CHECK(safe_stree.dimension() == stree.dimension());
- std::cout << "Safe Alpha complex 3d num_simplices " << safe_stree.num_simplices() << " - Fast is "
+ std::clog << "Safe Alpha complex 3d num_simplices " << safe_stree.num_simplices() << " - Fast is "
<< stree.num_simplices() << std::endl;
BOOST_CHECK(safe_stree.num_simplices() == stree.num_simplices());
- std::cout << "Safe Alpha complex 3d num_vertices " << safe_stree.num_vertices() << " - Fast is "
+ std::clog << "Safe Alpha complex 3d num_vertices " << safe_stree.num_vertices() << " - Fast is "
<< stree.num_vertices() << std::endl;
BOOST_CHECK(safe_stree.num_vertices() == stree.num_vertices());
@@ -179,18 +179,18 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
while (safe_sh != stree.filtration_simplex_range().end()) {
std::vector<int> simplex;
std::vector<int> exact_simplex;
- std::cout << "Fast ( ";
+ std::clog << "Fast ( ";
for (auto vertex : stree.simplex_vertex_range(*safe_sh)) {
simplex.push_back(vertex);
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> [" << stree.filtration(*safe_sh) << "] ";
+ std::clog << ") -> [" << stree.filtration(*safe_sh) << "] ";
// Find it in the exact structure
auto sh_exact = safe_stree.find(simplex);
BOOST_CHECK(sh_exact != safe_stree.null_simplex());
- std::cout << " versus [" << safe_stree.filtration(sh_exact) << "] " << std::endl;
+ std::clog << " versus [" << safe_stree.filtration(sh_exact) << "] " << std::endl;
// Exact and non-exact versions are not exactly the same due to float comparison
GUDHI_TEST_FLOAT_EQUALITY_CHECK(safe_stree.filtration(sh_exact), stree.filtration(*safe_sh), 1e-15);
diff --git a/src/Alpha_complex/test/Alpha_complex_dim3_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_dim3_unit_test.cpp
new file mode 100644
index 00000000..e7c261f1
--- /dev/null
+++ b/src/Alpha_complex/test/Alpha_complex_dim3_unit_test.cpp
@@ -0,0 +1,117 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2015 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "alpha_complex_dim3"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+
+#include <stdexcept> // std::out_of_range
+#include <string>
+#include <vector>
+#include <iostream> // for std::clog
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<3> > Exact_kernel_s;
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dimension_tag<3> > Inexact_kernel_s;
+// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
+
+typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of_kernel_variants) {
+ // ----------------------------------------------------------------------------
+ //
+ // Init of an alpha-complex from an OFF file
+ //
+ // ----------------------------------------------------------------------------
+ std::string off_file_name("alphacomplexdoc.off");
+ double max_alpha_square_value = 60.0;
+ std::clog << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" <<
+ max_alpha_square_value << "==========" << std::endl;
+
+ Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_file(off_file_name);
+
+ Gudhi::Simplex_tree<> simplex_tree_60;
+ BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_60, max_alpha_square_value));
+
+ std::clog << "alpha_complex_from_file.num_vertices()=" << alpha_complex_from_file.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_file.num_vertices() == 7);
+
+ std::clog << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl;
+ BOOST_CHECK(simplex_tree_60.dimension() == 2);
+
+ std::clog << "simplex_tree_60.num_vertices()=" << simplex_tree_60.num_vertices() << std::endl;
+ BOOST_CHECK(simplex_tree_60.num_vertices() == 7);
+
+ std::clog << "simplex_tree_60.num_simplices()=" << simplex_tree_60.num_simplices() << std::endl;
+ BOOST_CHECK(simplex_tree_60.num_simplices() == 25);
+
+ max_alpha_square_value = 59.0;
+ std::clog << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" <<
+ max_alpha_square_value << "==========" << std::endl;
+
+ Gudhi::Simplex_tree<> simplex_tree_59;
+ BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_59, max_alpha_square_value));
+
+ std::clog << "alpha_complex_from_file.num_vertices()=" << alpha_complex_from_file.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_file.num_vertices() == 7);
+
+ std::clog << "simplex_tree_59.dimension()=" << simplex_tree_59.dimension() << std::endl;
+ BOOST_CHECK(simplex_tree_59.dimension() == 2);
+
+ std::clog << "simplex_tree_59.num_vertices()=" << simplex_tree_59.num_vertices() << std::endl;
+ BOOST_CHECK(simplex_tree_59.num_vertices() == 7);
+
+ std::clog << "simplex_tree_59.num_simplices()=" << simplex_tree_59.num_simplices() << std::endl;
+ BOOST_CHECK(simplex_tree_59.num_simplices() == 23);
+}
+
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, list_of_kernel_variants) {
+ std::clog << "========== Alpha_complex_from_empty_points ==========" << std::endl;
+
+ // ----------------------------------------------------------------------------
+ // Init of an empty list of points
+ // ----------------------------------------------------------------------------
+ std::vector<typename TestedKernel::Point_d> points;
+
+ // ----------------------------------------------------------------------------
+ // Init of an alpha complex from the list of points
+ // ----------------------------------------------------------------------------
+ Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_points(points);
+
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() == points.size());
+
+ // Test to the limit
+ BOOST_CHECK_THROW (alpha_complex_from_points.get_point(0), std::out_of_range);
+
+ Gudhi::Simplex_tree<> simplex_tree;
+ BOOST_CHECK(!alpha_complex_from_points.create_complex(simplex_tree));
+
+ std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
+ BOOST_CHECK(simplex_tree.num_simplices() == 0);
+
+ std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
+ BOOST_CHECK(simplex_tree.dimension() == -1);
+
+ std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
+ BOOST_CHECK(simplex_tree.num_vertices() == points.size());
+}
diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
index 27b671dd..b474917f 100644
--- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
@@ -13,75 +13,17 @@
#include <boost/test/unit_test.hpp>
#include <boost/mpl/list.hpp>
-#include <CGAL/Delaunay_triangulation.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Epeck_d.h>
-#include <cmath> // float comparison
-#include <limits>
+#include <stdexcept> // std::out_of_range
#include <string>
#include <vector>
#include <gudhi/Alpha_complex.h>
-// to construct a simplex_tree from Delaunay_triangulation
-#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Unitary_tests_utils.h>
-// Use dynamic_dimension_tag for the user to be able to set dimension
-typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
-// Use static dimension_tag for the user not to be able to set dimension
-typedef CGAL::Epeck_d< CGAL::Dimension_tag<3> > Exact_kernel_s;
-// Use dynamic_dimension_tag for the user to be able to set dimension
-typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
-// Use static dimension_tag for the user not to be able to set dimension
-typedef CGAL::Epick_d< CGAL::Dimension_tag<3> > Inexact_kernel_s;
-// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
-
-typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of_kernel_variants) {
- // ----------------------------------------------------------------------------
- //
- // Init of an alpha-complex from a OFF file
- //
- // ----------------------------------------------------------------------------
- std::string off_file_name("alphacomplexdoc.off");
- double max_alpha_square_value = 60.0;
- std::cout << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" <<
- max_alpha_square_value << "==========" << std::endl;
-
- Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_file(off_file_name);
-
- Gudhi::Simplex_tree<> simplex_tree_60;
- BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_60, max_alpha_square_value));
-
- std::cout << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl;
- BOOST_CHECK(simplex_tree_60.dimension() == 2);
-
- std::cout << "simplex_tree_60.num_vertices()=" << simplex_tree_60.num_vertices() << std::endl;
- BOOST_CHECK(simplex_tree_60.num_vertices() == 7);
-
- std::cout << "simplex_tree_60.num_simplices()=" << simplex_tree_60.num_simplices() << std::endl;
- BOOST_CHECK(simplex_tree_60.num_simplices() == 25);
-
- max_alpha_square_value = 59.0;
- std::cout << "========== OFF FILE NAME = " << off_file_name << " - alpha²=" <<
- max_alpha_square_value << "==========" << std::endl;
-
- Gudhi::Simplex_tree<> simplex_tree_59;
- BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_59, max_alpha_square_value));
-
- std::cout << "simplex_tree_59.dimension()=" << simplex_tree_59.dimension() << std::endl;
- BOOST_CHECK(simplex_tree_59.dimension() == 2);
-
- std::cout << "simplex_tree_59.num_vertices()=" << simplex_tree_59.num_vertices() << std::endl;
- BOOST_CHECK(simplex_tree_59.num_vertices() == 7);
-
- std::cout << "simplex_tree_59.num_simplices()=" << simplex_tree_59.num_simplices() << std::endl;
- BOOST_CHECK(simplex_tree_59.num_simplices() == 23);
-}
-
// Use static dimension_tag for the user not to be able to set dimension
typedef CGAL::Epeck_d< CGAL::Dimension_tag<4> > Kernel_4;
typedef Kernel_4::Point_d Point_4;
@@ -115,30 +57,33 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
// ----------------------------------------------------------------------------
Gudhi::alpha_complex::Alpha_complex<Kernel_4> alpha_complex_from_points(points);
- std::cout << "========== Alpha_complex_from_points ==========" << std::endl;
+ std::clog << "========== Alpha_complex_from_points ==========" << std::endl;
Gudhi::Simplex_tree<> simplex_tree;
BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() == points.size());
+
// Another way to check num_simplices
- std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
num_simplices++;
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
BOOST_CHECK(num_simplices == 15);
- std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
+ std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
BOOST_CHECK(simplex_tree.num_simplices() == 15);
- std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
+ std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
BOOST_CHECK(simplex_tree.dimension() == 3);
- std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
+ std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
BOOST_CHECK(simplex_tree.num_vertices() == points.size());
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
@@ -162,22 +107,22 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
}
Point_4 p0 = alpha_complex_from_points.get_point(0);
- std::cout << "alpha_complex_from_points.get_point(0)=" << p0 << std::endl;
+ std::clog << "alpha_complex_from_points.get_point(0)=" << p0 << std::endl;
BOOST_CHECK(4 == p0.dimension());
BOOST_CHECK(is_point_in_list(points, p0));
Point_4 p1 = alpha_complex_from_points.get_point(1);
- std::cout << "alpha_complex_from_points.get_point(1)=" << p1 << std::endl;
+ std::clog << "alpha_complex_from_points.get_point(1)=" << p1 << std::endl;
BOOST_CHECK(4 == p1.dimension());
BOOST_CHECK(is_point_in_list(points, p1));
Point_4 p2 = alpha_complex_from_points.get_point(2);
- std::cout << "alpha_complex_from_points.get_point(2)=" << p2 << std::endl;
+ std::clog << "alpha_complex_from_points.get_point(2)=" << p2 << std::endl;
BOOST_CHECK(4 == p2.dimension());
BOOST_CHECK(is_point_in_list(points, p2));
Point_4 p3 = alpha_complex_from_points.get_point(3);
- std::cout << "alpha_complex_from_points.get_point(3)=" << p3 << std::endl;
+ std::clog << "alpha_complex_from_points.get_point(3)=" << p3 << std::endl;
BOOST_CHECK(4 == p3.dimension());
BOOST_CHECK(is_point_in_list(points, p3));
@@ -188,30 +133,27 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
// Test after prune_above_filtration
bool modified = simplex_tree.prune_above_filtration(0.6);
- if (modified) {
- simplex_tree.initialize_filtration();
- }
BOOST_CHECK(modified);
// Another way to check num_simplices
- std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
num_simplices = 0;
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
num_simplices++;
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
BOOST_CHECK(num_simplices == 10);
- std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
+ std::clog << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
BOOST_CHECK(simplex_tree.num_simplices() == 10);
- std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
+ std::clog << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
BOOST_CHECK(simplex_tree.dimension() == 1);
- std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
+ std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
BOOST_CHECK(simplex_tree.num_vertices() == 4);
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
@@ -230,41 +172,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
}
-BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, list_of_kernel_variants) {
- std::cout << "========== Alpha_complex_from_empty_points ==========" << std::endl;
-
- // ----------------------------------------------------------------------------
- // Init of an empty list of points
- // ----------------------------------------------------------------------------
- std::vector<typename TestedKernel::Point_d> points;
-
- // ----------------------------------------------------------------------------
- // Init of an alpha complex from the list of points
- // ----------------------------------------------------------------------------
- Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_points(points);
-
- // Test to the limit
- BOOST_CHECK_THROW (alpha_complex_from_points.get_point(0), std::out_of_range);
-
- Gudhi::Simplex_tree<> simplex_tree;
- BOOST_CHECK(!alpha_complex_from_points.create_complex(simplex_tree));
-
- std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
- BOOST_CHECK(simplex_tree.num_simplices() == 0);
-
- std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
- BOOST_CHECK(simplex_tree.dimension() == -1);
-
- std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
- BOOST_CHECK(simplex_tree.num_vertices() == points.size());
-}
using Inexact_kernel_2 = CGAL::Epick_d< CGAL::Dimension_tag<2> >;
using Exact_kernel_2 = CGAL::Epeck_d< CGAL::Dimension_tag<2> >;
using list_of_kernel_2_variants = boost::mpl::list<Inexact_kernel_2, Exact_kernel_2>;
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel, list_of_kernel_2_variants) {
- std::cout << "========== Alpha_complex_with_duplicated_points ==========" << std::endl;
+ std::clog << "========== Alpha_complex_with_duplicated_points ==========" << std::endl;
using Point = typename TestedKernel::Point_d;
using Vector_of_points = std::vector<Point>;
@@ -287,14 +201,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel
// ----------------------------------------------------------------------------
// Init of an alpha complex from the list of points
// ----------------------------------------------------------------------------
- std::cout << "Init" << std::endl;
+ std::clog << "Init" << std::endl;
Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_points(points);
Gudhi::Simplex_tree<> simplex_tree;
- std::cout << "create_complex" << std::endl;
+ std::clog << "create_complex" << std::endl;
BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
- std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices()
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() < points.size());
+
+ std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices()
<< std::endl;
BOOST_CHECK(simplex_tree.num_vertices() < points.size());
}
diff --git a/src/Alpha_complex/test/Alpha_kernel_d_unit_test.cpp b/src/Alpha_complex/test/Alpha_kernel_d_unit_test.cpp
new file mode 100644
index 00000000..6da4c084
--- /dev/null
+++ b/src/Alpha_complex/test/Alpha_kernel_d_unit_test.cpp
@@ -0,0 +1,109 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "alpha_kernel_d"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+#include <CGAL/NT_converter.h>
+
+#include <iostream>
+#include <vector>
+#include <utility> // for std::pair
+
+#include <gudhi/Alpha_complex/Alpha_kernel_d.h>
+#include <gudhi/Unitary_tests_utils.h>
+
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<4> > Exact_kernel_s;
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dimension_tag<4> > Inexact_kernel_s;
+// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
+
+typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_kernel_d_dimension, TestedKernel, list_of_kernel_variants) {
+ // Check that, for a 4d point (weighted or not), get_dimension returns 4.
+
+ Gudhi::alpha_complex::Alpha_kernel_d<TestedKernel, false> kernel;
+ std::vector<double> p0 {0., 1., 2., 3.};
+ typename TestedKernel::Point_d p0_d(p0.begin(), p0.end());
+
+ std::clog << "Dimension is " << kernel.get_dimension(p0_d) << std::endl;
+ BOOST_CHECK(kernel.get_dimension(p0_d) == 4);
+
+ Gudhi::alpha_complex::Alpha_kernel_d<TestedKernel, true> w_kernel;
+ typename TestedKernel::Weighted_point_d w_p0_d(p0_d, 10.);
+
+ std::clog << "Dimension is " << w_kernel.get_dimension(w_p0_d) << std::endl;
+ BOOST_CHECK(w_kernel.get_dimension(w_p0_d) == 4);
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_kernel_d_sphere, TestedKernel, list_of_kernel_variants) {
+ // Check, with 5 points on a 3-sphere, that get_sphere returns the same center and squared radius
+ // for the dD unweighted kernel as for the dD weighted kernel with all weights set to 0.
+
+ using Unweighted_kernel = Gudhi::alpha_complex::Alpha_kernel_d<TestedKernel, false>;
+ // Sphere: (x-1)² + (y-1)² + z² + t² = 1
+ // At least 5 points for a 3-sphere
+ std::vector<double> p0 {1., 0., 0., 0.};
+ std::vector<double> p1 {0., 1., 0., 0.};
+ std::vector<double> p2 {1., 1., 1., 0.};
+ std::vector<double> p3 {1., 1., 0., 1.};
+ std::vector<double> p4 {1., 1., -1., 0.};
+
+ using Point_d = typename Unweighted_kernel::Point_d;
+ std::vector<Point_d> unw_pts;
+ unw_pts.emplace_back(p0.begin(), p0.end());
+ unw_pts.emplace_back(p1.begin(), p1.end());
+ unw_pts.emplace_back(p2.begin(), p2.end());
+ unw_pts.emplace_back(p3.begin(), p3.end());
+ unw_pts.emplace_back(p4.begin(), p4.end());
+
+ Unweighted_kernel kernel;
+ auto unw_sphere = kernel.get_sphere(unw_pts.cbegin(), unw_pts.cend());
+
+ std::clog << "Center is " << unw_sphere.first << " - squared radius is " << unw_sphere.second << std::endl;
+
+ using Weighted_kernel = Gudhi::alpha_complex::Alpha_kernel_d<TestedKernel, true>;
+
+ using Weighted_point_d = typename Weighted_kernel::Weighted_point_d;
+ using Bare_point_d = typename Weighted_kernel::Bare_point_d;
+ std::vector<Weighted_point_d> w_pts;
+ w_pts.emplace_back(Bare_point_d(p0.begin(), p0.end()), 0.);
+ w_pts.emplace_back(Bare_point_d(p1.begin(), p1.end()), 0.);
+ w_pts.emplace_back(Bare_point_d(p2.begin(), p2.end()), 0.);
+ w_pts.emplace_back(Bare_point_d(p3.begin(), p3.end()), 0.);
+ w_pts.emplace_back(Bare_point_d(p4.begin(), p4.end()), 0.);
+
+ Weighted_kernel w_kernel;
+ auto w_sphere = w_kernel.get_sphere(w_pts.cbegin(), w_pts.cend());
+
+ std::clog << "Center is " << w_sphere.point() << " - squared radius is " << w_sphere.weight() << std::endl;
+
+ CGAL::NT_converter<typename Weighted_kernel::FT, double> cast_to_double;
+ // The results shall be the same with weights = 0.
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(cast_to_double(unw_sphere.second), cast_to_double(w_sphere.weight()));
+ BOOST_CHECK(unw_sphere.first == w_sphere.point());
+
+ auto unw_sq_rd = kernel.get_squared_radius(unw_pts.cbegin(), unw_pts.cend());
+ std::clog << "Squared radius is " << unw_sq_rd << std::endl;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(cast_to_double(unw_sphere.second), cast_to_double(unw_sq_rd));
+ auto w_sq_rd = w_kernel.get_squared_radius(w_pts.cbegin(), w_pts.cend());
+ std::clog << "Squared radius is " << w_sq_rd << std::endl;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(cast_to_double(w_sphere.weight()), cast_to_double(w_sq_rd));
+}
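For reference, the circumscribed sphere the test expects can be verified by hand: all five sample points lie at squared distance 1 from (1, 1, 0, 0), matching the annotated equation (x-1)² + (y-1)² + z² + t² = 1, so get_sphere should report that center and a squared radius of 1. A standalone check in plain doubles, independent of CGAL:

    #include <array>
    #include <iostream>

    int main() {
      // Sphere annotated in the test: center (1, 1, 0, 0), squared radius 1.
      const std::array<double, 4> center{1., 1., 0., 0.};
      const std::array<std::array<double, 4>, 5> pts{{{1., 0., 0., 0.},
                                                      {0., 1., 0., 0.},
                                                      {1., 1., 1., 0.},
                                                      {1., 1., 0., 1.},
                                                      {1., 1., -1., 0.}}};
      for (const auto& p : pts) {
        double sq_dist = 0.;
        for (int i = 0; i < 4; ++i) sq_dist += (p[i] - center[i]) * (p[i] - center[i]);
        std::cout << "squared distance to center = " << sq_dist << std::endl;  // 1 every time
      }
    }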
diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt
index 0476c6d4..dd2c235f 100644
--- a/src/Alpha_complex/test/CMakeLists.txt
+++ b/src/Alpha_complex/test/CMakeLists.txt
@@ -8,11 +8,23 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable ( Alpha_complex_test_unit Alpha_complex_unit_test.cpp )
target_link_libraries(Alpha_complex_test_unit ${CGAL_LIBRARY})
+ add_executable ( Alpha_complex_dim3_test_unit Alpha_complex_dim3_unit_test.cpp )
+ target_link_libraries(Alpha_complex_dim3_test_unit ${CGAL_LIBRARY})
+ add_executable ( Delaunay_complex_test_unit Delaunay_complex_unit_test.cpp )
+ target_link_libraries(Delaunay_complex_test_unit ${CGAL_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Alpha_complex_test_unit ${TBB_LIBRARIES})
+ target_link_libraries(Alpha_complex_dim3_test_unit ${TBB_LIBRARIES})
+ target_link_libraries(Delaunay_complex_test_unit ${TBB_LIBRARIES})
endif()
gudhi_add_boost_test(Alpha_complex_test_unit)
+ gudhi_add_boost_test(Alpha_complex_dim3_test_unit)
+ gudhi_add_boost_test(Delaunay_complex_test_unit)
+
+endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+
+if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
add_executable ( Alpha_complex_3d_test_unit Alpha_complex_3d_unit_test.cpp )
target_link_libraries(Alpha_complex_3d_test_unit ${CGAL_LIBRARY})
@@ -34,4 +46,35 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
gudhi_add_boost_test(Periodic_alpha_complex_3d_test_unit)
gudhi_add_boost_test(Weighted_periodic_alpha_complex_3d_test_unit)
-endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ add_executable ( Alpha_kernel_d_test_unit Alpha_kernel_d_unit_test.cpp )
+ target_link_libraries(Alpha_kernel_d_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Alpha_kernel_d_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Alpha_kernel_d_test_unit)
+
+ add_executable ( Weighted_alpha_complex_test_unit Weighted_alpha_complex_unit_test.cpp )
+ target_link_libraries(Weighted_alpha_complex_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Weighted_alpha_complex_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Weighted_alpha_complex_test_unit)
+
+ add_executable ( Weighted_alpha_complex_non_visible_points_test_unit Weighted_alpha_complex_non_visible_points_unit_test.cpp )
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Weighted_alpha_complex_non_visible_points_test_unit)
+
+ add_executable ( Zero_weighted_alpha_complex_test_unit Zero_weighted_alpha_complex_unit_test.cpp )
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Zero_weighted_alpha_complex_test_unit)
+
+endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
diff --git a/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp
new file mode 100644
index 00000000..c1cc1fab
--- /dev/null
+++ b/src/Alpha_complex/test/Delaunay_complex_unit_test.cpp
@@ -0,0 +1,68 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "delaunay_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+#include <limits> // NaN
+#include <cmath> // for std::isnan
+#include <iostream> // for std::clog
+
+#include <gudhi/Alpha_complex.h>
+// to construct a simplex_tree from Delaunay_triangulation
+#include <gudhi/graph_simplicial_complex.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Unitary_tests_utils.h>
+#include <gudhi/random_point_generators.h>
+
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<5> > Exact_kernel_s;
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dimension_tag<5> > Inexact_kernel_s;
+// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
+
+typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
+
+using Simplex_tree = Gudhi::Simplex_tree<>;
+using Simplex_handle = Simplex_tree::Simplex_handle;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Delaunay_complex_from_random_points, TestedKernel, list_of_kernel_variants) {
+ std::clog << "*****************************************************************************************************";
+ using Point = typename TestedKernel::Point_d;
+ std::vector<Point> points;
+ // 10 points on a 4-sphere
+ points = Gudhi::generate_points_on_sphere_d<TestedKernel>(10, 5, 1.);
+
+ Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex(points);
+
+ // Alpha complex
+ Simplex_tree stree_from_alpha_complex;
+ BOOST_CHECK(alpha_complex.create_complex(stree_from_alpha_complex));
+
+ // Delaunay complex
+ Simplex_tree stree_from_delaunay_complex;
+ BOOST_CHECK(alpha_complex.create_complex(stree_from_delaunay_complex, 0., false, true));
+
+ // Check all the simplices from alpha complex are in the Delaunay complex
+ for (auto f_simplex : stree_from_alpha_complex.complex_simplex_range()) {
+ Simplex_handle sh = stree_from_delaunay_complex.find(stree_from_alpha_complex.simplex_vertex_range(f_simplex));
+ BOOST_CHECK(sh != stree_from_delaunay_complex.null_simplex());
+ BOOST_CHECK(std::isnan(stree_from_delaunay_complex.filtration(sh)));
+ }
+}
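The Delaunay-complex construction above hinges on the fourth argument of create_complex: when default_filtration_value is true, alpha values are not computed and every simplex keeps the simplex tree's default value (NaN), which is exactly what the std::isnan check asserts. A minimal usage sketch mirroring the call in the test (argument order as exercised there: complex, max_alpha_square, exact_version, default_filtration_value):

    #include <CGAL/Epick_d.h>

    #include <cmath>    // for std::isnan
    #include <iostream>

    #include <gudhi/Alpha_complex.h>
    #include <gudhi/Simplex_tree.h>
    #include <gudhi/random_point_generators.h>

    int main() {
      using Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
      // 10 points on a unit 4-sphere, as in the test
      auto points = Gudhi::generate_points_on_sphere_d<Kernel>(10, 5, 1.);
      Gudhi::alpha_complex::Alpha_complex<Kernel> alpha(points);
      Gudhi::Simplex_tree<> delaunay;
      // default_filtration_value = true: skip the alpha value computation
      alpha.create_complex(delaunay, 0., false, true);
      for (auto sh : delaunay.complex_simplex_range())
        if (!std::isnan(delaunay.filtration(sh))) return 1;  // all values stay NaN
      std::cout << delaunay.num_simplices() << " Delaunay simplices" << std::endl;
      return 0;
    }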
diff --git a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
index 731763fa..9eef920b 100644
--- a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
@@ -43,14 +43,14 @@ typedef boost::mpl::list<Fast_periodic_alpha_complex_3d, Safe_periodic_alpha_com
periodic_variants_type_list;
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_periodic_throw, Periodic_alpha_complex_3d, periodic_variants_type_list) {
- std::cout << "Periodic alpha complex 3d exception throw" << std::endl;
+ std::clog << "Periodic alpha complex 3d exception throw" << std::endl;
using Bare_point_3 = typename Periodic_alpha_complex_3d::Bare_point_3;
std::vector<Bare_point_3> p_points;
// Not important, this is not what we want to check
p_points.push_back(Bare_point_3(0.0, 0.0, 0.0));
- std::cout << "Check exception throw in debug mode" << std::endl;
+ std::clog << "Check exception throw in debug mode" << std::endl;
// Check it throws an exception when the cuboid is not iso
BOOST_CHECK_THROW(Periodic_alpha_complex_3d periodic_alpha_complex(p_points, 0., 0., 0., 0.9, 1., 1.),
std::invalid_argument);
@@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ---------------------
// Fast periodic version
// ---------------------
- std::cout << "Fast periodic alpha complex 3d" << std::endl;
+ std::clog << "Fast periodic alpha complex 3d" << std::endl;
using Creator = CGAL::Creator_uniform_3<double, Fast_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
@@ -106,7 +106,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ----------------------
// Exact periodic version
// ----------------------
- std::cout << "Exact periodic alpha complex 3d" << std::endl;
+ std::clog << "Exact periodic alpha complex 3d" << std::endl;
std::vector<Exact_periodic_alpha_complex_3d::Bare_point_3> e_p_points;
@@ -122,13 +122,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ---------------------
// Compare both versions
// ---------------------
- std::cout << "Exact periodic alpha complex 3d is of dimension " << exact_stree.dimension() << " - Non exact is "
+ std::clog << "Exact periodic alpha complex 3d is of dimension " << exact_stree.dimension() << " - Non exact is "
<< stree.dimension() << std::endl;
BOOST_CHECK(exact_stree.dimension() == stree.dimension());
- std::cout << "Exact periodic alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Non exact is "
+ std::clog << "Exact periodic alpha complex 3d num_simplices " << exact_stree.num_simplices() << " - Non exact is "
<< stree.num_simplices() << std::endl;
BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices());
- std::cout << "Exact periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Non exact is "
+ std::clog << "Exact periodic alpha complex 3d num_vertices " << exact_stree.num_vertices() << " - Non exact is "
<< stree.num_vertices() << std::endl;
BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices());
@@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ----------------------
// Safe periodic version
// ----------------------
- std::cout << "Safe periodic alpha complex 3d" << std::endl;
+ std::clog << "Safe periodic alpha complex 3d" << std::endl;
std::vector<Safe_periodic_alpha_complex_3d::Bare_point_3> s_p_points;
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
index 8035f6e8..6b31bea6 100644
--- a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
@@ -55,13 +55,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_throw, Weighted_alpha_compl
// weights size is different from w_points size to make weighted Alpha_complex_3d throw in debug mode
std::vector<double> weights = {0.01, 0.005, 0.006, 0.01, 0.009, 0.001};
- std::cout << "Check exception throw in debug mode" << std::endl;
+ std::clog << "Check exception throw in debug mode" << std::endl;
BOOST_CHECK_THROW(Weighted_alpha_complex_3d wac(w_points, weights), std::invalid_argument);
}
#endif
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, weighted_variants_type_list) {
- std::cout << "Weighted alpha complex 3d from points and weights" << std::endl;
+ std::clog << "Weighted alpha complex 3d from points and weights" << std::endl;
using Bare_point_3 = typename Weighted_alpha_complex_3d::Bare_point_3;
std::vector<Bare_point_3> w_points;
w_points.push_back(Bare_point_3(0.0, 0.0, 0.0));
@@ -78,7 +78,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d,
Gudhi::Simplex_tree<> stree;
alpha_complex_p_a_w.create_complex(stree);
- std::cout << "Weighted alpha complex 3d from weighted points" << std::endl;
+ std::clog << "Weighted alpha complex 3d from weighted points" << std::endl;
using Weighted_point_3 = typename Weighted_alpha_complex_3d::Weighted_point_3;
std::vector<Weighted_point_3> weighted_points;
@@ -112,13 +112,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d,
// ---------------------
// Compare both versions
// ---------------------
- std::cout << "Weighted alpha complex 3d is of dimension " << stree_bis.dimension() << " - versus "
+ std::clog << "Weighted alpha complex 3d is of dimension " << stree_bis.dimension() << " - versus "
<< stree.dimension() << std::endl;
BOOST_CHECK(stree_bis.dimension() == stree.dimension());
- std::cout << "Weighted alpha complex 3d num_simplices " << stree_bis.num_simplices() << " - versus "
+ std::clog << "Weighted alpha complex 3d num_simplices " << stree_bis.num_simplices() << " - versus "
<< stree.num_simplices() << std::endl;
BOOST_CHECK(stree_bis.num_simplices() == stree.num_simplices());
- std::cout << "Weighted alpha complex 3d num_vertices " << stree_bis.num_vertices() << " - versus "
+ std::clog << "Weighted alpha complex 3d num_vertices " << stree_bis.num_vertices() << " - versus "
<< stree.num_vertices() << std::endl;
BOOST_CHECK(stree_bis.num_vertices() == stree.num_vertices());
@@ -127,18 +127,18 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d,
std::vector<int> simplex;
std::vector<int> exact_simplex;
#ifdef DEBUG_TRACES
- std::cout << " ( ";
+ std::clog << " ( ";
#endif
for (auto vertex : stree.simplex_vertex_range(*sh)) {
simplex.push_back(vertex);
#ifdef DEBUG_TRACES
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
#endif
}
#ifdef DEBUG_TRACES
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << stree.filtration(*sh) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
#endif
// Find it in the exact structure
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
new file mode 100644
index 00000000..dd83c1da
--- /dev/null
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
@@ -0,0 +1,60 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "weighted_alpha_complex_non_visible_points"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+#include <iostream> // for std::clog
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+
+
+using list_of_1d_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<1>>,
+ CGAL::Epick_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epick_d< CGAL::Dimension_tag<1>>
+ >;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) {
+ // Check that, for 2 close weighted 1-d points, where the first has a weight high enough to hide the second
+ // (small-weight) one, the hidden point gets the same high filtration value as the edge formed by the 2 points
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::vector<double> p1 {0.};
+ points.emplace_back(p1.begin(), p1.end());
+ // close enough points
+ std::vector<double> p2 {0.1};
+ points.emplace_back(p2.begin(), p2.end());
+ std::vector<typename Kernel::FT> weights {100., 0.01};
+
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex(points, weights);
+ Gudhi::Simplex_tree<> stree;
+ BOOST_CHECK(alpha_complex.create_complex(stree));
+
+ std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : stree.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(stree.filtration(stree.find({0})) == -100.);
+ BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1})));
+ BOOST_CHECK(stree.filtration(stree.find({1})) > 100000);
+} \ No newline at end of file
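The values asserted in this test follow from a short power-distance computation. The power of a point x with respect to a weighted point (p, w) is (x - p)² - w, so vertex {0} appears at -w0 = -100, and the hidden vertex {1} can only appear together with the edge {0, 1}, at the common power value taken at the radical point of the two weighted points, here 249900, which is consistent with the > 100000 check. A standalone computation of these numbers:

    #include <iostream>

    // Power of x with respect to a weighted point (p, w): (x - p)² - w.
    double power(double x, double p, double w) { return (x - p) * (x - p) - w; }

    int main() {
      const double p0 = 0.0, w0 = 100.0;  // heavy point: vertex {0} appears at -w0 = -100
      const double p1 = 0.1, w1 = 0.01;   // light point, hidden by the heavy one
      // The two power functions agree at the radical point x*, the solution of
      // x² - w0 = (x - p1)² - w1, i.e. x* = (p1² + w0 - w1) / (2 p1):
      const double x_star = (p1 * p1 + w0 - w1) / (2 * p1);
      std::cout << "x* = " << x_star << std::endl;  // 500
      std::cout << "common power = " << power(x_star, p0, w0) << " = "
                << power(x_star, p1, w1) << std::endl;  // 249900: value of {1} and {0, 1}
    }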
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
new file mode 100644
index 00000000..875704ee
--- /dev/null
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
@@ -0,0 +1,127 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "weighted_alpha_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epeck_d.h>
+#include <CGAL/NT_converter.h>
+
+#include <vector>
+#include <random>
+#include <array>
+#include <map> // for std::map
+#include <algorithm> // for std::sort
+#include <iostream> // for std::clog
+#include <cmath> // for std::fabs
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Alpha_complex_3d.h>
+#include <gudhi/Simplex_tree.h>
+
+BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) {
+ // Check that, for random weighted 3d points, the 3D version and the dD version give the same results within some tolerance
+
+ // Random points construction
+ using Kernel_dD = CGAL::Epeck_d< CGAL::Dimension_tag<3> >;
+ using Bare_point_d = typename Kernel_dD::Point_d;
+ using Weighted_point_d = typename Kernel_dD::Weighted_point_d;
+ std::vector<Weighted_point_d> w_points_d;
+
+ using Exact_weighted_alpha_complex_3d =
+ Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::EXACT, true, false>;
+ using Bare_point_3 = typename Exact_weighted_alpha_complex_3d::Bare_point_3;
+ using Weighted_point_3 = typename Exact_weighted_alpha_complex_3d::Weighted_point_3;
+ std::vector<Weighted_point_3> w_points_3;
+
+ std::uniform_real_distribution<double> rd_pts(-10., 10.);
+ std::uniform_real_distribution<double> rd_wghts(-0.5, 0.5);
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ for (int idx = 0; idx < 20; idx++) {
+ std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
+ double weight = rd_wghts(rand_engine);
+ w_points_d.emplace_back(Bare_point_d(point.begin(), point.end()), weight);
+ w_points_3.emplace_back(Bare_point_3(point[0], point[1], point[2]), weight);
+ }
+
+ // Structures necessary for comparison
+ using Points = std::vector<std::array<double,3>>;
+ using Points_and_filtrations = std::map<Points, double>;
+ Points_and_filtrations pts_fltr_dD;
+ Points_and_filtrations pts_fltr_3d;
+
+ // Weighted alpha complex for dD version
+ Gudhi::alpha_complex::Alpha_complex<Kernel_dD, true> alpha_complex_dD_from_weighted_points(w_points_d);
+ Gudhi::Simplex_tree<> w_simplex_d;
+ BOOST_CHECK(alpha_complex_dD_from_weighted_points.create_complex(w_simplex_d));
+
+ std::clog << "Iterator on weighted alpha complex dD simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : w_simplex_d.filtration_simplex_range()) {
+ Points points;
+ for (auto vertex : w_simplex_d.simplex_vertex_range(f_simplex)) {
+ CGAL::NT_converter<Kernel_dD::RT, double> cgal_converter;
+ Bare_point_d pt = alpha_complex_dD_from_weighted_points.get_point(vertex).point();
+ points.push_back({cgal_converter(pt[0]), cgal_converter(pt[1]), cgal_converter(pt[2])});
+ }
+ std::clog << " ( ";
+ std::sort (points.begin(), points.end());
+ for (auto point : points) {
+ std::clog << point[0] << " " << point[1] << " " << point[2] << " | ";
+ }
+ std::clog << ") -> " << "[" << w_simplex_d.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
+ pts_fltr_dD[points] = w_simplex_d.filtration(f_simplex);
+ }
+
+ // Weighted alpha complex for 3D version
+ Exact_weighted_alpha_complex_3d alpha_complex_3D_from_weighted_points(w_points_3);
+ Gudhi::Simplex_tree<> w_simplex_3;
+ BOOST_CHECK(alpha_complex_3D_from_weighted_points.create_complex(w_simplex_3));
+
+ std::clog << "Iterator on weighted alpha complex 3D simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : w_simplex_3.filtration_simplex_range()) {
+ Points points;
+ for (auto vertex : w_simplex_3.simplex_vertex_range(f_simplex)) {
+ Bare_point_3 pt = alpha_complex_3D_from_weighted_points.get_point(vertex).point();
+ CGAL::NT_converter<Exact_weighted_alpha_complex_3d::Kernel::RT, double> cgal_converter;
+ points.push_back({cgal_converter(pt[0]), cgal_converter(pt[1]), cgal_converter(pt[2])});
+ }
+ std::clog << " ( ";
+ std::sort (points.begin(), points.end());
+ for (auto point : points) {
+ std::clog << point[0] << " " << point[1] << " " << point[2] << " | ";
+ }
+ std::clog << ") -> " << "[" << w_simplex_3.filtration(f_simplex) << "] " << std::endl;
+ pts_fltr_3d[points] = w_simplex_3.filtration(f_simplex);
+ }
+
+ // Compares structures
+ auto d3_itr = pts_fltr_3d.begin();
+ auto dD_itr = pts_fltr_dD.begin();
+ for (; d3_itr != pts_fltr_3d.end() && dD_itr != pts_fltr_dD.end(); ++d3_itr) {
+ if (d3_itr->first != dD_itr->first) {
+ for(auto point : d3_itr->first)
+ std::clog << point[0] << " " << point[1] << " " << point[2] << " | ";
+ std::clog << " versus ";
+ for(auto point : dD_itr->first)
+ std::clog << point[0] << " " << point[1] << " " << point[2] << " | ";
+ std::clog << std::endl;
+ BOOST_CHECK(false);
+ }
+ // In safe mode, relative error is less than 1e-5 (can be changed with set_relative_precision_of_to_double)
+ if (std::fabs(d3_itr->second - dD_itr->second) > 1e-5 * (std::fabs(d3_itr->second) + std::fabs(dD_itr->second))) {
+ std::clog << d3_itr->second << " versus " << dD_itr->second << " diff "
+ << std::fabs(d3_itr->second - dD_itr->second) << std::endl;
+ BOOST_CHECK(false);
+ }
+ ++dD_itr;
+ }
+} \ No newline at end of file
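The tolerance test at the end of the file is a symmetric relative-error comparison, |a - b| <= 1e-5 (|a| + |b|), where 1e-5 is the default precision of CGAL's to_double on lazy exact values, as the in-code comment notes. A standalone sketch of that predicate:

    #include <cmath>
    #include <iostream>

    // Symmetric relative-error predicate used in the comparison loop above;
    // 1e-5 matches CGAL's default to_double precision on lazy exact numbers.
    bool roughly_equal(double a, double b, double rel = 1e-5) {
      return std::fabs(a - b) <= rel * (std::fabs(a) + std::fabs(b));
    }

    int main() {
      std::cout << std::boolalpha
                << roughly_equal(1.0, 1.0000099) << ' '     // true: relative error ~5e-6
                << roughly_equal(1.0, 1.001) << std::endl;  // false: relative error ~5e-4
    }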
diff --git a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
index b09e92d5..610b9f3d 100644
--- a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
@@ -45,7 +45,7 @@ typedef boost::mpl::list<Fast_weighted_periodic_alpha_complex_3d, Exact_weighted
#ifdef GUDHI_DEBUG
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_periodic_alpha_complex_3d,
wp_variants_type_list) {
- std::cout << "Weighted periodic alpha complex 3d exception throw" << std::endl;
+ std::clog << "Weighted periodic alpha complex 3d exception throw" << std::endl;
using Creator = CGAL::Creator_uniform_3<double, typename Weighted_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
@@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe
p_weights.push_back(random.get_double(0., 0.01));
}
- std::cout << "Cuboid is not iso exception" << std::endl;
+ std::clog << "Cuboid is not iso exception" << std::endl;
// Check it throws an exception when the cuboid is not iso
BOOST_CHECK_THROW(
Weighted_periodic_alpha_complex_3d wp_alpha_complex(wp_points, p_weights, -1., -1., -1., 0.9, 1., 1.),
@@ -83,7 +83,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe
Weighted_periodic_alpha_complex_3d wp_alpha_complex(wp_points, p_weights, -1., -1., -1., 1., 1., 1.1),
std::invalid_argument);
- std::cout << "0 <= point.weight() < 1/64 * domain_size * domain_size exception" << std::endl;
+ std::clog << "0 <= point.weight() < 1/64 * domain_size * domain_size exception" << std::endl;
// Weights must be in range ]0, 1/64 = 0.015625[
double temp = p_weights[25];
p_weights[25] = 1.0;
@@ -97,7 +97,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe
std::invalid_argument);
p_weights[14] = temp;
- std::cout << "wp_points and p_weights size exception" << std::endl;
+ std::clog << "wp_points and p_weights size exception" << std::endl;
// Weights and points must have the same size
// + 1
p_weights.push_back(1e-10);
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ---------------------
// Fast weighted periodic version
// ---------------------
- std::cout << "Fast weighted periodic alpha complex 3d" << std::endl;
+ std::clog << "Fast weighted periodic alpha complex 3d" << std::endl;
using Creator = CGAL::Creator_uniform_3<double, Fast_weighted_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
@@ -140,7 +140,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ----------------------
// Exact weighted periodic version
// ----------------------
- std::cout << "Exact weighted periodic alpha complex 3d" << std::endl;
+ std::clog << "Exact weighted periodic alpha complex 3d" << std::endl;
std::vector<Exact_weighted_periodic_alpha_complex_3d::Bare_point_3> e_p_points;
@@ -156,13 +156,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ---------------------
// Compare both versions
// ---------------------
- std::cout << "Exact weighted periodic alpha complex 3d is of dimension " << exact_stree.dimension()
+ std::clog << "Exact weighted periodic alpha complex 3d is of dimension " << exact_stree.dimension()
<< " - Non exact is " << stree.dimension() << std::endl;
BOOST_CHECK(exact_stree.dimension() == stree.dimension());
- std::cout << "Exact weighted periodic alpha complex 3d num_simplices " << exact_stree.num_simplices()
+ std::clog << "Exact weighted periodic alpha complex 3d num_simplices " << exact_stree.num_simplices()
<< " - Non exact is " << stree.num_simplices() << std::endl;
BOOST_CHECK(exact_stree.num_simplices() == stree.num_simplices());
- std::cout << "Exact weighted periodic alpha complex 3d num_vertices " << exact_stree.num_vertices()
+ std::clog << "Exact weighted periodic alpha complex 3d num_vertices " << exact_stree.num_vertices()
<< " - Non exact is " << stree.num_vertices() << std::endl;
BOOST_CHECK(exact_stree.num_vertices() == stree.num_vertices());
@@ -189,7 +189,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ----------------------
// Safe weighted periodic version
// ----------------------
- std::cout << "Safe weighted periodic alpha complex 3d" << std::endl;
+ std::clog << "Safe weighted periodic alpha complex 3d" << std::endl;
std::vector<Safe_weighted_periodic_alpha_complex_3d::Bare_point_3> s_p_points;
diff --git a/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
new file mode 100644
index 00000000..b7df07c7
--- /dev/null
+++ b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
@@ -0,0 +1,77 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "zero_weighted_alpha_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+#include <random>
+#include <limits> // for std::numeric_limits
+#include <iostream> // for std::clog
+#include <cmath> // for std::fabs
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Unitary_tests_utils.h>
+
+using list_of_exact_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<4> >
+ > ;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) {
+ // Check that, in exact mode (dynamic and static dimension 4 kernels), the dD unweighted code and the dD code
+ // weighted with all weights at 0 give exactly the same simplex tree (simplices and filtration values).
+
+ // Random points construction
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::uniform_real_distribution<double> rd_pts(-10., 10.);
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ for (int idx = 0; idx < 20; idx++) {
+ std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
+ points.emplace_back(point.begin(), point.end());
+ }
+
+ // Alpha complex from points
+ Gudhi::alpha_complex::Alpha_complex<Kernel, false> alpha_complex_from_points(points);
+ Gudhi::Simplex_tree<> simplex;
+ Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
+ BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true));
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ // Alpha complex from zero weighted points
+ std::vector<typename Kernel::FT> weights(20, 0.);
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_zero_weighted_points(points, weights);
+ Gudhi::Simplex_tree<> zw_simplex;
+ BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true));
+
+ std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : zw_simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(zw_simplex == simplex);
+}
\ No newline at end of file
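The new test above draws random 4-dimensional input; a minimal standalone sketch of the same equivalence check, with a small made-up 2-dimensional point set in place of the random data (the comparison itself does not depend on the input), might look like:

#include <CGAL/Epeck_d.h>
#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
  using Point = Kernel::Point_d;
  // Made-up point set; the two constructions below are only compared to each other
  std::vector<Point> points {Point(1., 0.), Point(0., 1.), Point(0., 0.), Point(1., 1.)};
  const auto infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();

  // dD unweighted alpha complex, exact filtration values
  Gudhi::alpha_complex::Alpha_complex<Kernel, false> unweighted(points);
  Gudhi::Simplex_tree<> st;
  unweighted.create_complex(st, infty, true);

  // dD weighted alpha complex with all weights equal to 0, exact filtration values
  std::vector<Kernel::FT> weights(points.size(), 0.);
  Gudhi::alpha_complex::Alpha_complex<Kernel, true> zero_weighted(points, weights);
  Gudhi::Simplex_tree<> zw_st;
  zero_weighted.create_complex(zw_st, infty, true);

  // The two simplex trees are expected to coincide, simplices and filtration values alike
  std::clog << (zw_st == st ? "identical" : "different") << std::endl;
  return (zw_st == st) ? 0 : 1;
}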
diff --git a/src/Alpha_complex/utilities/CMakeLists.txt b/src/Alpha_complex/utilities/CMakeLists.txt
index 57b92942..303bd0a6 100644
--- a/src/Alpha_complex/utilities/CMakeLists.txt
+++ b/src/Alpha_complex/utilities/CMakeLists.txt
@@ -1,68 +1,82 @@
project(Alpha_complex_utilities)
-if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- add_executable (alpha_complex_persistence alpha_complex_persistence.cpp)
- target_link_libraries(alpha_complex_persistence ${CGAL_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY})
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ if (TARGET Boost::program_options)
+ add_executable (alpha_complex_persistence alpha_complex_persistence.cpp)
+ target_link_libraries(alpha_complex_persistence ${CGAL_LIBRARY} Boost::program_options)
- if (TBB_FOUND)
- target_link_libraries(alpha_complex_persistence ${TBB_LIBRARIES})
- endif(TBB_FOUND)
- add_test(NAME Alpha_complex_utilities_safe_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "safe.pers")
- add_test(NAME Alpha_complex_utilities_fast_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "fast.pers" "-f")
- add_test(NAME Alpha_complex_utilities_exact_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "exact.pers" "-e")
- if (DIFF_PATH)
- add_test(Alpha_complex_utilities_diff_exact_alpha_complex ${DIFF_PATH}
- "exact.pers" "safe.pers")
- add_test(Alpha_complex_utilities_diff_fast_alpha_complex ${DIFF_PATH}
- "fast.pers" "safe.pers")
- endif()
-
- install(TARGETS alpha_complex_persistence DESTINATION bin)
+ if (TBB_FOUND)
+ target_link_libraries(alpha_complex_persistence ${TBB_LIBRARIES})
+ endif(TBB_FOUND)
+ add_test(NAME Alpha_complex_utilities_safe_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "safe.pers")
+ add_test(NAME Alpha_complex_utilities_fast_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "fast.pers" "-f")
+ add_test(NAME Alpha_complex_utilities_exact_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "exact.pers" "-e")
+ if (DIFF_PATH)
+ add_test(Alpha_complex_utilities_diff_exact_alpha_complex ${DIFF_PATH}
+ "exact.pers" "safe.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex PROPERTIES DEPENDS
+ "Alpha_complex_utilities_exact_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence")
- add_executable(alpha_complex_3d_persistence alpha_complex_3d_persistence.cpp)
- target_link_libraries(alpha_complex_3d_persistence ${CGAL_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY})
- if (TBB_FOUND)
- target_link_libraries(alpha_complex_3d_persistence ${TBB_LIBRARIES})
- endif(TBB_FOUND)
+ add_test(Alpha_complex_utilities_diff_fast_alpha_complex ${DIFF_PATH}
+ "fast.pers" "safe.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex PROPERTIES DEPENDS
+ "Alpha_complex_utilities_fast_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence")
+ endif()
- add_test(NAME Alpha_complex_utilities_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "safe_3d.pers")
+ install(TARGETS alpha_complex_persistence DESTINATION bin)
+ endif()
+endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
- add_test(NAME Alpha_complex_utilities_exact_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "exact_3d.pers" "-e")
+if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ if (TARGET Boost::program_options)
+ add_executable(alpha_complex_3d_persistence alpha_complex_3d_persistence.cpp)
+ target_link_libraries(alpha_complex_3d_persistence ${CGAL_LIBRARY} Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(alpha_complex_3d_persistence ${TBB_LIBRARIES})
+ endif(TBB_FOUND)
- add_test(NAME Alpha_complex_utilities_safe_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "fast_3d.pers" "-f")
+ add_test(NAME Alpha_complex_utilities_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
+ "-p" "2" "-m" "0.45" "-o" "safe_3d.pers")
- if (DIFF_PATH)
- add_test(Alpha_complex_utilities_diff_exact_alpha_complex_3d ${DIFF_PATH}
- "exact_3d.pers" "safe_3d.pers")
- add_test(Alpha_complex_utilities_diff_fast_alpha_complex_3d ${DIFF_PATH}
- "fast_3d.pers" "safe_3d.pers")
- endif()
+ add_test(NAME Alpha_complex_utilities_exact_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
+ "-p" "2" "-m" "0.45" "-o" "exact_3d.pers" "-e")
- add_test(NAME Alpha_complex_utilities_periodic_alpha_complex_3d_persistence COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
- "-c" "${CMAKE_SOURCE_DIR}/data/points/iso_cuboid_3_in_0_1.txt"
- "-p" "2" "-m" "0")
+ add_test(NAME Alpha_complex_utilities_fast_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
+ "-p" "2" "-m" "0.45" "-o" "fast_3d.pers" "-f")
- add_test(NAME Alpha_complex_utilities_weighted_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
- "-w" "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.weights"
- "-p" "2" "-m" "0")
+ if (DIFF_PATH)
+ add_test(Alpha_complex_utilities_diff_exact_alpha_complex_3d ${DIFF_PATH}
+ "exact_3d.pers" "safe_3d.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex_3d PROPERTIES DEPENDS
+ "Alpha_complex_utilities_exact_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d")
+ add_test(Alpha_complex_utilities_diff_fast_alpha_complex_3d ${DIFF_PATH}
+ "fast_3d.pers" "safe_3d.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex_3d PROPERTIES DEPENDS
+ "Alpha_complex_utilities_fast_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d")
+ endif()
- add_test(NAME Alpha_complex_utilities_weighted_periodic_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
- "-w" "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.weights"
- "-c" "${CMAKE_SOURCE_DIR}/data/points/iso_cuboid_3_in_0_1.txt"
- "-p" "2" "-m" "0" "-e")
+ add_test(NAME Alpha_complex_utilities_periodic_alpha_complex_3d_persistence COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
+ "-c" "${CMAKE_SOURCE_DIR}/data/points/iso_cuboid_3_in_0_1.txt"
+ "-p" "2" "-m" "0")
- install(TARGETS alpha_complex_3d_persistence DESTINATION bin)
+ add_test(NAME Alpha_complex_utilities_weighted_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
+ "-w" "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.weights"
+ "-p" "2" "-m" "0")
-endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ add_test(NAME Alpha_complex_utilities_weighted_periodic_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.off"
+ "-w" "${CMAKE_SOURCE_DIR}/data/points/grid_10_10_10_in_0_1.weights"
+ "-c" "${CMAKE_SOURCE_DIR}/data/points/iso_cuboid_3_in_0_1.txt"
+ "-p" "2" "-m" "0" "-e")
+
+ install(TARGETS alpha_complex_3d_persistence DESTINATION bin)
+ endif()
+endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
index 929fc2e8..e65d8c6f 100644
--- a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
@@ -222,10 +222,7 @@ int main(int argc, char **argv) {
break;
}
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
- std::cout << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl;
+ std::clog << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl;
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree, true);
// initializes the coefficient field for homology
@@ -237,7 +234,7 @@ int main(int argc, char **argv) {
if (output_file_diag.empty()) {
pcoh.output_diagram();
} else {
- std::cout << "Result in file: " << output_file_diag << std::endl;
+ std::clog << "Result in file: " << output_file_diag << std::endl;
std::ofstream out(output_file_diag);
pcoh.output_diagram(out);
out.close();
@@ -266,7 +263,7 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool
"cuboid-file,c", po::value<std::string>(&cuboid_file),
"Name of file describing the periodic domain. Format is:\n min_hx min_hy min_hz\n max_hx max_hy max_hz")(
"output-file,o", po::value<std::string>(&output_file_diag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-alpha-square-value,r",
po::value<Filtration_value>(&alpha_square_max_value)
->default_value(std::numeric_limits<Filtration_value>::infinity()),
@@ -288,18 +285,18 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool
po::notify(vm);
if (vm.count("help") || !vm.count("input-file") || !vm.count("weight-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a 3D Alpha complex defined on a set of input points.\n";
- std::cout << "3D Alpha complex can be safe (by default) exact or fast, weighted and/or periodic\n\n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients.\n\n";
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a 3D Alpha complex defined on a set of input points.\n";
+ std::clog << "3D Alpha complex can be safe (by default) exact or fast, weighted and/or periodic\n\n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients.\n\n";
- std::cout << "Usage: " << argv[0] << " [options] input-file weight-file\n\n";
- std::cout << visible << std::endl;
+ std::clog << "Usage: " << argv[0] << " [options] input-file weight-file\n\n";
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
index 486347cc..29edbd8e 100644
--- a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
@@ -11,24 +11,88 @@
#include <boost/program_options.hpp>
#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
#include <gudhi/Alpha_complex.h>
#include <gudhi/Persistent_cohomology.h>
// to construct a simplex_tree from alpha complex
#include <gudhi/Simplex_tree.h>
+#include <gudhi/Points_off_io.h>
#include <iostream>
#include <string>
#include <limits> // for numeric_limits
+#include <vector>
+#include <fstream>
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
void program_options(int argc, char *argv[], std::string &off_file_points, bool &exact, bool &fast,
- std::string &output_file_diag, Filtration_value &alpha_square_max_value,
+ std::string &weight_file, std::string &output_file_diag, Filtration_value &alpha_square_max_value,
int &coeff_field_characteristic, Filtration_value &min_persistence);
+template<class Point_d>
+std::vector<Point_d> read_off(const std::string &off_file_points) {
+ Gudhi::Points_off_reader<Point_d> off_reader(off_file_points);
+ if (!off_reader.is_valid()) {
+ std::cerr << "Alpha_complex - Unable to read file " << off_file_points << "\n";
+ exit(-1); // ----- >>
+ }
+ return off_reader.get_point_cloud();
+}
+
+std::vector<double> read_weight_file(const std::string &weight_file) {
+ std::vector<double> weights;
+ // Read weights information from file
+ std::ifstream weights_ifstr(weight_file);
+ if (weights_ifstr.good()) {
+ double weight = 0.0;
+    // Read each weight as a double; the loop stops at end of file or at the first value that fails to parse
+ while (weights_ifstr >> weight) {
+ weights.push_back(weight);
+ }
+ } else {
+ std::cerr << "Unable to read weights file " << weight_file << std::endl;
+ exit(-1);
+ }
+ return weights;
+}
+
+template<class Kernel>
+Simplex_tree create_simplex_tree(const std::string &off_file_points, const std::string &weight_file,
+ bool exact_version, Filtration_value alpha_square_max_value) {
+ Simplex_tree stree;
+ auto points = read_off<typename Kernel::Point_d>(off_file_points);
+
+ if (weight_file != std::string()) {
+ std::vector<double> weights = read_weight_file(weight_file);
+ if (points.size() != weights.size()) {
+ std::cerr << "Alpha_complex - Inconsistency between number of points (" << points.size()
+ << ") and number of weights (" << weights.size() << ")" << "\n";
+ exit(-1); // ----- >>
+ }
+ // Init of an alpha complex from an OFF file
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_file(points, weights);
+
+ if (!alpha_complex_from_file.create_complex(stree, alpha_square_max_value, exact_version)) {
+ std::cerr << "Alpha complex simplicial complex creation failed." << std::endl;
+ exit(-1);
+ }
+ } else {
+ // Init of an alpha complex from an OFF file
+ Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_file(points);
+
+ if (!alpha_complex_from_file.create_complex(stree, alpha_square_max_value, exact_version)) {
+ std::cerr << "Alpha complex simplicial complex creation failed." << std::endl;
+ exit(-1);
+ }
+ }
+ return stree;
+}
+
int main(int argc, char **argv) {
+ std::string weight_file;
std::string off_file_points;
std::string output_file_diag;
bool exact_version = false;
@@ -37,51 +101,34 @@ int main(int argc, char **argv) {
int coeff_field_characteristic;
Filtration_value min_persistence;
- program_options(argc, argv, off_file_points, exact_version, fast_version, output_file_diag, alpha_square_max_value,
- coeff_field_characteristic, min_persistence);
+ program_options(argc, argv, off_file_points, exact_version, fast_version, weight_file, output_file_diag,
+ alpha_square_max_value, coeff_field_characteristic, min_persistence);
if ((exact_version) && (fast_version)) {
std::cerr << "You cannot set the exact and the fast version." << std::endl;
exit(-1);
}
- Simplex_tree simplex;
+ Simplex_tree stree;
if (fast_version) {
// WARNING : CGAL::Epick_d is fast but not safe (unlike CGAL::Epeck_d)
// (i.e. when the points are on a grid)
using Fast_kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
-
- // Init of an alpha complex from an OFF file
- Gudhi::alpha_complex::Alpha_complex<Fast_kernel> alpha_complex_from_file(off_file_points);
-
- if (!alpha_complex_from_file.create_complex(simplex, alpha_square_max_value)) {
- std::cerr << "Fast Alpha complex simplicial complex creation failed." << std::endl;
- exit(-1);
- }
+ stree = create_simplex_tree<Fast_kernel>(off_file_points, weight_file, exact_version, alpha_square_max_value);
} else {
using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
-
- // Init of an alpha complex from an OFF file
- Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_file(off_file_points);
-
- if (!alpha_complex_from_file.create_complex(simplex, alpha_square_max_value, exact_version)) {
- std::cerr << "Alpha complex simplicial complex creation failed." << std::endl;
- exit(-1);
- }
+ stree = create_simplex_tree<Kernel>(off_file_points, weight_file, exact_version, alpha_square_max_value);
}
// ----------------------------------------------------------------------------
// Display information about the alpha complex
// ----------------------------------------------------------------------------
- std::cout << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
- << " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
-
- // Sort the simplices in the order of the filtration
- simplex.initialize_filtration();
+ std::clog << "Simplicial complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
+ << " simplices - " << stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl;
+ std::clog << "Simplex_tree dim: " << stree.dimension() << std::endl;
// Compute the persistence diagram of the complex
Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Gudhi::persistent_cohomology::Field_Zp> pcoh(
- simplex);
+ stree);
// initializes the coefficient field for homology
pcoh.init_coefficients(coeff_field_characteristic);
@@ -91,7 +138,7 @@ int main(int argc, char **argv) {
if (output_file_diag.empty()) {
pcoh.output_diagram();
} else {
- std::cout << "Result in file: " << output_file_diag << std::endl;
+ std::clog << "Result in file: " << output_file_diag << std::endl;
std::ofstream out(output_file_diag);
pcoh.output_diagram(out);
out.close();
@@ -100,7 +147,7 @@ int main(int argc, char **argv) {
}
void program_options(int argc, char *argv[], std::string &off_file_points, bool &exact, bool &fast,
- std::string &output_file_diag, Filtration_value &alpha_square_max_value,
+ std::string &weight_file, std::string &output_file_diag, Filtration_value &alpha_square_max_value,
int &coeff_field_characteristic, Filtration_value &min_persistence) {
namespace po = boost::program_options;
po::options_description hidden("Hidden options");
@@ -113,8 +160,10 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool
"To activate exact version of Alpha complex (default is false, not available if fast is set)")(
"fast,f", po::bool_switch(&fast),
"To activate fast version of Alpha complex (default is false, not available if exact is set)")(
+ "weight-file,w", po::value<std::string>(&weight_file)->default_value(std::string()),
+ "Name of file containing a point weights. Format is one weight per line:\n W1\n ...\n Wn ")(
"output-file,o", po::value<std::string>(&output_file_diag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-alpha-square-value,r", po::value<Filtration_value>(&alpha_square_max_value)
->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal alpha square value for the Alpha complex construction.")(
@@ -135,17 +184,17 @@ void program_options(int argc, char *argv[], std::string &off_file_points, bool
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of an Alpha complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of an Alpha complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
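After this refactoring, the weighted branch of create_simplex_tree reduces to the two-template-parameter Alpha_complex constructor taking points and weights. A minimal sketch with made-up inline data standing in for the OFF and weight file contents, using the safe CGAL::Epeck_d kernel as in the non-fast branch above:

#include <CGAL/Epeck_d.h>
#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
  // Made-up coordinates and weights standing in for the OFF and weight files
  std::vector<std::vector<double>> coords = {
      {1., -1., -1.}, {-8., 1., 2.}, {-4., -1., 0.}, {2., -2., 0.}, {0., 1., 4.}};
  std::vector<Kernel::Point_d> points;
  for (const auto& c : coords) points.emplace_back(c.begin(), c.end());
  std::vector<double> weights = {4., 32., 1., 7., 5.};  // one weight per point

  Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha(points, weights);
  Gudhi::Simplex_tree<> stree;
  const bool exact_version = false;
  const auto infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
  if (!alpha.create_complex(stree, infty, exact_version)) {
    std::cerr << "Alpha complex simplicial complex creation failed." << std::endl;
    return -1;
  }
  std::clog << "Simplicial complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
            << " simplices - " << stree.num_vertices() << " vertices." << std::endl;
  return 0;
}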
diff --git a/src/Alpha_complex/utilities/alphacomplex.md b/src/Alpha_complex/utilities/alphacomplex.md
index 527598a9..1e3b8fab 100644
--- a/src/Alpha_complex/utilities/alphacomplex.md
+++ b/src/Alpha_complex/utilities/alphacomplex.md
@@ -46,6 +46,9 @@ for the Alpha complex construction.
coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature
to be recorded. Enter a negative value to see zero length intervals.
+* `-w [ --weight-file ]` is the path to the file containing the weights of the
+points (one value per line).
+By default, the computation is not weighted.
* `-e [ --exact ]` for the exact computation version.
* `-f [ --fast ]` for the fast computation version.
@@ -58,6 +61,10 @@ to be recorded. Enter a negative value to see zero length intervals.
N.B.:
* Filtration values are alpha square values.
+* Weight values are explained in the CGAL
+[dD Triangulations](https://doc.cgal.org/latest/Triangulation/index.html)
+and
+[Regular triangulation](https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT) documentation.
## alpha_complex_3d_persistence ##
@@ -124,6 +131,6 @@ N.B.:
* `alpha_complex_3d_persistence` only accepts OFF files in dimension 3.
* Filtration values are alpha square values.
* Weights values are explained on CGAL
-[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0)
+[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions)
and
[Regular triangulation](https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation) documentation.
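As a usage note for the new `-w` option: with a hypothetical point file input.off and a matching weight file input.weights (one value per line, as many values as points), the weighted dD computation would be invoked along the lines of

  alpha_complex_persistence input.off -w input.weights -p 2 -m 0

where both file names are placeholders rather than files shipped with the library.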
diff --git a/src/Bitmap_cubical_complex/example/CMakeLists.txt b/src/Bitmap_cubical_complex/example/CMakeLists.txt
index dc659f2d..0ff290ef 100644
--- a/src/Bitmap_cubical_complex/example/CMakeLists.txt
+++ b/src/Bitmap_cubical_complex/example/CMakeLists.txt
@@ -6,5 +6,3 @@ if (TBB_FOUND)
endif()
add_test(NAME Bitmap_cubical_complex_example_random COMMAND $<TARGET_FILE:Random_bitmap_cubical_complex>
"2" "100" "100")
-
-install(TARGETS Random_bitmap_cubical_complex DESTINATION bin)
diff --git a/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp b/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp
index 46ea8f2e..e5512418 100644
--- a/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp
+++ b/src/Bitmap_cubical_complex/example/Random_bitmap_cubical_complex.cpp
@@ -21,7 +21,7 @@
int main(int argc, char** argv) {
srand(time(0));
- std::cout
+ std::clog
<< "This program computes persistent homology, by using bitmap_cubical_complex class, of cubical "
<< "complexes. The first parameter of the program is the dimension D of the bitmap. The next D parameters are "
<< "number of top dimensional cubes in each dimension of the bitmap. The program will create random cubical "
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
index 37514dee..29fabc6c 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
@@ -69,7 +69,7 @@ class Bitmap_cubical_complex : public T {
Bitmap_cubical_complex(const char* perseus_style_file)
: T(perseus_style_file), key_associated_to_simplex(this->total_number_of_cells + 1) {
if (globalDbg) {
- std::cerr << "Bitmap_cubical_complex( const char* perseus_style_file )\n";
+ std::clog << "Bitmap_cubical_complex( const char* perseus_style_file )\n";
}
for (std::size_t i = 0; i != this->total_number_of_cells; ++i) {
this->key_associated_to_simplex[i] = i;
@@ -137,7 +137,7 @@ class Bitmap_cubical_complex : public T {
**/
static Simplex_handle null_simplex() {
if (globalDbg) {
- std::cerr << "Simplex_handle null_simplex()\n";
+ std::clog << "Simplex_handle null_simplex()\n";
}
return std::numeric_limits<Simplex_handle>::max();
}
@@ -152,7 +152,7 @@ class Bitmap_cubical_complex : public T {
**/
inline unsigned dimension(Simplex_handle sh) const {
if (globalDbg) {
- std::cerr << "unsigned dimension(const Simplex_handle& sh)\n";
+ std::clog << "unsigned dimension(const Simplex_handle& sh)\n";
}
if (sh != null_simplex()) return this->get_dimension_of_a_cell(sh);
return -1;
@@ -163,7 +163,7 @@ class Bitmap_cubical_complex : public T {
**/
Filtration_value filtration(Simplex_handle sh) {
if (globalDbg) {
- std::cerr << "Filtration_value filtration(const Simplex_handle& sh)\n";
+ std::clog << "Filtration_value filtration(const Simplex_handle& sh)\n";
}
// Returns the filtration value of a simplex.
if (sh != null_simplex()) return this->data[sh];
@@ -175,7 +175,7 @@ class Bitmap_cubical_complex : public T {
**/
static Simplex_key null_key() {
if (globalDbg) {
- std::cerr << "Simplex_key null_key()\n";
+ std::clog << "Simplex_key null_key()\n";
}
return std::numeric_limits<Simplex_handle>::max();
}
@@ -185,7 +185,7 @@ class Bitmap_cubical_complex : public T {
**/
Simplex_key key(Simplex_handle sh) const {
if (globalDbg) {
- std::cerr << "Simplex_key key(const Simplex_handle& sh)\n";
+ std::clog << "Simplex_key key(const Simplex_handle& sh)\n";
}
if (sh != null_simplex()) {
return this->key_associated_to_simplex[sh];
@@ -198,7 +198,7 @@ class Bitmap_cubical_complex : public T {
**/
Simplex_handle simplex(Simplex_key key) {
if (globalDbg) {
- std::cerr << "Simplex_handle simplex(Simplex_key key)\n";
+ std::clog << "Simplex_handle simplex(Simplex_key key)\n";
}
if (key != null_key()) {
return this->simplex_associated_to_key[key];
@@ -211,7 +211,7 @@ class Bitmap_cubical_complex : public T {
**/
void assign_key(Simplex_handle sh, Simplex_key key) {
if (globalDbg) {
- std::cerr << "void assign_key(Simplex_handle& sh, Simplex_key key)\n";
+ std::clog << "void assign_key(Simplex_handle& sh, Simplex_key key)\n";
}
if (key == null_key()) return;
this->key_associated_to_simplex[sh] = key;
@@ -237,21 +237,27 @@ class Bitmap_cubical_complex : public T {
* Filtration_simplex_iterator class provides an iterator though the whole structure in the order of filtration.
* Secondary criteria for filtration are:
* (1) Dimension of a cube (lower dimensional comes first).
- * (2) Position in the data structure (the ones that are earlies in the data structure comes first).
+ * (2) Position in the data structure (the ones that are earliest in the data structure come first).
**/
class Filtration_simplex_range;
- class Filtration_simplex_iterator : std::iterator<std::input_iterator_tag, Simplex_handle> {
+ class Filtration_simplex_iterator {
// Iterator over all simplices of the complex in the order of the indexing scheme.
// 'value_type' must be 'Simplex_handle'.
public:
+ typedef std::input_iterator_tag iterator_category;
+ typedef Simplex_handle value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef value_type* pointer;
+ typedef value_type reference;
+
Filtration_simplex_iterator(Bitmap_cubical_complex* b) : b(b), position(0) {}
Filtration_simplex_iterator() : b(NULL), position(0) {}
Filtration_simplex_iterator operator++() {
if (globalDbg) {
- std::cerr << "Filtration_simplex_iterator operator++\n";
+ std::clog << "Filtration_simplex_iterator operator++\n";
}
++this->position;
return (*this);
@@ -265,7 +271,7 @@ class Bitmap_cubical_complex : public T {
Filtration_simplex_iterator& operator=(const Filtration_simplex_iterator& rhs) {
if (globalDbg) {
- std::cerr << "Filtration_simplex_iterator operator =\n";
+ std::clog << "Filtration_simplex_iterator operator =\n";
}
this->b = rhs.b;
this->position = rhs.position;
@@ -274,21 +280,21 @@ class Bitmap_cubical_complex : public T {
bool operator==(const Filtration_simplex_iterator& rhs) const {
if (globalDbg) {
- std::cerr << "bool operator == ( const Filtration_simplex_iterator& rhs )\n";
+ std::clog << "bool operator == ( const Filtration_simplex_iterator& rhs )\n";
}
return (this->position == rhs.position);
}
bool operator!=(const Filtration_simplex_iterator& rhs) const {
if (globalDbg) {
- std::cerr << "bool operator != ( const Filtration_simplex_iterator& rhs )\n";
+ std::clog << "bool operator != ( const Filtration_simplex_iterator& rhs )\n";
}
return !(*this == rhs);
}
Simplex_handle operator*() {
if (globalDbg) {
- std::cerr << "Simplex_handle operator*()\n";
+ std::clog << "Simplex_handle operator*()\n";
}
return this->b->simplex_associated_to_key[this->position];
}
@@ -314,14 +320,14 @@ class Bitmap_cubical_complex : public T {
Filtration_simplex_iterator begin() {
if (globalDbg) {
- std::cerr << "Filtration_simplex_iterator begin() \n";
+ std::clog << "Filtration_simplex_iterator begin() \n";
}
return Filtration_simplex_iterator(this->b);
}
Filtration_simplex_iterator end() {
if (globalDbg) {
- std::cerr << "Filtration_simplex_iterator end()\n";
+ std::clog << "Filtration_simplex_iterator end()\n";
}
Filtration_simplex_iterator it(this->b);
it.position = this->b->simplex_associated_to_key.size();
@@ -347,7 +353,7 @@ class Bitmap_cubical_complex : public T {
**/
Filtration_simplex_range filtration_simplex_range() {
if (globalDbg) {
- std::cerr << "Filtration_simplex_range filtration_simplex_range()\n";
+ std::clog << "Filtration_simplex_range filtration_simplex_range()\n";
}
// Returns a range over the simplices of the complex in the order of the filtration
return Filtration_simplex_range(this);
@@ -370,8 +376,8 @@ class Bitmap_cubical_complex : public T {
std::pair<Simplex_handle, Simplex_handle> endpoints(Simplex_handle sh) {
std::vector<std::size_t> bdry = this->get_boundary_of_a_cell(sh);
if (globalDbg) {
- std::cerr << "std::pair<Simplex_handle, Simplex_handle> endpoints( Simplex_handle sh )\n";
- std::cerr << "bdry.size() : " << bdry.size() << "\n";
+ std::clog << "std::pair<Simplex_handle, Simplex_handle> endpoints( Simplex_handle sh )\n";
+ std::clog << "bdry.size() : " << bdry.size() << "\n";
}
// this method returns two first elements from the boundary of sh.
if (bdry.size() < 2)
@@ -386,13 +392,19 @@ class Bitmap_cubical_complex : public T {
**/
class Skeleton_simplex_range;
- class Skeleton_simplex_iterator : std::iterator<std::input_iterator_tag, Simplex_handle> {
+ class Skeleton_simplex_iterator {
// Iterator over all simplices of the complex in the order of the indexing scheme.
// 'value_type' must be 'Simplex_handle'.
public:
+ typedef std::input_iterator_tag iterator_category;
+ typedef Simplex_handle value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef value_type* pointer;
+ typedef value_type reference;
+
Skeleton_simplex_iterator(Bitmap_cubical_complex* b, std::size_t d) : b(b), dimension(d) {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n";
+ std::clog << "Skeleton_simplex_iterator ( Bitmap_cubical_complex* b , std::size_t d )\n";
}
// find the position of the first simplex of a dimension d
this->position = 0;
@@ -406,7 +418,7 @@ class Bitmap_cubical_complex : public T {
Skeleton_simplex_iterator operator++() {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator operator++()\n";
+ std::clog << "Skeleton_simplex_iterator operator++()\n";
}
// increment the position as long as you did not get to the next element of the dimension dimension.
++this->position;
@@ -425,7 +437,7 @@ class Bitmap_cubical_complex : public T {
Skeleton_simplex_iterator& operator=(const Skeleton_simplex_iterator& rhs) {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator operator =\n";
+ std::clog << "Skeleton_simplex_iterator operator =\n";
}
this->b = rhs.b;
this->position = rhs.position;
@@ -435,21 +447,21 @@ class Bitmap_cubical_complex : public T {
bool operator==(const Skeleton_simplex_iterator& rhs) const {
if (globalDbg) {
- std::cerr << "bool operator ==\n";
+ std::clog << "bool operator ==\n";
}
return (this->position == rhs.position);
}
bool operator!=(const Skeleton_simplex_iterator& rhs) const {
if (globalDbg) {
- std::cerr << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n";
+ std::clog << "bool operator != ( const Skeleton_simplex_iterator& rhs )\n";
}
return !(*this == rhs);
}
Simplex_handle operator*() {
if (globalDbg) {
- std::cerr << "Simplex_handle operator*() \n";
+ std::clog << "Simplex_handle operator*() \n";
}
return this->position;
}
@@ -476,14 +488,14 @@ class Bitmap_cubical_complex : public T {
Skeleton_simplex_iterator begin() {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator begin()\n";
+ std::clog << "Skeleton_simplex_iterator begin()\n";
}
return Skeleton_simplex_iterator(this->b, this->dimension);
}
Skeleton_simplex_iterator end() {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_iterator end()\n";
+ std::clog << "Skeleton_simplex_iterator end()\n";
}
Skeleton_simplex_iterator it(this->b, this->dimension);
it.position = this->b->data.size();
@@ -500,7 +512,7 @@ class Bitmap_cubical_complex : public T {
**/
Skeleton_simplex_range skeleton_simplex_range(unsigned dimension) {
if (globalDbg) {
- std::cerr << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n";
+ std::clog << "Skeleton_simplex_range skeleton_simplex_range( unsigned dimension )\n";
}
return Skeleton_simplex_range(this, dimension);
}
@@ -515,7 +527,7 @@ class Bitmap_cubical_complex : public T {
template <typename T>
void Bitmap_cubical_complex<T>::initialize_simplex_associated_to_key() {
if (globalDbg) {
- std::cerr << "void Bitmap_cubical_complex<T>::initialize_elements_ordered_according_to_filtration() \n";
+ std::clog << "void Bitmap_cubical_complex<T>::initialize_elements_ordered_according_to_filtration() \n";
}
this->simplex_associated_to_key = std::vector<std::size_t>(this->data.size());
std::iota(std::begin(simplex_associated_to_key), std::end(simplex_associated_to_key), 0);
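The iterator changes in this file all follow one pattern: inheriting from std::iterator (deprecated in C++17) is replaced by spelling out the five member typedefs that std::iterator_traits looks for. A minimal sketch of the pattern on a hypothetical counting iterator:

#include <cstddef>
#include <iterator>
#include <type_traits>

// Hypothetical input iterator written with the same replacement pattern as above
class Counting_iterator {
 public:
  typedef std::input_iterator_tag iterator_category;
  typedef std::size_t value_type;
  typedef std::ptrdiff_t difference_type;
  typedef value_type* pointer;
  typedef value_type reference;

  explicit Counting_iterator(std::size_t position = 0) : position_(position) {}
  Counting_iterator& operator++() { ++position_; return *this; }
  value_type operator*() const { return position_; }
  bool operator==(const Counting_iterator& rhs) const { return position_ == rhs.position_; }
  bool operator!=(const Counting_iterator& rhs) const { return !(*this == rhs); }

 private:
  std::size_t position_;
};

// std::iterator_traits still finds everything it needs, without the deprecated base class
static_assert(std::is_same<std::iterator_traits<Counting_iterator>::value_type, std::size_t>::value,
              "value_type comes from the member typedef");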
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index 0d6299d2..2bf62f9b 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -13,6 +13,8 @@
#include <gudhi/Bitmap_cubical_complex/counter.h>
+#include <boost/config.hpp>
+
#include <iostream>
#include <vector>
#include <string>
@@ -41,7 +43,7 @@ namespace cubical_complex {
* Each cell is represented by a single
* bit (in case of black and white bitmaps, or by a single element of a type T
* (here T is a filtration type of a bitmap, typically a double).
- * All the informations needed for homology and
+ * All the information needed for homology and
* persistent homology computations (like dimension of a cell, boundary and
* coboundary elements of a cell, are then obtained from the
* position of the element in C.
@@ -110,6 +112,16 @@ class Bitmap_cubical_complex_base {
virtual inline std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const;
/**
+ * This function finds a top-dimensional cell that is incident to the input cell and has
+ * the same filtration value. In case several cells are suitable, an arbitrary one is
+   * returned. Note that the input parameter can be a cell of any dimension (vertex, edge, etc.).
+   * On the other hand, the output always indicates the position of
+ * a top-dimensional cube in the data structure.
+ * \pre The filtration values are assigned as per `impose_lower_star_filtration()`.
+ **/
+ inline size_t get_top_dimensional_coface_of_a_cell(size_t splx);
+
+ /**
* This procedure compute incidence numbers between cubes. For a cube \f$A\f$ of
* dimension n and a cube \f$B \subset A\f$ of dimension n-1, an incidence
* between \f$A\f$ and \f$B\f$ is the integer with which \f$B\f$ appears in the boundary of \f$A\f$.
@@ -142,7 +154,7 @@ class Bitmap_cubical_complex_base {
}
if (coface_counter[i] != face_counter[i]) {
if (number_of_position_in_which_counters_do_not_agree != -1) {
- std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
+ std::cerr << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
throw std::logic_error(
"Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.");
}
@@ -197,7 +209,7 @@ class Bitmap_cubical_complex_base {
/**
* Returns number of all cubes in the data structure.
**/
- inline unsigned size() const { return this->data.size(); }
+ inline std::size_t size() const { return this->data.size(); }
/**
* Writing to stream operator. By using it we get the values T of cells in order in which they are stored in the
@@ -239,8 +251,14 @@ class Bitmap_cubical_complex_base {
* @brief Iterator through all cells in the complex (in order they appear in the structure -- i.e.
* in lexicographical order).
**/
- class All_cells_iterator : std::iterator<std::input_iterator_tag, T> {
+ class All_cells_iterator {
public:
+ typedef std::input_iterator_tag iterator_category;
+ typedef std::size_t value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef value_type* pointer;
+ typedef value_type reference;
+
All_cells_iterator() { this->counter = 0; }
All_cells_iterator operator++() {
@@ -343,8 +361,14 @@ class Bitmap_cubical_complex_base {
* @brief Iterator through top dimensional cells of the complex. The cells appear in order they are stored
* in the structure (i.e. in lexicographical order)
**/
- class Top_dimensional_cells_iterator : std::iterator<std::input_iterator_tag, T> {
+ class Top_dimensional_cells_iterator {
public:
+ typedef std::input_iterator_tag iterator_category;
+ typedef std::size_t value_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef value_type* pointer;
+ typedef value_type reference;
+
Top_dimensional_cells_iterator(Bitmap_cubical_complex_base& b) : b(b) {
this->counter = std::vector<std::size_t>(b.dimension());
// std::fill( this->counter.begin() , this->counter.end() , 0 );
@@ -408,7 +432,7 @@ class Bitmap_cubical_complex_base {
void print_counter() const {
for (std::size_t i = 0; i != this->counter.size(); ++i) {
- std::cout << this->counter[i] << " ";
+ std::clog << this->counter[i] << " ";
}
}
friend class Bitmap_cubical_complex_base;
@@ -521,11 +545,11 @@ void Bitmap_cubical_complex_base<T>::put_data_to_bins(std::size_t number_of_bins
// now put the data into the appropriate bins:
for (std::size_t i = 0; i != this->data.size(); ++i) {
if (dbg) {
- std::cerr << "Before binning : " << this->data[i] << std::endl;
+ std::clog << "Before binning : " << this->data[i] << std::endl;
}
this->data[i] = min_max.first + dx * (this->data[i] - min_max.first) / number_of_bins;
if (dbg) {
- std::cerr << "After binning : " << this->data[i] << std::endl;
+ std::clog << "After binning : " << this->data[i] << std::endl;
}
}
}
@@ -539,11 +563,11 @@ void Bitmap_cubical_complex_base<T>::put_data_to_bins(T diameter_of_bin) {
// now put the data into the appropriate bins:
for (std::size_t i = 0; i != this->data.size(); ++i) {
if (dbg) {
- std::cerr << "Before binning : " << this->data[i] << std::endl;
+ std::clog << "Before binning : " << this->data[i] << std::endl;
}
this->data[i] = min_max.first + diameter_of_bin * (this->data[i] - min_max.first) / number_of_bins;
if (dbg) {
- std::cerr << "After binning : " << this->data[i] << std::endl;
+ std::clog << "After binning : " << this->data[i] << std::endl;
}
}
}
@@ -603,6 +627,19 @@ void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells
}
template <typename T>
+size_t Bitmap_cubical_complex_base<T>::get_top_dimensional_coface_of_a_cell(size_t splx) {
+  // A top-dimensional cell is its own top-dimensional coface
+  if (this->get_dimension_of_a_cell(splx) == this->dimension()) return splx;
+  // Otherwise, climb through any coface carrying the same filtration value; the
+  // lower-star precondition guarantees that one exists
+  for (auto v : this->get_coboundary_of_a_cell(splx)) {
+    if (this->get_cell_data(v) == this->get_cell_data(splx)) {
+      return this->get_top_dimensional_coface_of_a_cell(v);
+    }
+  }
+  BOOST_UNREACHABLE_RETURN(-2);
+}
+
+template <typename T>
Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes_in_following_directions,
const std::vector<T>& top_dimensional_cells) {
this->setup_bitmap_based_on_top_dimensional_cells_list(sizes_in_following_directions, top_dimensional_cells);
@@ -617,7 +654,7 @@ void Bitmap_cubical_complex_base<T>::read_perseus_style_file(const char* perseus
inFiltration >> dimensionOfData;
if (dbg) {
- std::cerr << "dimensionOfData : " << dimensionOfData << std::endl;
+ std::clog << "dimensionOfData : " << dimensionOfData << std::endl;
}
std::vector<unsigned> sizes;
@@ -630,7 +667,7 @@ void Bitmap_cubical_complex_base<T>::read_perseus_style_file(const char* perseus
sizes.push_back(size_in_this_dimension);
dimensions *= size_in_this_dimension;
if (dbg) {
- std::cerr << "size_in_this_dimension : " << size_in_this_dimension << std::endl;
+ std::clog << "size_in_this_dimension : " << size_in_this_dimension << std::endl;
}
}
this->set_up_containers(sizes);
@@ -651,7 +688,7 @@ void Bitmap_cubical_complex_base<T>::read_perseus_style_file(const char* perseus
}
if (dbg) {
- std::cerr << "Cell of an index : " << it.compute_index_in_bitmap()
+ std::clog << "Cell of an index : " << it.compute_index_in_bitmap()
<< " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
<< " get the value : " << filtrationLevel << std::endl;
}
@@ -754,20 +791,20 @@ std::vector<std::size_t> Bitmap_cubical_complex_base<T>::get_coboundary_of_a_cel
template <typename T>
unsigned Bitmap_cubical_complex_base<T>::get_dimension_of_a_cell(std::size_t cell) const {
bool dbg = false;
- if (dbg) std::cerr << "\n\n\n Computing position o a cell of an index : " << cell << std::endl;
+  if (dbg) std::clog << "\n\n\n Computing position of a cell of an index : " << cell << std::endl;
unsigned dimension = 0;
for (std::size_t i = this->multipliers.size(); i != 0; --i) {
unsigned position = cell / this->multipliers[i - 1];
if (dbg) {
- std::cerr << "i-1 :" << i - 1 << std::endl;
- std::cerr << "cell : " << cell << std::endl;
- std::cerr << "position : " << position << std::endl;
- std::cerr << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl;
+ std::clog << "i-1 :" << i - 1 << std::endl;
+ std::clog << "cell : " << cell << std::endl;
+ std::clog << "position : " << position << std::endl;
+ std::clog << "multipliers[" << i - 1 << "] = " << this->multipliers[i - 1] << std::endl;
}
if (position % 2 == 1) {
- if (dbg) std::cerr << "Nonzero length in this direction \n";
+ if (dbg) std::clog << "Nonzero length in this direction \n";
dimension++;
}
cell = cell % this->multipliers[i - 1];
@@ -803,9 +840,9 @@ void Bitmap_cubical_complex_base<T>::impose_lower_star_filtration() {
while (indices_to_consider.size()) {
if (dbg) {
- std::cerr << "indices_to_consider in this iteration \n";
+ std::clog << "indices_to_consider in this iteration \n";
for (std::size_t i = 0; i != indices_to_consider.size(); ++i) {
- std::cout << indices_to_consider[i] << " ";
+ std::clog << indices_to_consider[i] << " ";
}
}
std::vector<std::size_t> new_indices_to_consider;
@@ -813,14 +850,14 @@ void Bitmap_cubical_complex_base<T>::impose_lower_star_filtration() {
std::vector<std::size_t> bd = this->get_boundary_of_a_cell(indices_to_consider[i]);
for (std::size_t boundaryIt = 0; boundaryIt != bd.size(); ++boundaryIt) {
if (dbg) {
- std::cerr << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]]
+ std::clog << "filtration of a cell : " << bd[boundaryIt] << " is : " << this->data[bd[boundaryIt]]
<< " while of a cell: " << indices_to_consider[i] << " is: " << this->data[indices_to_consider[i]]
<< std::endl;
}
if (this->data[bd[boundaryIt]] > this->data[indices_to_consider[i]]) {
this->data[bd[boundaryIt]] = this->data[indices_to_consider[i]];
if (dbg) {
- std::cerr << "Setting the value of a cell : " << bd[boundaryIt]
+ std::clog << "Setting the value of a cell : " << bd[boundaryIt]
<< " to : " << this->data[indices_to_consider[i]] << std::endl;
}
}
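A hedged usage sketch for the new get_top_dimensional_coface_of_a_cell helper, on a made-up 2x2 bitmap and under the lower-star precondition stated in its documentation:

#include <gudhi/Bitmap_cubical_complex_base.h>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Made-up 2x2 bitmap given by its top-dimensional cells
  std::vector<unsigned> sizes = {2, 2};
  std::vector<double> top_cells = {1., 2., 3., 4.};
  Gudhi::cubical_complex::Bitmap_cubical_complex_base<double> bitmap(sizes, top_cells);
  // Ensure the helper's precondition (idempotent if the constructor already imposed it)
  bitmap.impose_lower_star_filtration();

  // Cell 0 is a vertex; the helper climbs through cofaces of equal filtration value
  // until it reaches a top-dimensional cube
  std::size_t top = bitmap.get_top_dimensional_coface_of_a_cell(0);
  std::clog << "Top-dimensional coface of cell 0 is at position " << top
            << " with value " << bitmap.get_cell_data(top) << std::endl;
  return 0;
}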
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
index edd794fe..8ac7ae23 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h
@@ -83,7 +83,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
* The boundary elements are guaranteed to be returned so that the
* incidence coefficients are alternating.
*/
- virtual std::vector<std::size_t> get_boundary_of_a_cell(std::size_t cell) const;
+ virtual std::vector<std::size_t> get_boundary_of_a_cell(std::size_t cell) const override;
/**
* A version of a function that return coboundary of a given cell for an object of
@@ -93,7 +93,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
* To compute incidence between cells use compute_incidence_between_cells
* procedure
*/
- virtual std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const;
+ virtual std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const override;
/**
* This procedure compute incidence numbers between cubes. For a cube \f$A\f$ of
@@ -114,7 +114,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
* @exception std::logic_error In case when the cube \f$B\f$ is not n-1
* dimensional face of a cube \f$A\f$.
**/
- virtual int compute_incidence_between_cells(std::size_t coface, std::size_t face) {
+ virtual int compute_incidence_between_cells(std::size_t coface, std::size_t face) const override {
// first get the counters for coface and face:
std::vector<unsigned> coface_counter = this->compute_counter_for_given_cell(coface);
std::vector<unsigned> face_counter = this->compute_counter_for_given_cell(face);
@@ -128,7 +128,7 @@ class Bitmap_cubical_complex_periodic_boundary_conditions_base : public Bitmap_c
}
if (coface_counter[i] != face_counter[i]) {
if (number_of_position_in_which_counters_do_not_agree != -1) {
- std::cout << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
+ std::cerr << "Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.\n";
throw std::logic_error(
"Cells given to compute_incidence_between_cells procedure do not form a pair of coface-face.");
}
@@ -237,7 +237,7 @@ Bitmap_cubical_complex_periodic_boundary_conditions_base<T>::Bitmap_cubical_comp
if (inFiltration.eof()) break;
if (dbg) {
- std::cerr << "Cell of an index : " << it.compute_index_in_bitmap()
+ std::clog << "Cell of an index : " << it.compute_index_in_bitmap()
<< " and dimension: " << this->get_dimension_of_a_cell(it.compute_index_in_bitmap())
<< " get the value : " << filtrationLevel << std::endl;
}
@@ -278,7 +278,7 @@ std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_bas
std::size_t cell) const {
bool dbg = false;
if (dbg) {
- std::cerr << "Computations of boundary of a cell : " << cell << std::endl;
+ std::clog << "Computations of boundary of a cell : " << cell << std::endl;
}
std::vector<std::size_t> boundary_elements;
@@ -292,7 +292,6 @@ std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_bas
if (position % 2 == 1) {
// if there are no periodic boundary conditions in this direction, we do not have to do anything.
if (!directions_in_which_periodic_b_cond_are_to_be_imposed[i - 1]) {
- // std::cerr << "A\n";
if (sum_of_dimensions % 2) {
boundary_elements.push_back(cell - this->multipliers[i - 1]);
boundary_elements.push_back(cell + this->multipliers[i - 1]);
@@ -301,12 +300,11 @@ std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_bas
boundary_elements.push_back(cell - this->multipliers[i - 1]);
}
if (dbg) {
- std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
+ std::clog << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
}
} else {
// in this direction we have to do boundary conditions. Therefore, we need to check if we are not at the end.
if (position != 2 * this->sizes[i - 1] - 1) {
- // std::cerr << "B\n";
if (sum_of_dimensions % 2) {
boundary_elements.push_back(cell - this->multipliers[i - 1]);
boundary_elements.push_back(cell + this->multipliers[i - 1]);
@@ -315,10 +313,9 @@ std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_bas
boundary_elements.push_back(cell - this->multipliers[i - 1]);
}
if (dbg) {
- std::cerr << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
+ std::clog << cell - this->multipliers[i - 1] << " " << cell + this->multipliers[i - 1] << " ";
}
} else {
- // std::cerr << "C\n";
if (sum_of_dimensions % 2) {
boundary_elements.push_back(cell - this->multipliers[i - 1]);
boundary_elements.push_back(cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1]);
@@ -327,7 +324,7 @@ std::vector<std::size_t> Bitmap_cubical_complex_periodic_boundary_conditions_bas
boundary_elements.push_back(cell - this->multipliers[i - 1]);
}
if (dbg) {
- std::cerr << cell - this->multipliers[i - 1] << " "
+ std::clog << cell - this->multipliers[i - 1] << " "
<< cell - (2 * this->sizes[i - 1] - 1) * this->multipliers[i - 1] << " ";
}
}
diff --git a/src/Bitmap_cubical_complex/test/Bitmap_test.cpp b/src/Bitmap_cubical_complex/test/Bitmap_test.cpp
index f18adb36..6f35b6da 100644
--- a/src/Bitmap_cubical_complex/test/Bitmap_test.cpp
+++ b/src/Bitmap_cubical_complex/test/Bitmap_test.cpp
@@ -1402,12 +1402,12 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_2d) {
it != ba.all_cells_iterator_end(); ++it) {
int i = 1;
- // std::cout << "Element : " << *it << std::endl;
+ // std::clog << "Element : " << *it << std::endl;
Bitmap_cubical_complex_periodic_boundary_conditions_base::Boundary_range bdrange = ba.boundary_range(*it);
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin();
bd != bdrange.end(); ++bd) {
- // std::cout << *bd << " ";
+ // std::clog << *bd << " ";
Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd);
int j = 1;
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin();
@@ -1441,7 +1441,7 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_3d) {
std::vector<int> elems_in_boundary(number_of_all_elements, 0);
for (Bitmap_cubical_complex_periodic_boundary_conditions::All_cells_iterator it = ba.all_cells_iterator_begin();
it != ba.all_cells_iterator_end(); ++it) {
- // std::cout << "Element : " << *it << std::endl;
+ // std::clog << "Element : " << *it << std::endl;
int i = 1;
@@ -1449,7 +1449,7 @@ BOOST_AUTO_TEST_CASE(check_if_boundary_of_boundary_is_zero_periodic_case_3d) {
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin();
bd != bdrange.end(); ++bd) {
Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd);
- // std::cout << *bd << " ";
+ // std::clog << *bd << " ";
int j = 1;
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin();
bd2 != second_bdrange.end(); ++bd2) {
@@ -1551,7 +1551,7 @@ BOOST_AUTO_TEST_CASE(compute_incidence_between_cells_test_periodic_boundary_cond
Bitmap_cubical_complex_periodic_boundary_conditions_base::Boundary_range bdrange = ba.boundary_range(*it);
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd = bdrange.begin();
bd != bdrange.end(); ++bd) {
- // std::cout << *bd << " ";
+ // std::clog << *bd << " ";
Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_range second_bdrange = ba.boundary_range(*bd);
for (Bitmap_cubical_complex_periodic_boundary_conditions::Boundary_iterator bd2 = second_bdrange.begin();
bd2 != second_bdrange.end(); ++bd2) {
@@ -1571,11 +1571,11 @@ BOOST_AUTO_TEST_CASE(perseus_file_read) {
auto it = increasing.top_dimensional_cells_iterator_begin();
double value = increasing.get_cell_data(*it);
- std::cout << "First value of sinusoid.txt is " << value << std::endl;
+ std::clog << "First value of sinusoid.txt is " << value << std::endl;
BOOST_CHECK(value == 10.);
// Next value
++it;
value = increasing.get_cell_data(*it);
- std::cout << "Second value of sinusoid.txt is " << value << std::endl;
+ std::clog << "Second value of sinusoid.txt is " << value << std::endl;
BOOST_CHECK(value == std::numeric_limits<double>::infinity());
}
diff --git a/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp b/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp
index a9792c2d..510861cd 100644
--- a/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp
+++ b/src/Bitmap_cubical_complex/utilities/cubical_complex_persistence.cpp
@@ -19,7 +19,7 @@
#include <cstddef>
int main(int argc, char** argv) {
- std::cout
+ std::clog
<< "This program computes persistent homology, by using bitmap_cubical_complex class, of cubical "
<< "complexes provided in text files in Perseus style (the only numbered in the first line is a dimension D of a"
<< "bitmap. In the lines I between 2 and D+1 there are numbers of top dimensional cells in the direction I. Let "
@@ -62,7 +62,7 @@ int main(int argc, char** argv) {
pcoh.output_diagram(out);
out.close();
- std::cout << "Result in file: " << output_file_name << "\n";
+ std::clog << "Result in file: " << output_file_name << "\n";
return 0;
}
diff --git a/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp b/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp
index fa97bac0..86816417 100644
--- a/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp
+++ b/src/Bitmap_cubical_complex/utilities/periodic_cubical_complex_persistence.cpp
@@ -20,7 +20,7 @@
#include <string>
int main(int argc, char** argv) {
- std::cout
+ std::clog
<< "This program computes persistent homology, by using "
<< "Bitmap_cubical_complex_periodic_boundary_conditions class, of cubical complexes provided in text files in "
<< "Perseus style (the only numbered in the first line is a dimension D of a bitmap. In the lines I between 2 "
@@ -64,7 +64,7 @@ int main(int argc, char** argv) {
pcoh.output_diagram(out);
out.close();
- std::cout << "Result in file: " << output_file_name << "\n";
+ std::clog << "Result in file: " << output_file_name << "\n";
return 0;
}
diff --git a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
index bbc952e1..4f5a956c 100644
--- a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
+++ b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
@@ -52,7 +52,7 @@ int main() {
diag2.emplace_back(0., 13.);
double b = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2);
- std::cout << "Bottleneck distance = " << b << std::endl;
+ std::clog << "Bottleneck distance = " << b << std::endl;
}
* \endcode
*
@@ -64,7 +64,7 @@ int main() {
* \section bottleneckbasicexample Basic example
*
* This other example computes the bottleneck distance from 2 persistence diagrams:
- * \include Bottleneck_distance/bottleneck_basic_example.cpp
+ * \include bottleneck_basic_example.cpp
*
* \code
Bottleneck distance = 0.75
diff --git a/src/Bottleneck_distance/doc/perturb_pd.png b/src/Bottleneck_distance/doc/perturb_pd.png
index be638de0..eabf3c8c 100644
--- a/src/Bottleneck_distance/doc/perturb_pd.png
+++ b/src/Bottleneck_distance/doc/perturb_pd.png
Binary files differ
diff --git a/src/Bottleneck_distance/example/CMakeLists.txt b/src/Bottleneck_distance/example/CMakeLists.txt
index 3d65963a..d16ea6e5 100644
--- a/src/Bottleneck_distance/example/CMakeLists.txt
+++ b/src/Bottleneck_distance/example/CMakeLists.txt
@@ -12,14 +12,16 @@ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- add_executable (alpha_rips_persistence_bottleneck_distance alpha_rips_persistence_bottleneck_distance.cpp)
- target_link_libraries(alpha_rips_persistence_bottleneck_distance ${Boost_PROGRAM_OPTIONS_LIBRARY})
+ if (TARGET Boost::program_options)
+ add_executable (alpha_rips_persistence_bottleneck_distance alpha_rips_persistence_bottleneck_distance.cpp)
+ target_link_libraries(alpha_rips_persistence_bottleneck_distance Boost::program_options)
- if (TBB_FOUND)
- target_link_libraries(alpha_rips_persistence_bottleneck_distance ${TBB_LIBRARIES})
- endif(TBB_FOUND)
-
- add_test(NAME Bottleneck_distance_example_alpha_rips_persistence_bottleneck
- COMMAND $<TARGET_FILE:alpha_rips_persistence_bottleneck_distance>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.15" "-m" "0.12" "-d" "3" "-p" "3")
+ if (TBB_FOUND)
+ target_link_libraries(alpha_rips_persistence_bottleneck_distance ${TBB_LIBRARIES})
+ endif(TBB_FOUND)
+
+ add_test(NAME Bottleneck_distance_example_alpha_rips_persistence_bottleneck
+ COMMAND $<TARGET_FILE:alpha_rips_persistence_bottleneck_distance>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.15" "-m" "0.12" "-d" "3" "-p" "3")
+ endif()
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
index 6c0dc9bf..ceb9e226 100644
--- a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
+++ b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
@@ -68,12 +68,9 @@ int main(int argc, char * argv[]) {
Simplex_tree rips_stree;
rips_complex.create_complex(rips_stree, dim_max);
- std::cout << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension "
+ std::clog << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension "
<< rips_stree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- rips_stree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology rips_pcoh(rips_stree);
// initializes the coefficient field for homology
@@ -89,12 +86,9 @@ int main(int argc, char * argv[]) {
Simplex_tree alpha_stree;
alpha_complex.create_complex(alpha_stree, threshold * threshold);
- std::cout << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension "
+ std::clog << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension "
<< alpha_stree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- alpha_stree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology alpha_pcoh(alpha_stree);
// initializes the coefficient field for homology
@@ -115,12 +109,12 @@ int main(int argc, char * argv[]) {
std::transform(alpha_intervals.begin(), alpha_intervals.end(), alpha_intervals.begin(), compute_root_square);
double bottleneck_distance = Gudhi::persistence_diagram::bottleneck_distance(rips_intervals, alpha_intervals);
- std::cout << "In dimension " << dim << ", bottleneck distance = " << bottleneck_distance << std::endl;
+ std::clog << "In dimension " << dim << ", bottleneck distance = " << bottleneck_distance << std::endl;
if (bottleneck_distance > max_b_distance)
max_b_distance = bottleneck_distance;
}
- std::cout << "================================================================================" << std::endl;
- std::cout << "Bottleneck distance is " << max_b_distance << std::endl;
+ std::clog << "================================================================================" << std::endl;
+ std::clog << "Bottleneck distance is " << max_b_distance << std::endl;
return 0;
}
@@ -162,17 +156,17 @@ void program_options(int argc, char * argv[]
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
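
Note the two initialize_filtration() calls removed above: as the removal suggests, Persistent_cohomology now triggers the filtration sort itself, so the minimal pipeline reduces to the sketch below (toy filtration values, assumed here only for illustration):

#include <gudhi/Simplex_tree.h>
#include <gudhi/Persistent_cohomology.h>

int main() {
  using Simplex_tree = Gudhi::Simplex_tree<>;
  using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
  Simplex_tree st;
  st.insert_simplex_and_subfaces({0, 1}, 0.5);
  st.insert_simplex_and_subfaces({1, 2}, 0.8);
  // No explicit initialize_filtration() before computing persistence.
  Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp> pcoh(st);
  pcoh.init_coefficients(2);  // homology coefficients in Z/2Z
  pcoh.compute_persistent_cohomology(0.);
  pcoh.output_diagram();
  return 0;
}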
diff --git a/src/Bottleneck_distance/example/bottleneck_basic_example.cpp b/src/Bottleneck_distance/example/bottleneck_basic_example.cpp
index 61778a55..e8632a4f 100644
--- a/src/Bottleneck_distance/example/bottleneck_basic_example.cpp
+++ b/src/Bottleneck_distance/example/bottleneck_basic_example.cpp
@@ -20,9 +20,9 @@ int main() {
double b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2);
- std::cout << "Bottleneck distance = " << b << std::endl;
+ std::clog << "Bottleneck distance = " << b << std::endl;
b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.1);
- std::cout << "Approx bottleneck distance = " << b << std::endl;
+ std::clog << "Approx bottleneck distance = " << b << std::endl;
}
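
About the tolerance argument in the second call: the checks added to bottleneck_unit_test.cpp below treat it as an additive error bound, i.e. the approximate value stays within e of the exact one. A sketch under that reading:

#include <gudhi/Bottleneck.h>

#include <cassert>
#include <cmath>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<double, double>> v1{{2.7, 3.7}, {9.6, 14.}, {34.2, 34.974}};
  std::vector<std::pair<double, double>> v2{{2.8, 4.45}, {9.5, 14.1}};
  double exact = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
  double approx = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.1);
  assert(std::fabs(exact - approx) <= 0.1);  // additive error bounded by the tolerance
  return 0;
}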
diff --git a/src/Bottleneck_distance/include/gudhi/Bottleneck.h b/src/Bottleneck_distance/include/gudhi/Bottleneck.h
index e466828a..c916898d 100644
--- a/src/Bottleneck_distance/include/gudhi/Bottleneck.h
+++ b/src/Bottleneck_distance/include/gudhi/Bottleneck.h
@@ -35,8 +35,12 @@ namespace persistence_diagram {
inline double bottleneck_distance_approx(Persistence_graph& g, double e) {
double b_lower_bound = 0.;
- double b_upper_bound = g.diameter_bound();
- const double alpha = std::pow(g.size(), 1. / 5.);
+ double b_upper_bound = g.max_dist_to_diagonal();
+ int siz = g.size();
+ if (siz <= 1)
+ // The value of alpha would be wrong in this case
+ return b_upper_bound;
+ const double alpha = std::pow(siz, 1. / 5.);
Graph_matching m(g);
Graph_matching biggest_unperfect(g);
while (b_upper_bound - b_lower_bound > 2 * e) {
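
The new guard matters because, as the comment notes, alpha would be wrong for a graph with at most one point, and in that case the optimal matching simply sends the lone point to the diagonal, so max_dist_to_diagonal() is already the exact answer. The loop that follows is a plain bisection on the candidate distance; schematically (a sketch, with a hypothetical matchable(d) oracle standing in for Graph_matching):

#include <functional>

// Shrink [lo, hi] until its width is at most 2*e; the midpoint is then
// within e of the true bottleneck value.
double bisect_bottleneck(double lo, double hi, double e,
                         const std::function<bool(double)>& matchable) {
  while (hi - lo > 2 * e) {
    double mid = lo + (hi - lo) / 2;
    if (matchable(mid))  // a perfect matching exists at distance mid
      hi = mid;
    else
      lo = mid;
  }
  return (lo + hi) / 2;
}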
diff --git a/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h b/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
index c65e6082..1d56f0b4 100644
--- a/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
+++ b/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
@@ -86,7 +86,7 @@ class Neighbors_finder {
};
/** \internal \brief data structure used to find any point (including projections) in V near to a query point from U
- * (which can be a projection) in a layered graph layer given as parmeter.
+ * (which can be a projection) in a layered graph layer given as parameter.
*
* V points have to be added manually using their index and before the first pull. A neighbor pulled is automatically
* removed.
diff --git a/src/Bottleneck_distance/include/gudhi/Persistence_graph.h b/src/Bottleneck_distance/include/gudhi/Persistence_graph.h
index f791e37c..c1e10f8e 100644
--- a/src/Bottleneck_distance/include/gudhi/Persistence_graph.h
+++ b/src/Bottleneck_distance/include/gudhi/Persistence_graph.h
@@ -20,18 +20,19 @@
#include <vector>
#include <algorithm>
#include <limits> // for numeric_limits
+#include <cmath>
namespace Gudhi {
namespace persistence_diagram {
-/** \internal \brief Structure representing an euclidean bipartite graph containing
+/** \internal \brief Structure representing a Euclidean bipartite graph containing
* the points from the two persistence diagrams (including the projections).
*
* \ingroup bottleneck_distance
*/
class Persistence_graph {
- public:
+public:
/** \internal \brief Constructor taking 2 PersistenceDiagrams (concept) as parameters. */
template<typename Persistence_diagram1, typename Persistence_diagram2>
Persistence_graph(const Persistence_diagram1& diag1, const Persistence_diagram2& diag2, double e);
@@ -45,20 +46,20 @@ class Persistence_graph {
int corresponding_point_in_v(int u_point_index) const;
/** \internal \brief Given a point from U and a point from V, returns the distance between those points. */
double distance(int u_point_index, int v_point_index) const;
- /** \internal \brief Returns size = |U| = |V|. */
+ /** \internal \brief Returns size = |U| + |V|. */
int size() const;
/** \internal \brief Are there as many infinite points (alive components) in both diagrams? */
double bottleneck_alive() const;
/** \internal \brief Returns the O(n^2) sorted distances between the points. */
std::vector<double> sorted_distances() const;
- /** \internal \brief Returns an upper bound for the diameter of the convex hull of all non infinite points */
- double diameter_bound() const;
+ /** \internal \brief Returns an upper bound for the bottleneck distance of the finite points. */
+ double max_dist_to_diagonal() const;
/** \internal \brief Returns the corresponding internal point */
Internal_point get_u_point(int u_point_index) const;
/** \internal \brief Returns the corresponding internal point */
Internal_point get_v_point(int v_point_index) const;
- private:
+private:
std::vector<Internal_point> u;
std::vector<Internal_point> v;
double b_alive;
@@ -67,30 +68,54 @@ class Persistence_graph {
template<typename Persistence_diagram1, typename Persistence_diagram2>
Persistence_graph::Persistence_graph(const Persistence_diagram1 &diag1,
const Persistence_diagram2 &diag2, double e)
- : u(), v(), b_alive(0.) {
+ : u(), v(), b_alive(0.) {
std::vector<double> u_alive;
std::vector<double> v_alive;
+ std::vector<double> u_nalive;
+ std::vector<double> v_nalive;
+ int u_inf = 0;
+ int v_inf = 0;
+ double inf = std::numeric_limits<double>::infinity();
+ double neginf = -inf;
+
for (auto it = std::begin(diag1); it != std::end(diag1); ++it) {
- if (std::get<1>(*it) == std::numeric_limits<double>::infinity())
- u_alive.push_back(std::get<0>(*it));
- else if (std::get<1>(*it) - std::get<0>(*it) > e)
- u.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), u.size()));
+ if (std::get<0>(*it) != inf && std::get<1>(*it) != neginf){
+ if (std::get<0>(*it) == neginf && std::get<1>(*it) == inf)
+ u_inf++;
+ else if (std::get<0>(*it) == neginf)
+ u_nalive.push_back(std::get<1>(*it));
+ else if (std::get<1>(*it) == inf)
+ u_alive.push_back(std::get<0>(*it));
+ else if (std::get<1>(*it) - std::get<0>(*it) > e)
+ u.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), u.size()));
+ }
}
for (auto it = std::begin(diag2); it != std::end(diag2); ++it) {
- if (std::get<1>(*it) == std::numeric_limits<double>::infinity())
- v_alive.push_back(std::get<0>(*it));
- else if (std::get<1>(*it) - std::get<0>(*it) > e)
- v.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), v.size()));
+ if (std::get<0>(*it) != inf && std::get<1>(*it) != neginf){
+ if (std::get<0>(*it) == neginf && std::get<1>(*it) == inf)
+ v_inf++;
+ else if (std::get<0>(*it) == neginf)
+ v_nalive.push_back(std::get<1>(*it));
+ else if (std::get<1>(*it) == inf)
+ v_alive.push_back(std::get<0>(*it));
+ else if (std::get<1>(*it) - std::get<0>(*it) > e)
+ v.push_back(Internal_point(std::get<0>(*it), std::get<1>(*it), v.size()));
+ }
}
if (u.size() < v.size())
swap(u, v);
- std::sort(u_alive.begin(), u_alive.end());
- std::sort(v_alive.begin(), v_alive.end());
- if (u_alive.size() != v_alive.size()) {
+
+ if (u_alive.size() != v_alive.size() || u_nalive.size() != v_nalive.size() || u_inf != v_inf) {
b_alive = std::numeric_limits<double>::infinity();
} else {
+ std::sort(u_alive.begin(), u_alive.end());
+ std::sort(v_alive.begin(), v_alive.end());
+ std::sort(u_nalive.begin(), u_nalive.end());
+ std::sort(v_nalive.begin(), v_nalive.end());
for (auto it_u = u_alive.cbegin(), it_v = v_alive.cbegin(); it_u != u_alive.cend(); ++it_u, ++it_v)
b_alive = (std::max)(b_alive, std::fabs(*it_u - *it_v));
+ for (auto it_u = u_nalive.cbegin(), it_v = v_nalive.cbegin(); it_u != u_nalive.cend(); ++it_u, ++it_v)
+ b_alive = (std::max)(b_alive, std::fabs(*it_u - *it_v));
}
}
@@ -104,12 +129,12 @@ inline bool Persistence_graph::on_the_v_diagonal(int v_point_index) const {
inline int Persistence_graph::corresponding_point_in_u(int v_point_index) const {
return on_the_v_diagonal(v_point_index) ?
- v_point_index - static_cast<int> (v.size()) : v_point_index + static_cast<int> (u.size());
+ v_point_index - static_cast<int> (v.size()) : v_point_index + static_cast<int> (u.size());
}
inline int Persistence_graph::corresponding_point_in_v(int u_point_index) const {
return on_the_u_diagonal(u_point_index) ?
- u_point_index - static_cast<int> (u.size()) : u_point_index + static_cast<int> (v.size());
+ u_point_index - static_cast<int> (u.size()) : u_point_index + static_cast<int> (v.size());
}
inline double Persistence_graph::distance(int u_point_index, int v_point_index) const {
@@ -160,13 +185,13 @@ inline Internal_point Persistence_graph::get_v_point(int v_point_index) const {
return Internal_point(m, m, v_point_index);
}
-inline double Persistence_graph::diameter_bound() const {
+inline double Persistence_graph::max_dist_to_diagonal() const {
double max = 0.;
- for (auto it = u.cbegin(); it != u.cend(); it++)
- max = (std::max)(max, it->y());
- for (auto it = v.cbegin(); it != v.cend(); it++)
- max = (std::max)(max, it->y());
- return max;
+ for (auto& p : u)
+ max = (std::max)(max, p.y() - p.x());
+ for (auto& p : v)
+ max = (std::max)(max, p.y() - p.x());
+ return max / 2;
}
} // namespace persistence_diagram
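
The rename from diameter_bound() to max_dist_to_diagonal() matches what the quantity actually is: under the sup-norm, the distance from a finite point (b, d) to the diagonal is attained at its projection ((b+d)/2, (b+d)/2),

    d_\infty\big((b, d), \Delta\big) = \min_t \max(|b - t|, |d - t|) = \frac{d - b}{2},

and since matching every point to the diagonal is always feasible, the maximum of (d - b)/2 over both diagrams is a valid upper bound on the bottleneck distance of the finite part, which is exactly what the rewritten loop computes before halving.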
diff --git a/src/Bottleneck_distance/test/bottleneck_unit_test.cpp b/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
index 2c520045..9872f20c 100644
--- a/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
+++ b/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
@@ -153,4 +153,87 @@ BOOST_AUTO_TEST_CASE(global) {
BOOST_CHECK(bottleneck_distance(v1, v2, 0.) <= upper_bound / 100.);
BOOST_CHECK(bottleneck_distance(v1, v2, upper_bound / 10000.) <= upper_bound / 100. + upper_bound / 10000.);
BOOST_CHECK(std::abs(bottleneck_distance(v1, v2, 0.) - bottleneck_distance(v1, v2, upper_bound / 10000.)) <= upper_bound / 10000.);
+
+ std::vector< std::pair<double, double> > empty;
+ std::vector< std::pair<double, double> > one = {{8, 10}};
+ BOOST_CHECK(bottleneck_distance(empty, empty) == 0);
+ BOOST_CHECK(bottleneck_distance(empty, one) == 1);
+}
+
+BOOST_AUTO_TEST_CASE(neg_global) {
+ std::uniform_real_distribution<double> unif1(0., upper_bound);
+ std::uniform_real_distribution<double> unif2(upper_bound / 10000., upper_bound / 100.);
+ std::default_random_engine re;
+ std::vector< std::pair<double, double> > v1, v2;
+ for (int i = 0; i < n1; i++) {
+ double a = std::log(unif1(re));
+ double b = std::log(unif1(re));
+ double x = std::log(unif2(re));
+ double y = std::log(unif2(re));
+ v1.emplace_back(std::min(a, b), std::max(a, b));
+ v2.emplace_back(std::min(a, b) + std::min(x, y), std::max(a, b) + std::max(x, y));
+ if (i % 5 == 0)
+ v1.emplace_back(std::min(a, b), std::min(a, b) + x);
+ if (i % 3 == 0)
+ v2.emplace_back(std::max(a, b), std::max(a, b) + y);
+ }
+ BOOST_CHECK(bottleneck_distance(v1, v2, 0.) <= upper_bound / 100.);
+ BOOST_CHECK(bottleneck_distance(v1, v2, upper_bound / 10000.) <= upper_bound / 100. + upper_bound / 10000.);
+ BOOST_CHECK(std::abs(bottleneck_distance(v1, v2, 0.) - bottleneck_distance(v1, v2, upper_bound / 10000.)) <= upper_bound / 10000.);
+
+ std::vector< std::pair<double, double> > empty;
+ std::vector< std::pair<double, double> > one = {{8, 10}};
+ BOOST_CHECK(bottleneck_distance(empty, empty) == 0);
+ BOOST_CHECK(bottleneck_distance(empty, one) == 1);
+}
+
+BOOST_AUTO_TEST_CASE(bottleneck_simple_test) {
+ std::vector< std::pair<double, double> > v1, v2;
+ double inf = std::numeric_limits<double>::infinity();
+ double neginf = -inf;
+ double b;
+
+ v1.emplace_back(9.6, 14.);
+ v2.emplace_back(9.5, 14.1);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.09 && b < 0.11);
+
+ v1.emplace_back(-34.974, -34.2);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.386 && b < 0.388);
+
+ v1.emplace_back(neginf, 3.7);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK_EQUAL(b, inf);
+
+ v2.emplace_back(neginf, 4.45);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.74 && b < 0.76);
+
+ v1.emplace_back(-60.6, 52.1);
+ v2.emplace_back(-61.5, 53.);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.89 && b < 0.91);
+
+ v1.emplace_back(3., inf);
+ v2.emplace_back(3.2, inf);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.89 && b < 0.91);
+
+ v1.emplace_back(neginf, inf);
+ v2.emplace_back(neginf, inf);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK(b > 0.89 && b < 0.91);
+
+ v2.emplace_back(6, inf);
+
+ b = Gudhi::persistence_diagram::bottleneck_distance(v1, v2, 0.);
+ BOOST_CHECK_EQUAL(b, inf);
}
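
The bottleneck_simple_test cases fix the conventions for infinite coordinates: a point born at -infinity or dying at +infinity can only be matched to a point of the same kind, and any mismatch in their counts makes the distance infinite. A compact usage sketch of the last two cases:

#include <gudhi/Bottleneck.h>

#include <iostream>
#include <limits>
#include <utility>
#include <vector>

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  std::vector<std::pair<double, double>> d1{{3., inf}};
  std::vector<std::pair<double, double>> d2{{3.2, inf}};
  // Both essential classes pair up: distance is |3.2 - 3.| = 0.2.
  std::clog << Gudhi::persistence_diagram::bottleneck_distance(d1, d2) << std::endl;
  d2.emplace_back(6., inf);
  // d2 now has one essential class more than d1: distance is +infinity.
  std::clog << Gudhi::persistence_diagram::bottleneck_distance(d1, d2) << std::endl;
  return 0;
}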
diff --git a/src/Bottleneck_distance/utilities/bottleneck_distance.cpp b/src/Bottleneck_distance/utilities/bottleneck_distance.cpp
index d88a8a0b..01813ba1 100644
--- a/src/Bottleneck_distance/utilities/bottleneck_distance.cpp
+++ b/src/Bottleneck_distance/utilities/bottleneck_distance.cpp
@@ -18,7 +18,7 @@
int main(int argc, char** argv) {
if (argc < 3) {
- std::cout << "To run this program please provide as an input two files with persistence diagrams. Each file" <<
+ std::clog << "To run this program please provide as an input two files with persistence diagrams. Each file" <<
" should contain a birth-death pair per line. Third, optional parameter is an error bound on the bottleneck" <<
" distance (set by default to the smallest positive double value). If you set the error bound to 0, be" <<
" aware this version is exact but expensive. The program will now terminate \n";
@@ -32,7 +32,7 @@ int main(int argc, char** argv) {
tolerance = atof(argv[3]);
}
double b = Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, tolerance);
- std::cout << "The distance between the diagrams is : " << b << ". The tolerance is : " << tolerance << std::endl;
+ std::clog << "The distance between the diagrams is : " << b << ". The tolerance is : " << tolerance << std::endl;
return 0;
}
diff --git a/src/Bottleneck_distance/utilities/bottleneckdistance.md b/src/Bottleneck_distance/utilities/bottleneckdistance.md
index a81426cf..2f5dedc9 100644
--- a/src/Bottleneck_distance/utilities/bottleneckdistance.md
+++ b/src/Bottleneck_distance/utilities/bottleneckdistance.md
@@ -10,14 +10,14 @@ Leave the lines above as it is required by the web site generator 'Jekyll'
{:/comment}
-## bottleneck_read_file_example ##
+## bottleneck_distance ##
This program computes the Bottleneck distance between two persistence diagram files.
**Usage**
```
- bottleneck_read_file_example <file_1.pers> <file_2.pers> [<tolerance>]
+ bottleneck_distance <file_1.pers> <file_2.pers> [<tolerance>]
```
where
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 561aa049..f9f77ef7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,10 +1,10 @@
-cmake_minimum_required(VERSION 3.1)
+cmake_minimum_required(VERSION 3.5)
project(GUDHI)
-include(CMakeGUDHIVersion.txt)
-
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/")
+include(CMakeGUDHIVersion.txt)
+include(GUDHI_options)
set(GUDHI_MODULES "" CACHE INTERNAL "GUDHI_MODULES")
set(GUDHI_MISSING_MODULES "" CACHE INTERNAL "GUDHI_MISSING_MODULES")
@@ -12,8 +12,12 @@ set(GUDHI_MISSING_MODULES "" CACHE INTERNAL "GUDHI_MISSING_MODULES")
# This variable is used by Cython CMakeLists.txt and by GUDHI_third_party_libraries to know its path
set(GUDHI_PYTHON_PATH "python")
-# For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
-include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+include(GUDHI_submodules)
+
+if (WITH_GUDHI_THIRD_PARTY)
+ # For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
+ include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+endif()
include(GUDHI_compilation_flags)
@@ -26,6 +30,8 @@ add_gudhi_module(Bitmap_cubical_complex)
add_gudhi_module(Bottleneck_distance)
add_gudhi_module(Cech_complex)
add_gudhi_module(Contraction)
+add_gudhi_module(Collapse)
+add_gudhi_module(Coxeter_triangulation)
add_gudhi_module(Hasse_complex)
add_gudhi_module(Persistence_representations)
add_gudhi_module(Persistent_cohomology)
@@ -49,6 +55,14 @@ include_directories(include)
# Include module CMake subdirectories
# GUDHI_SUB_DIRECTORIES is managed in CMAKE_MODULE_PATH/GUDHI_modules.cmake
+if (WITH_GUDHI_PYTHON)
+ # specific for cython module
+ add_subdirectory(${GUDHI_PYTHON_PATH})
+else()
+ message("++ Python module will not be compiled because WITH_GUDHI_PYTHON is set to OFF")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python")
+endif()
+
foreach(GUDHI_MODULE ${GUDHI_MODULES})
foreach(GUDHI_SUB_DIRECTORY ${GUDHI_SUB_DIRECTORIES})
if(EXISTS ${CMAKE_SOURCE_DIR}/${GUDHI_SUB_DIRECTORY}/${GUDHI_MODULE}/CMakeLists.txt)
@@ -57,14 +71,8 @@ foreach(GUDHI_MODULE ${GUDHI_MODULES})
endforeach()
endforeach()
-add_subdirectory(GudhUI)
-
-if (WITH_GUDHI_PYTHON)
- # specific for cython module
- add_subdirectory(${GUDHI_PYTHON_PATH})
-else()
- message("++ Python module will not be compiled because WITH_GUDHI_PYTHON is set to OFF")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python")
+if (WITH_GUDHI_THIRD_PARTY)
+ add_subdirectory(GudhUI)
endif()
message("++ GUDHI_MODULES list is:\"${GUDHI_MODULES}\"")
diff --git a/src/Cech_complex/benchmark/CMakeLists.txt b/src/Cech_complex/benchmark/CMakeLists.txt
index b7697764..a6b3d70b 100644
--- a/src/Cech_complex/benchmark/CMakeLists.txt
+++ b/src/Cech_complex/benchmark/CMakeLists.txt
@@ -1,12 +1,15 @@
-cmake_minimum_required(VERSION 2.6)
project(Cech_complex_benchmark)
-# Do not forget to copy test files in current binary dir
-file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ # Do not forget to copy test files in current binary dir
+ file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-add_executable(cech_complex_benchmark cech_complex_benchmark.cpp)
-target_link_libraries(cech_complex_benchmark ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY})
-
-if (TBB_FOUND)
- target_link_libraries(cech_complex_benchmark ${TBB_LIBRARIES})
+ if(TARGET Boost::filesystem)
+ add_executable(cech_complex_benchmark cech_complex_benchmark.cpp)
+ target_link_libraries(cech_complex_benchmark Boost::filesystem)
+
+ if (TBB_FOUND)
+ target_link_libraries(cech_complex_benchmark ${TBB_LIBRARIES})
+ endif()
+ endif()
endif()
diff --git a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
index d2d71dbf..a0e727be 100644
--- a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
+++ b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
@@ -10,12 +10,13 @@
#include <gudhi/Points_off_io.h>
#include <gudhi/distance_functions.h>
-#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Clock.h>
#include <gudhi/Rips_complex.h>
#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/Miniball.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
#include "boost/filesystem.hpp" // includes all needed Boost.Filesystem declarations
@@ -26,107 +27,81 @@
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
using Point = std::vector<Filtration_value>;
-using Point_cloud = std::vector<Point>;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
using Rips_complex = Gudhi::rips_complex::Rips_complex<Filtration_value>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
-
-class Minimal_enclosing_ball_radius {
- public:
- // boost::range_value is not SFINAE-friendly so we cannot use it in the return type
- template <typename Point>
- typename std::iterator_traits<typename boost::range_iterator<Point>::type>::value_type operator()(
- const Point& p1, const Point& p2) const {
- // Type def
- using Point_cloud = std::vector<Point>;
- using Point_iterator = typename Point_cloud::const_iterator;
- using Coordinate_iterator = typename Point::const_iterator;
- using Min_sphere =
- typename Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- Point_cloud point_cloud;
- point_cloud.push_back(p1);
- point_cloud.push_back(p2);
-
- GUDHI_CHECK((p1.end() - p1.begin()) != (p2.end() - p2.begin()), "inconsistent point dimensions");
- Min_sphere min_sphere(p1.end() - p1.begin(), point_cloud.begin(), point_cloud.end());
- return std::sqrt(min_sphere.squared_radius());
- }
-};
+template<typename Kernel>
+Simplex_tree benchmark_cech(const std::string& off_file_points, const Filtration_value& radius, const int& dim_max, const bool exact) {
+ using Point_cgal = typename Kernel::Point_d;
+ using Points_off_reader_cgal = Gudhi::Points_off_reader<Point_cgal>;
+ using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
+
+ // Extract the points from the file filepoints
+ Points_off_reader_cgal off_reader_cgal(off_file_points);
+
+ Gudhi::Clock cech_clock("Cech computation");
+ Cech_complex cech_complex_from_points(off_reader_cgal.get_point_cloud(), radius, exact);
+ Simplex_tree cech_stree;
+ cech_complex_from_points.create_complex(cech_stree, dim_max);
+
+ // ------------------------------------------
+ // Display information about the Cech complex
+ // ------------------------------------------
+ double cech_sec = cech_clock.num_seconds();
+ std::clog << cech_sec << " ; ";
+ return cech_stree;
+}
int main(int argc, char* argv[]) {
- std::string off_file_points = "tore3D_1307.off";
- Filtration_value threshold = 1e20;
-
- // Extract the points from the file filepoints
- Points_off_reader off_reader(off_file_points);
-
- Gudhi::Clock euclidean_clock("Gudhi::Euclidean_distance");
- // Compute the proximity graph of the points
- Proximity_graph euclidean_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance());
-
- std::cout << euclidean_clock << std::endl;
-
- Gudhi::Clock miniball_clock("Minimal_enclosing_ball_radius");
- // Compute the proximity graph of the points
- Proximity_graph miniball_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Minimal_enclosing_ball_radius());
- std::cout << miniball_clock << std::endl;
-
- Gudhi::Clock common_miniball_clock("Gudhi::Minimal_enclosing_ball_radius()");
- // Compute the proximity graph of the points
- Proximity_graph common_miniball_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Gudhi::Minimal_enclosing_ball_radius());
- std::cout << common_miniball_clock << std::endl;
-
- boost::filesystem::path full_path(boost::filesystem::current_path());
- std::cout << "Current path is : " << full_path << std::endl;
-
- std::cout << "File name;Radius;Rips time;Cech time; Ratio Rips/Cech time;Rips nb simplices;Cech nb simplices;"
- << std::endl;
- boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end
- for (boost::filesystem::directory_iterator itr(boost::filesystem::current_path()); itr != end_itr; ++itr) {
- if (!boost::filesystem::is_directory(itr->status())) {
- if (itr->path().extension() == ".off") // see below
- {
- Points_off_reader off_reader(itr->path().string());
- Point p0 = off_reader.get_point_cloud()[0];
-
- for (Filtration_value radius = 0.1; radius < 0.4; radius += 0.1) {
- std::cout << itr->path().stem() << ";";
- std::cout << radius << ";";
- Gudhi::Clock rips_clock("Rips computation");
- Rips_complex rips_complex_from_points(off_reader.get_point_cloud(), radius,
- Gudhi::Minimal_enclosing_ball_radius());
- Simplex_tree rips_stree;
- rips_complex_from_points.create_complex(rips_stree, p0.size() - 1);
- // ------------------------------------------
- // Display information about the Rips complex
- // ------------------------------------------
- double rips_sec = rips_clock.num_seconds();
- std::cout << rips_sec << ";";
-
- Gudhi::Clock cech_clock("Cech computation");
- Cech_complex cech_complex_from_points(off_reader.get_point_cloud(), radius);
- Simplex_tree cech_stree;
- cech_complex_from_points.create_complex(cech_stree, p0.size() - 1);
- // ------------------------------------------
- // Display information about the Cech complex
- // ------------------------------------------
- double cech_sec = cech_clock.num_seconds();
- std::cout << cech_sec << ";";
- std::cout << cech_sec / rips_sec << ";";
-
- assert(rips_stree.num_simplices() >= cech_stree.num_simplices());
- std::cout << rips_stree.num_simplices() << ";";
- std::cout << cech_stree.num_simplices() << ";" << std::endl;
+ boost::filesystem::path full_path(boost::filesystem::current_path());
+ std::clog << "Current path is : " << full_path << std::endl;
+
+ std::clog << "File name ; Radius ; Rips time ; Dim-3 Fast Cech time ; Dynamic_dim Fast Cech time ; "
+ "Dim-3 Safe Cech time ; Dynamic_dim Safe Cech time ; Dim-3 Exact Cech time ; Dynamic_dim Exact Cech time ; "
+ "Cech nb simplices ; Rips nb simplices;"
+ << std::endl;
+ boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end
+ // For every ".off" file in the current directory, and for 3 predefined thresholds, compare Rips and various Cech constructions
+ for (boost::filesystem::directory_iterator itr(boost::filesystem::current_path()); itr != end_itr; ++itr) {
+ if (!boost::filesystem::is_directory(itr->status())) {
+ if (itr->path().extension() == ".off") {
+ Points_off_reader off_reader(itr->path().string());
+ Point p0 = off_reader.get_point_cloud()[0];
+ // Loop over the different thresholds
+ for (Filtration_value radius = 0.1; radius < 0.35; radius += 0.1) {
+ std::clog << itr->path().stem() << " ; ";
+ std::clog << radius << " ; ";
+
+ Gudhi::Clock rips_clock("Rips computation");
+ Rips_complex rips_complex_from_points(off_reader.get_point_cloud(), radius, Gudhi::Euclidean_distance());
+ Simplex_tree rips_stree;
+ int dim_max = p0.size() - 1;
+ rips_complex_from_points.create_complex(rips_stree, dim_max);
+ // ------------------------------------------
+ // Display information about the Rips complex
+ // ------------------------------------------
+ double rips_sec = rips_clock.num_seconds();
+ std::clog << rips_sec << " ; ";
+
+ // --------------
+ // Cech complex
+ // --------------
+ // Fast
+ benchmark_cech<CGAL::Epick_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, false);
+ benchmark_cech<CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, false);
+ // Safe
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, false);
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, false);
+ // Exact
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, true);
+ auto cech_stree = benchmark_cech<CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, true);
+
+ std::clog << cech_stree.num_simplices() << " ; ";
+ std::clog << rips_stree.num_simplices() << ";" << std::endl;
+ }
+ }
}
- }
}
- }
- return 0;
+ return 0;
}
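
The rewritten benchmark drops the bundled Miniball and instead times the Cech construction over several CGAL kernels: Epick_d (fast, inexact), Epeck_d (safe, exact predicates), and Epeck_d with exact = true (exact filtration values too). The type choices, in a sketch:

#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>

#include <CGAL/Epick_d.h>
#include <CGAL/Epeck_d.h>

using Simplex_tree = Gudhi::Simplex_tree<>;
// Fast: inexact constructions, dimension fixed at compile time.
using Fast_cech = Gudhi::cech_complex::Cech_complex<
    CGAL::Epick_d<CGAL::Dimension_tag<3>>, Simplex_tree>;
// Safe/Exact: exact predicates; pass exact = true to the constructor
// to also certify the filtration values.
using Exact_cech = Gudhi::cech_complex::Cech_complex<
    CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>, Simplex_tree>;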
diff --git a/src/Cech_complex/concept/SimplicialComplexForCech.h b/src/Cech_complex/concept/SimplicialComplexForCech.h
index 00c7df3a..6202fe92 100644
--- a/src/Cech_complex/concept/SimplicialComplexForCech.h
+++ b/src/Cech_complex/concept/SimplicialComplexForCech.h
@@ -47,8 +47,8 @@ struct SimplicialComplexForCech {
};
-} // namespace alpha_complex
+} // namespace cech_complex
} // namespace Gudhi
-#endif // CONCEPT_ALPHA_COMPLEX_SIMPLICIAL_COMPLEX_FOR_ALPHA_H_
+#endif // CONCEPT_CECH_COMPLEX_SIMPLICIAL_COMPLEX_FOR_CECH_H_
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 80c88dc6..73093c07 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -17,7 +17,7 @@ namespace cech_complex {
/** \defgroup cech_complex Čech complex
*
- * \author Vincent Rouvreau
+ * \author Vincent Rouvreau, Hind Montassif
*
* @{
*
@@ -28,7 +28,7 @@ namespace cech_complex {
* <a target="_blank" href="https://en.wikipedia.org/wiki/Simplicial_complex">simplicial complex</a> constructed
* from a proximity graph. The set of all simplices is filtered by the radius of their minimal enclosing ball.
*
- * The input shall be a point cloud in an Euclidean space.
+ * The input shall be a range of points where a point is defined as <a target="_blank" href="https://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Point__d.html">CGAL kernel Point_d</a>.
*
* \remark For people only interested in the topology of the \ref cech_complex (for instance persistence),
* \ref alpha_complex is equivalent to the \ref cech_complex and much smaller if you do not bound the radii.
@@ -37,8 +37,7 @@ namespace cech_complex {
* \subsection cechalgorithm Algorithm
*
* Cech_complex first builds a proximity graph from a point cloud.
- * The filtration value of each edge of the `Gudhi::Proximity_graph` is computed from
- * `Gudhi::Minimal_enclosing_ball_radius` function.
+ * The filtration value of each edge of the `Gudhi::Proximity_graph` is computed using CGAL kernel functions.
*
* All edges that have a filtration value strictly greater than a user given maximal radius value, \f$max\_radius\f$,
* are not inserted into the complex.
@@ -60,20 +59,9 @@ namespace cech_complex {
*
* \image html "cech_complex_representation.png" "Čech complex expansion"
*
- * The minimal ball radius computation is insured by
- * <a target="_blank" href="https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html">
- * the miniball software (V3.0)</a> - Smallest Enclosing Balls of Points - and distributed with GUDHI.
- * Please refer to
- * <a target="_blank" href="https://people.inf.ethz.ch/gaertner/subdir/texts/own_work/esa99_final.pdf">
- * the miniball software design description</a> for more information about this computation.
- *
* This radius computation is the reason why the Cech_complex is taking much more time to be computed than the
* \ref rips_complex but it offers more topological guarantees.
*
- * If the Cech_complex interfaces are not detailed enough for your need, please refer to
- * <a href="_cech_complex_2cech_complex_step_by_step_8cpp-example.html">
- * cech_complex_step_by_step.cpp</a> example, where the graph construction over the Simplex_tree is more detailed.
- *
* \subsection cechpointscloudexample Example from a point cloud
*
* This example builds the proximity graph from the given points, and maximal radius values.
@@ -81,7 +69,7 @@ namespace cech_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Cech_complex/cech_complex_example_from_points.cpp
+ * \include cech_complex_example_from_points.cpp
*
* When launching (maximal enclosing ball radius is 1., is expanded until dimension 2):
*
@@ -90,7 +78,7 @@ namespace cech_complex {
*
* the program output is:
*
- * \include Cech_complex/cech_complex_example_from_points_for_doc.txt
+ * \include cech_complex_example_from_points_for_doc.txt
*
*/
/** @} */ // end defgroup cech_complex
diff --git a/src/Cech_complex/example/CMakeLists.txt b/src/Cech_complex/example/CMakeLists.txt
index ab391215..7d52ed5e 100644
--- a/src/Cech_complex/example/CMakeLists.txt
+++ b/src/Cech_complex/example/CMakeLists.txt
@@ -1,16 +1,9 @@
-cmake_minimum_required(VERSION 2.6)
project(Cech_complex_examples)
-add_executable ( Cech_complex_example_step_by_step cech_complex_step_by_step.cpp )
-target_link_libraries(Cech_complex_example_step_by_step ${Boost_PROGRAM_OPTIONS_LIBRARY})
-if (TBB_FOUND)
- target_link_libraries(Cech_complex_example_step_by_step ${TBB_LIBRARIES})
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ add_executable ( Cech_complex_example_from_points cech_complex_example_from_points.cpp)
+ if (TBB_FOUND)
+ target_link_libraries(Cech_complex_example_from_points ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Cech_complex_example_from_points COMMAND $<TARGET_FILE:Cech_complex_example_from_points>)
endif()
-add_test(NAME Cech_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:Cech_complex_example_step_by_step>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-d" "3")
-
-add_executable ( Cech_complex_example_from_points cech_complex_example_from_points.cpp)
-if (TBB_FOUND)
- target_link_libraries(Cech_complex_example_from_points ${TBB_LIBRARIES})
-endif()
-add_test(NAME Cech_complex_example_from_points COMMAND $<TARGET_FILE:Cech_complex_example_from_points>)
diff --git a/src/Cech_complex/example/cech_complex_example_from_points.cpp b/src/Cech_complex/example/cech_complex_example_from_points.cpp
index 3cc5a4df..ef9071ec 100644
--- a/src/Cech_complex/example/cech_complex_example_from_points.cpp
+++ b/src/Cech_complex/example/cech_complex_example_from_points.cpp
@@ -1,30 +1,33 @@
#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
+
#include <iostream>
#include <string>
#include <vector>
-#include <array>
int main() {
// Type definitions
- using Point_cloud = std::vector<std::array<double, 2>>;
using Simplex_tree = Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_fast_persistence>;
using Filtration_value = Simplex_tree::Filtration_value;
- using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
+ using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
+ using Point = typename Kernel::Point_d;
+ using Point_cloud = std::vector<Point>;
+ using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
Point_cloud points;
- points.push_back({1., 0.}); // 0
- points.push_back({0., 1.}); // 1
- points.push_back({2., 1.}); // 2
- points.push_back({3., 2.}); // 3
- points.push_back({0., 3.}); // 4
- points.push_back({3. + std::sqrt(3.), 3.}); // 5
- points.push_back({1., 4.}); // 6
- points.push_back({3., 4.}); // 7
- points.push_back({2., 4. + std::sqrt(3.)}); // 8
- points.push_back({0., 4.}); // 9
- points.push_back({-0.5, 2.}); // 10
+ points.emplace_back(1., 0.); // 0
+ points.emplace_back(0., 1.); // 1
+ points.emplace_back(2., 1.); // 2
+ points.emplace_back(3., 2.); // 3
+ points.emplace_back(0., 3.); // 4
+ points.emplace_back(3. + std::sqrt(3.), 3.); // 5
+ points.emplace_back(1., 4.); // 6
+ points.emplace_back(3., 4.); // 7
+ points.emplace_back(2., 4. + std::sqrt(3.)); // 8
+ points.emplace_back(0., 4.); // 9
+ points.emplace_back(-0.5, 2.); // 10
// ----------------------------------------------------------------------------
// Init of a Cech complex from points
@@ -37,18 +40,18 @@ int main() {
// ----------------------------------------------------------------------------
// Display information about the one skeleton Cech complex
// ----------------------------------------------------------------------------
- std::cout << "Cech complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
+ std::clog << "Cech complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
<< stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << stree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
}
return 0;
}
diff --git a/src/Cech_complex/example/cech_complex_step_by_step.cpp b/src/Cech_complex/example/cech_complex_step_by_step.cpp
deleted file mode 100644
index b3d05697..00000000
--- a/src/Cech_complex/example/cech_complex_step_by_step.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Vincent Rouvreau
- *
- * Copyright (C) 2018 Inria
- *
- * Modification(s):
- * - YYYY/MM Author: Description of the modification
- */
-
-#include <gudhi/graph_simplicial_complex.h>
-#include <gudhi/distance_functions.h>
-#include <gudhi/Simplex_tree.h>
-#include <gudhi/Points_off_io.h>
-
-#include <gudhi/Miniball.hpp>
-
-#include <boost/program_options.hpp>
-
-#include <string>
-#include <vector>
-#include <limits> // infinity
-#include <utility> // for pair
-#include <map>
-
-// ----------------------------------------------------------------------------
-// rips_persistence_step_by_step is an example of each step that is required to
-// build a Rips over a Simplex_tree. Please refer to rips_persistence to see
-// how to do the same thing with the Rips_complex wrapper for less detailed
-// steps.
-// ----------------------------------------------------------------------------
-
-// Types definition
-using Simplex_tree = Gudhi::Simplex_tree<>;
-using Simplex_handle = Simplex_tree::Simplex_handle;
-using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<double>;
-using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
-
-class Cech_blocker {
- private:
- using Point_cloud = std::vector<Point>;
- using Point_iterator = Point_cloud::const_iterator;
- using Coordinate_iterator = Point::const_iterator;
- using Min_sphere = Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- public:
- bool operator()(Simplex_handle sh) {
- std::vector<Point> points;
- for (auto vertex : simplex_tree_.simplex_vertex_range(sh)) {
- points.push_back(point_cloud_[vertex]);
-#ifdef DEBUG_TRACES
- std::cout << "#(" << vertex << ")#";
-#endif // DEBUG_TRACES
- }
- Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points);
-#ifdef DEBUG_TRACES
- std::cout << "radius = " << radius << " - " << (radius > max_radius_) << std::endl;
-#endif // DEBUG_TRACES
- simplex_tree_.assign_filtration(sh, radius);
- return (radius > max_radius_);
- }
- Cech_blocker(Simplex_tree& simplex_tree, Filtration_value max_radius, const std::vector<Point>& point_cloud)
- : simplex_tree_(simplex_tree), max_radius_(max_radius), point_cloud_(point_cloud) {
- dimension_ = point_cloud_[0].size();
- }
-
- private:
- Simplex_tree simplex_tree_;
- Filtration_value max_radius_;
- std::vector<Point> point_cloud_;
- int dimension_;
-};
-
-void program_options(int argc, char* argv[], std::string& off_file_points, Filtration_value& max_radius, int& dim_max);
-
-int main(int argc, char* argv[]) {
- std::string off_file_points;
- Filtration_value max_radius;
- int dim_max;
-
- program_options(argc, argv, off_file_points, max_radius, dim_max);
-
- // Extract the points from the file filepoints
- Points_off_reader off_reader(off_file_points);
-
- // Compute the proximity graph of the points
- Proximity_graph prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(off_reader.get_point_cloud(), max_radius,
- Gudhi::Minimal_enclosing_ball_radius());
-
- // Construct the Rips complex in a Simplex Tree
- Simplex_tree st;
- // insert the proximity graph in the simplex tree
- st.insert_graph(prox_graph);
- // expand the graph until dimension dim_max
- st.expansion_with_blockers(dim_max, Cech_blocker(st, max_radius, off_reader.get_point_cloud()));
-
- std::cout << "The complex contains " << st.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << st.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
-
-#if DEBUG_TRACES
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
- for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " "
- << "[" << st.filtration(f_simplex) << "] ";
- for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << static_cast<int>(vertex) << " ";
- }
- std::cout << std::endl;
- }
-#endif // DEBUG_TRACES
-
- return 0;
-}
-
-void program_options(int argc, char* argv[], std::string& off_file_points, Filtration_value& max_radius, int& dim_max) {
- namespace po = boost::program_options;
- po::options_description hidden("Hidden options");
- hidden.add_options()("input-file", po::value<std::string>(&off_file_points),
- "Name of an OFF file containing a point set.\n");
-
- po::options_description visible("Allowed options", 100);
- visible.add_options()("help,h", "produce help message")(
- "max-radius,r",
- po::value<Filtration_value>(&max_radius)->default_value(std::numeric_limits<Filtration_value>::infinity()),
- "Maximal length of an edge for the Rips complex construction.")(
- "cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
- "Maximal dimension of the Rips complex we want to compute.");
-
- po::positional_options_description pos;
- pos.add("input-file", 1);
-
- po::options_description all;
- all.add(visible).add(hidden);
-
- po::variables_map vm;
- po::store(po::command_line_parser(argc, argv).options(all).positional(pos).run(), vm);
- po::notify(vm);
-
- if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Construct a Cech complex defined on a set of input points.\n \n";
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
- exit(-1);
- }
-}
diff --git a/src/Cech_complex/include/gudhi/Cech_complex.h b/src/Cech_complex/include/gudhi/Cech_complex.h
index b0871e10..dbdf5e93 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex.h
@@ -1,24 +1,24 @@
/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
* See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Vincent Rouvreau
+ * Author(s): Vincent Rouvreau, Hind Montassif
*
* Copyright (C) 2018 Inria
*
* Modification(s):
* - YYYY/MM Author: Description of the modification
+ * - 2022/02 Hind Montassif: Replace MiniBall with Sphere_circumradius
*/
#ifndef CECH_COMPLEX_H_
#define CECH_COMPLEX_H_
-#include <gudhi/distance_functions.h> // for Gudhi::Minimal_enclosing_ball_radius
+#include <gudhi/Sphere_circumradius.h> // for Gudhi::cech_complex::Sphere_circumradius
#include <gudhi/graph_simplicial_complex.h> // for Gudhi::Proximity_graph
#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
#include <gudhi/Cech_complex_blocker.h> // for Gudhi::cech_complex::Cech_blocker
#include <iostream>
#include <stdexcept> // for exception management
-#include <vector>
namespace Gudhi {
@@ -26,55 +26,55 @@ namespace cech_complex {
/**
* \class Cech_complex
- * \brief Cech complex data structure.
+ * \brief Cech complex class.
*
* \ingroup cech_complex
*
* \details
- * The data structure is a proximity graph, containing edges when the edge length is less or equal
- * to a given max_radius. Edge length is computed from `Gudhi::Minimal_enclosing_ball_radius` distance function.
+ * Cech complex is a simplicial complex where the set of all simplices is filtered
+ * by the radius of their minimal enclosing ball and bounded by the given max_radius.
*
- * \tparam SimplicialComplexForProximityGraph furnishes `Vertex_handle` and `Filtration_value` type definition required
- * by `Gudhi::Proximity_graph`.
+ * \tparam Kernel CGAL kernel: either Epick_d or Epeck_d.
+ *
+ * \tparam SimplicialComplexForCechComplex furnishes `Vertex_handle` and `Filtration_value` type definition required
+ * by `Gudhi::Proximity_graph` and Cech blocker.
*
- * \tparam ForwardPointRange must be a range for which `std::begin()` and `std::end()` methods return input
- * iterators on a point. `std::begin()` and `std::end()` methods are also required for a point.
*/
-template <typename SimplicialComplexForProximityGraph, typename ForwardPointRange>
+template <typename Kernel, typename SimplicialComplexForCechComplex>
class Cech_complex {
private:
// Required by compute_proximity_graph
- using Vertex_handle = typename SimplicialComplexForProximityGraph::Vertex_handle;
- using Filtration_value = typename SimplicialComplexForProximityGraph::Filtration_value;
- using Proximity_graph = Gudhi::Proximity_graph<SimplicialComplexForProximityGraph>;
-
- // Retrieve Coordinate type from ForwardPointRange
- using Point_from_range_iterator = typename boost::range_const_iterator<ForwardPointRange>::type;
- using Point_from_range = typename std::iterator_traits<Point_from_range_iterator>::value_type;
- using Coordinate_iterator = typename boost::range_const_iterator<Point_from_range>::type;
- using Coordinate = typename std::iterator_traits<Coordinate_iterator>::value_type;
-
- public:
- // Point and Point_cloud type definition
- using Point = std::vector<Coordinate>;
- using Point_cloud = std::vector<Point>;
-
- public:
- /** \brief Cech_complex constructor from a list of points.
+ using Vertex_handle = typename SimplicialComplexForCechComplex::Vertex_handle;
+ using Filtration_value = typename SimplicialComplexForCechComplex::Filtration_value;
+ using Proximity_graph = Gudhi::Proximity_graph<SimplicialComplexForCechComplex>;
+
+ using cech_blocker = Cech_blocker<SimplicialComplexForCechComplex, Cech_complex, Kernel>;
+
+ using Point_d = typename cech_blocker::Point_d;
+ using Point_cloud = std::vector<Point_d>;
+
+ // Numeric type of coordinates in the kernel
+ using FT = typename cech_blocker::FT;
+ // Sphere is a pair of point and squared radius.
+ using Sphere = typename cech_blocker::Sphere;
+
+ public:
+ /** \brief Cech_complex constructor from a range of points.
*
- * @param[in] points Range of points.
+ * @param[in] points Range of points where each point is defined as `kernel::Point_d`.
* @param[in] max_radius Maximal radius value.
- *
- * \tparam ForwardPointRange must be a range of Point. Point must be a range of <b>copyable</b> Cartesian coordinates.
+ * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
*
*/
- Cech_complex(const ForwardPointRange& points, Filtration_value max_radius) : max_radius_(max_radius) {
- // Point cloud deep copy
- point_cloud_.reserve(boost::size(points));
- for (auto&& point : points) point_cloud_.emplace_back(std::begin(point), std::end(point));
+ template<typename InputPointRange >
+ Cech_complex(const InputPointRange & points, Filtration_value max_radius, const bool exact = false) : max_radius_(max_radius), exact_(exact) {
- cech_skeleton_graph_ = Gudhi::compute_proximity_graph<SimplicialComplexForProximityGraph>(
- point_cloud_, max_radius_, Gudhi::Minimal_enclosing_ball_radius());
+ point_cloud_.assign(std::begin(points), std::end(points));
+
+ cech_skeleton_graph_ = Gudhi::compute_proximity_graph<SimplicialComplexForCechComplex>(
+ point_cloud_, max_radius_, Sphere_circumradius<Kernel, Filtration_value>(exact));
}
/** \brief Initializes the simplicial complex from the proximity graph and expands it until a given maximal
@@ -85,7 +85,6 @@ class Cech_complex {
* @exception std::invalid_argument In debug mode, if `complex.num_vertices()` does not return 0.
*
*/
- template <typename SimplicialComplexForCechComplex>
void create_complex(SimplicialComplexForCechComplex& complex, int dim_max) {
GUDHI_CHECK(complex.num_vertices() == 0,
std::invalid_argument("Cech_complex::create_complex - simplicial complex is not empty"));
@@ -93,8 +92,7 @@ class Cech_complex {
// insert the proximity graph in the simplicial complex
complex.insert_graph(cech_skeleton_graph_);
// expand the graph until dimension dim_max
- complex.expansion_with_blockers(dim_max,
- Cech_blocker<SimplicialComplexForCechComplex, Cech_complex>(&complex, this));
+ complex.expansion_with_blockers(dim_max, cech_blocker(&complex, this));
}
/** @return max_radius value given at construction. */
@@ -103,12 +101,24 @@ class Cech_complex {
/** @param[in] vertex Point position in the range.
* @return The point.
*/
- const Point& get_point(Vertex_handle vertex) const { return point_cloud_[vertex]; }
+ const Point_d& get_point(Vertex_handle vertex) const { return point_cloud_[vertex]; }
+
+ /**
+ * @return Vector of cached spheres.
+ */
+ std::vector<Sphere> & get_cache() { return cache_; }
+
+ /** \brief Check exact option
+ * @return Exact option.
+ */
+ const bool is_exact() { return exact_; }
private:
Proximity_graph cech_skeleton_graph_;
Filtration_value max_radius_;
Point_cloud point_cloud_;
+ std::vector<Sphere> cache_;
+ const bool exact_;
};
} // namespace cech_complex
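
Putting the new interface together: construction takes the point range, the maximal radius, and the optional exact flag, while the circumsphere cache exposed by get_cache() lets the blocker reuse spheres across faces. A minimal usage sketch mirroring the updated example:

#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>

#include <CGAL/Epeck_d.h>

#include <vector>

int main() {
  using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
  using Simplex_tree = Gudhi::Simplex_tree<>;
  using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
  std::vector<Kernel::Point_d> points;
  points.emplace_back(1., 0.);
  points.emplace_back(0., 1.);
  points.emplace_back(2., 1.);
  Cech_complex cech(points, /*max_radius=*/1., /*exact=*/true);
  Simplex_tree stree;
  cech.create_complex(stree, /*dim_max=*/2);
  return 0;
}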
diff --git a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
index 068cdde3..e78e37b7 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
@@ -11,10 +11,12 @@
#ifndef CECH_COMPLEX_BLOCKER_H_
#define CECH_COMPLEX_BLOCKER_H_
-#include <gudhi/distance_functions.h> // for Gudhi::Minimal_enclosing_ball_radius
+#include <CGAL/NT_converter.h> // for casting from FT to Filtration_value
+#include <CGAL/Lazy_exact_nt.h> // for CGAL::exact
#include <iostream>
#include <vector>
+#include <set>
#include <cmath> // for std::sqrt
namespace Gudhi {
@@ -30,37 +32,104 @@ namespace cech_complex {
* \details
* Čech blocker is an oracle constructed from a Cech_complex and a simplicial complex.
*
- * \tparam SimplicialComplexForProximityGraph furnishes `Simplex_handle` and `Filtration_value` type definition,
+ * \tparam SimplicialComplexForCech furnishes `Simplex_handle` and `Filtration_value` type definition,
* `simplex_vertex_range(Simplex_handle sh)` and `assign_filtration(Simplex_handle sh, Filtration_value filt)` methods.
*
- * \tparam Chech_complex is required by the blocker.
+ * \tparam Cech_complex is required by the blocker.
+ *
+ * \tparam Kernel CGAL kernel: either Epick_d or Epeck_d.
*/
-template <typename SimplicialComplexForCech, typename Cech_complex>
+template <typename SimplicialComplexForCech, typename Cech_complex, typename Kernel>
class Cech_blocker {
+
+ public:
+
+ using Point_d = typename Kernel::Point_d;
+ // Numeric type of coordinates in the kernel
+ using FT = typename Kernel::FT;
+ // Sphere is a pair of point and squared radius.
+ using Sphere = typename std::pair<Point_d, FT>;
+
private:
- using Point_cloud = typename Cech_complex::Point_cloud;
using Simplex_handle = typename SimplicialComplexForCech::Simplex_handle;
using Filtration_value = typename SimplicialComplexForCech::Filtration_value;
+ using Simplex_key = typename SimplicialComplexForCech::Simplex_key;
+
+ template<class PointIterator>
+ Sphere get_sphere(PointIterator begin, PointIterator end) const {
+ Point_d c = kernel_.construct_circumcenter_d_object()(begin, end);
+ FT r = kernel_.squared_distance_d_object()(c, *begin);
+ return std::make_pair(std::move(c), std::move(r));
+ }
public:
+
/** \internal \brief Čech complex blocker operator() - the oracle - assigns the filtration value from the simplex
* radius and returns if the simplex expansion must be blocked.
* \param[in] sh The Simplex_handle.
* \return true if the simplex radius is greater than the Cech_complex max_radius*/
bool operator()(Simplex_handle sh) {
+ using Point_cloud = std::vector<Point_d>;
+ Filtration_value radius = 0;
+ bool is_min_enclos_ball = false;
Point_cloud points;
- for (auto vertex : sc_ptr_->simplex_vertex_range(sh)) {
- points.push_back(cc_ptr_->get_point(vertex));
+ points.reserve(sc_ptr_->dimension(sh)+1);
+
+ // For each face of the simplex sh, test whether the vertex opposite to the face lies inside the
+ // face's minimal enclosing ball. If it does, reuse that ball and exit the loop; otherwise the
+ // sphere of sh is the circumsphere of all its vertices.
+ for (auto face_opposite_vertex : sc_ptr_->boundary_opposite_vertex_simplex_range(sh)) {
+ auto k = sc_ptr_->key(face_opposite_vertex.first);
+ Simplex_key sph_key;
+ if(k != sc_ptr_->null_key()) {
+ sph_key = k;
+ }
+ else {
+ for (auto vertex : sc_ptr_->simplex_vertex_range(face_opposite_vertex.first)) {
+ points.push_back(cc_ptr_->get_point(vertex));
+#ifdef DEBUG_TRACES
+ std::clog << "#(" << vertex << ")#";
+#endif // DEBUG_TRACES
+ }
+ // Put edge sphere in cache
+ sph_key = cc_ptr_->get_cache().size();
+ sc_ptr_->assign_key(face_opposite_vertex.first, sph_key);
+ cc_ptr_->get_cache().push_back(get_sphere(points.cbegin(), points.cend()));
+ // Clear face points
+ points.clear();
+ }
+ // Check if the minimal enclosing ball of current face contains the extra point/opposite vertex
+ Sphere const& sph = cc_ptr_->get_cache()[sph_key];
+ if (kernel_.squared_distance_d_object()(sph.first, cc_ptr_->get_point(face_opposite_vertex.second)) <= sph.second) {
+ is_min_enclos_ball = true;
+ sc_ptr_->assign_key(sh, sph_key);
+ radius = sc_ptr_->filtration(face_opposite_vertex.first);
#ifdef DEBUG_TRACES
- std::cout << "#(" << vertex << ")#";
+ std::clog << "center: " << sph.first << ", radius: " << radius << std::endl;
#endif // DEBUG_TRACES
+ break;
+ }
}
- Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points);
+ // Spheres of each face don't contain the whole simplex
+ if(!is_min_enclos_ball) {
+ for (auto vertex : sc_ptr_->simplex_vertex_range(sh)) {
+ points.push_back(cc_ptr_->get_point(vertex));
+ }
+ Sphere sph = get_sphere(points.cbegin(), points.cend());
+#if CGAL_VERSION_NR >= 1050000000
+ if(cc_ptr_->is_exact()) CGAL::exact(sph.second);
+#endif
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+ radius = std::sqrt(cast_to_fv(sph.second));
+
+ sc_ptr_->assign_key(sh, cc_ptr_->get_cache().size());
+ cc_ptr_->get_cache().push_back(std::move(sph));
+ }
+
#ifdef DEBUG_TRACES
- if (radius > cc_ptr_->max_radius()) std::cout << "radius > max_radius => expansion is blocked\n";
+ if (radius > cc_ptr_->max_radius()) std::clog << "radius > max_radius => expansion is blocked\n";
#endif // DEBUG_TRACES
- sc_ptr_->assign_filtration(sh, radius);
+ // Only assign the radius as filtration value if it is greater than the current one
+ if (radius > sc_ptr_->filtration(sh)) sc_ptr_->assign_filtration(sh, radius);
return (radius > cc_ptr_->max_radius());
}
@@ -70,6 +139,7 @@ class Cech_blocker {
private:
SimplicialComplexForCech* sc_ptr_;
Cech_complex* cc_ptr_;
+ Kernel kernel_;
};
} // namespace cech_complex
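The geometric test at the heart of the blocker, shown in isolation; a sketch under the patch's assumptions (Kernel is CGAL::Epick_d or CGAL::Epeck_d, the function name is hypothetical):

#include <CGAL/Epeck_d.h>

// A simplex shares the minimal enclosing ball of one of its faces exactly when the
// vertex opposite to that face already lies inside the face's sphere; only when this
// fails for every face does the blocker fall back to the circumsphere of all vertices.
template <typename Kernel>
bool face_sphere_encloses_simplex(const Kernel& kernel,
                                  const typename Kernel::Point_d& center,
                                  const typename Kernel::FT& squared_radius,
                                  const typename Kernel::Point_d& opposite_vertex) {
  return kernel.squared_distance_d_object()(center, opposite_vertex) <= squared_radius;
}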
diff --git a/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT b/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT
deleted file mode 100644
index dbe4c553..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT
+++ /dev/null
@@ -1,4 +0,0 @@
-The miniball software is available under the GNU General Public License (GPLv3 - https://www.gnu.org/copyleft/gpl.html).
-If your intended use is not compliant with this license, please buy a commercial license (EUR 500 - https://people.inf.ethz.ch/gaertner/subdir/software/miniball/license.html).
-You need a license if the software that you develop using Miniball V3.0 is not open source.
-
diff --git a/src/Cech_complex/include/gudhi/Miniball.README b/src/Cech_complex/include/gudhi/Miniball.README
deleted file mode 100644
index 033d8953..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.README
+++ /dev/null
@@ -1,26 +0,0 @@
-https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html
-
-Smallest Enclosing Balls of Points - Fast and Robust in C++.
-(high-quality software for smallest enclosing balls of balls is available in the computational geometry algorithms library CGAL)
-
-
-This is the miniball software (V3.0) for computing smallest enclosing balls of points in arbitrary dimensions. It consists of a C++ header file Miniball.hpp (around 500 lines of code) and two example programs miniball_example.cpp and miniball_example_containers.cpp that demonstrate the usage. The first example stores the coordinates of the input points in a two-dimensional array, the second example uses a list of vectors to show how generic containers can be used.
-
-Credits: Aditya Gupta and Alexandros Konstantinakis-Karmis have significantly contributed to this version of the software.
-
-Changes - https://people.inf.ethz.ch/gaertner/subdir/software/miniball/changes.txt - from previous versions.
-
-The theory - https://people.inf.ethz.ch/gaertner/subdir/texts/own_work/esa99_final.pdf - behind the miniball software (Proc. 7th Annual European Symposium on Algorithms (ESA), Lecture Notes in Computer Science 1643, Springer-Verlag, pp.325-338, 1999).
-
-Main Features:
-
- Very fast in low dimensions. 1 million points in 5-space are processed within 0.05 seconds on any recent machine.
-
- High numerical stability. Almost all input degeneracies (cospherical points, multiple points, points very close together) are routinely handled.
-
- Easily integrates into your code. You can freely choose the coordinate type of your points and the container to store the points. If you still need to adapt the code, the header is small and readable and contains documentation for all major methods.
-
-
-Changes done for the GUDHI version of MiniBall:
- - Add include guard
- - Move Miniball namespace inside a new Gudhi namespace
diff --git a/src/Cech_complex/include/gudhi/Miniball.hpp b/src/Cech_complex/include/gudhi/Miniball.hpp
deleted file mode 100644
index ce6cbb5b..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.hpp
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copright (C) 1999-2013, Bernd Gaertner
-// $Rev: 3581 $
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-//
-// Contact:
-// --------
-// Bernd Gaertner
-// Institute of Theoretical Computer Science
-// ETH Zuerich
-// CAB G31.1
-// CH-8092 Zuerich, Switzerland
-// http://www.inf.ethz.ch/personal/gaertner
-
-#ifndef MINIBALL_HPP_
-#define MINIBALL_HPP_
-
-#include <cassert>
-#include <algorithm>
-#include <list>
-#include <ctime>
-#include <limits>
-
-namespace Gudhi {
-
-namespace Miniball {
-
- // Global Functions
- // ================
- template <typename NT>
- inline NT mb_sqr (NT r) {return r*r;}
-
- // Functors
- // ========
-
- // functor to map a point iterator to the corresponding coordinate iterator;
- // generic version for points whose coordinate containers have begin()
- template < typename Pit_, typename Cit_ >
- struct CoordAccessor {
- typedef Pit_ Pit;
- typedef Cit_ Cit;
- inline Cit operator() (Pit it) const { return (*it).begin(); }
- };
-
- // partial specialization for points whose coordinate containers are arrays
- template < typename Pit_, typename Cit_ >
- struct CoordAccessor<Pit_, Cit_*> {
- typedef Pit_ Pit;
- typedef Cit_* Cit;
- inline Cit operator() (Pit it) const { return *it; }
- };
-
- // Class Declaration
- // =================
-
- template <typename CoordAccessor>
- class Miniball {
- private:
- // types
- // The iterator type to go through the input points
- typedef typename CoordAccessor::Pit Pit;
- // The iterator type to go through the coordinates of a single point.
- typedef typename CoordAccessor::Cit Cit;
- // The coordinate type
- typedef typename std::iterator_traits<Cit>::value_type NT;
- // The iterator to go through the support points
- typedef typename std::list<Pit>::iterator Sit;
-
- // data members...
- const int d; // dimension
- Pit points_begin;
- Pit points_end;
- CoordAccessor coord_accessor;
- double time;
- const NT nt0; // NT(0)
-
- //...for the algorithms
- std::list<Pit> L;
- Sit support_end;
- int fsize; // number of forced points
- int ssize; // number of support points
-
- // ...for the ball updates
- NT* current_c;
- NT current_sqr_r;
- NT** c;
- NT* sqr_r;
-
- // helper arrays
- NT* q0;
- NT* z;
- NT* f;
- NT** v;
- NT** a;
-
- public:
- // The iterator type to go through the support points
- typedef typename std::list<Pit>::const_iterator SupportPointIterator;
-
- // PRE: [begin, end) is a nonempty range
- // POST: computes the smallest enclosing ball of the points in the range
- // [begin, end); the functor a maps a point iterator to an iterator
- // through the d coordinates of the point
- Miniball (int d_, Pit begin, Pit end, CoordAccessor ca = CoordAccessor());
-
- // POST: returns a pointer to the first element of an array that holds
- // the d coordinates of the center of the computed ball
- const NT* center () const;
-
- // POST: returns the squared radius of the computed ball
- NT squared_radius () const;
-
- // POST: returns the number of support points of the computed ball;
- // the support points form a minimal set with the same smallest
- // enclosing ball as the input set; in particular, the support
- // points are on the boundary of the computed ball, and their
- // number is at most d+1
- int nr_support_points () const;
-
- // POST: returns an iterator to the first support point
- SupportPointIterator support_points_begin () const;
-
- // POST: returns a past-the-end iterator for the range of support points
- SupportPointIterator support_points_end () const;
-
- // POST: returns the maximum excess of any input point w.r.t. the computed
- // ball, divided by the squared radius of the computed ball. The
- // excess of a point is the difference between its squared distance
- // from the center and the squared radius; Ideally, the return value
- // is 0. subopt is set to the absolute value of the most negative
- // coefficient in the affine combination of the support points that
- // yields the center. Ideally, this is a convex combination, and there
- // is no negative coefficient in which case subopt is set to 0.
- NT relative_error (NT& subopt) const;
-
- // POST: return true if the relative error is at most tol, and the
- // suboptimality is 0; the default tolerance is 10 times the
- // coordinate type's machine epsilon
- bool is_valid (NT tol = NT(10) * std::numeric_limits<NT>::epsilon()) const;
-
- // POST: returns the time in seconds taken by the constructor call for
- // computing the smallest enclosing ball
- double get_time() const;
-
- // POST: deletes dynamically allocated arrays
- ~Miniball();
-
- private:
- void mtf_mb (Sit n);
- void mtf_move_to_front (Sit j);
- void pivot_mb (Pit n);
- void pivot_move_to_front (Pit j);
- NT excess (Pit pit) const;
- void pop ();
- bool push (Pit pit);
- NT suboptimality () const;
- void create_arrays();
- void delete_arrays();
- };
-
- // Class Definition
- // ================
- template <typename CoordAccessor>
- Miniball<CoordAccessor>::Miniball (int d_, Pit begin, Pit end,
- CoordAccessor ca)
- : d (d_),
- points_begin (begin),
- points_end (end),
- coord_accessor (ca),
- time (clock()),
- nt0 (NT(0)),
- L(),
- support_end (L.begin()),
- fsize(0),
- ssize(0),
- current_c (NULL),
- current_sqr_r (NT(-1)),
- c (NULL),
- sqr_r (NULL),
- q0 (NULL),
- z (NULL),
- f (NULL),
- v (NULL),
- a (NULL)
- {
- assert (points_begin != points_end);
- create_arrays();
-
- // set initial center
- for (int j=0; j<d; ++j) c[0][j] = nt0;
- current_c = c[0];
-
- // compute miniball
- pivot_mb (points_end);
-
- // update time
- time = (clock() - time) / CLOCKS_PER_SEC;
- }
-
- template <typename CoordAccessor>
- Miniball<CoordAccessor>::~Miniball()
- {
- delete_arrays();
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::create_arrays()
- {
- c = new NT*[d+1];
- v = new NT*[d+1];
- a = new NT*[d+1];
- for (int i=0; i<d+1; ++i) {
- c[i] = new NT[d];
- v[i] = new NT[d];
- a[i] = new NT[d];
- }
- sqr_r = new NT[d+1];
- q0 = new NT[d];
- z = new NT[d+1];
- f = new NT[d+1];
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::delete_arrays()
- {
- delete[] f;
- delete[] z;
- delete[] q0;
- delete[] sqr_r;
- for (int i=0; i<d+1; ++i) {
- delete[] a[i];
- delete[] v[i];
- delete[] c[i];
- }
- delete[] a;
- delete[] v;
- delete[] c;
- }
-
- template <typename CoordAccessor>
- const typename Miniball<CoordAccessor>::NT*
- Miniball<CoordAccessor>::center () const
- {
- return current_c;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::squared_radius () const
- {
- return current_sqr_r;
- }
-
- template <typename CoordAccessor>
- int Miniball<CoordAccessor>::nr_support_points () const
- {
- assert (ssize < d+2);
- return ssize;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::SupportPointIterator
- Miniball<CoordAccessor>::support_points_begin () const
- {
- return L.begin();
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::SupportPointIterator
- Miniball<CoordAccessor>::support_points_end () const
- {
- return support_end;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::relative_error (NT& subopt) const
- {
- NT e, max_e = nt0;
- // compute maximum absolute excess of support points
- for (SupportPointIterator it = support_points_begin();
- it != support_points_end(); ++it) {
- e = excess (*it);
- if (e < nt0) e = -e;
- if (e > max_e) {
- max_e = e;
- }
- }
- // compute maximum excess of any point
- for (Pit i = points_begin; i != points_end; ++i)
- if ((e = excess (i)) > max_e)
- max_e = e;
-
- subopt = suboptimality();
- assert (current_sqr_r > nt0 || max_e == nt0);
- return (current_sqr_r == nt0 ? nt0 : max_e / current_sqr_r);
- }
-
- template <typename CoordAccessor>
- bool Miniball<CoordAccessor>::is_valid (NT tol) const
- {
- NT suboptimality;
- return ( (relative_error (suboptimality) <= tol) && (suboptimality == 0) );
- }
-
- template <typename CoordAccessor>
- double Miniball<CoordAccessor>::get_time() const
- {
- return time;
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::mtf_mb (Sit n)
- {
- // Algorithm 1: mtf_mb (L_{n-1}, B), where L_{n-1} = [L.begin, n)
- // B: the set of forced points, defining the current ball
- // S: the superset of support points computed by the algorithm
- // --------------------------------------------------------------
- // from B. Gaertner, Fast and Robust Smallest Enclosing Balls, ESA 1999,
- // http://www.inf.ethz.ch/personal/gaertner/texts/own_work/esa99_final.pdf
-
- // PRE: B = S
- assert (fsize == ssize);
-
- support_end = L.begin();
- if ((fsize) == d+1) return;
-
- // incremental construction
- for (Sit i = L.begin(); i != n;)
- {
- // INV: (support_end - L.begin() == |S|-|B|)
- assert (std::distance (L.begin(), support_end) == ssize - fsize);
-
- Sit j = i++;
- if (excess(*j) > nt0)
- if (push(*j)) { // B := B + p_i
- mtf_mb (j); // mtf_mb (L_{i-1}, B + p_i)
- pop(); // B := B - p_i
- mtf_move_to_front(j);
- }
- }
- // POST: the range [L.begin(), support_end) stores the set S\B
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::mtf_move_to_front (Sit j)
- {
- if (support_end == j)
- support_end++;
- L.splice (L.begin(), L, j);
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pivot_mb (Pit n)
- {
- // Algorithm 2: pivot_mb (L_{n-1}), where L_{n-1} = [L.begin, n)
- // --------------------------------------------------------------
- // from B. Gaertner, Fast and Robust Smallest Enclosing Balls, ESA 1999,
- // http://www.inf.ethz.ch/personal/gaertner/texts/own_work/esa99_final.pdf
- NT old_sqr_r;
- const NT* c;
- Pit pivot, k;
- NT e, max_e, sqr_r;
- Cit p;
- do {
- old_sqr_r = current_sqr_r;
- sqr_r = current_sqr_r;
-
- pivot = points_begin;
- max_e = nt0;
- for (k = points_begin; k != n; ++k) {
- p = coord_accessor(k);
- e = -sqr_r;
- c = current_c;
- for (int j=0; j<d; ++j)
- e += mb_sqr<NT>(*p++-*c++);
- if (e > max_e) {
- max_e = e;
- pivot = k;
- }
- }
-
- if (max_e > nt0) {
- // check if the pivot is already contained in the support set
- if (std::find(L.begin(), support_end, pivot) == support_end) {
- assert (fsize == 0);
- if (push (pivot)) {
- mtf_mb(support_end);
- pop();
- pivot_move_to_front(pivot);
- }
- }
- }
- } while (old_sqr_r < current_sqr_r);
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pivot_move_to_front (Pit j)
- {
- L.push_front(j);
- if (std::distance(L.begin(), support_end) == d+2)
- support_end--;
- }
-
- template <typename CoordAccessor>
- inline typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::excess (Pit pit) const
- {
- Cit p = coord_accessor(pit);
- NT e = -current_sqr_r;
- NT* c = current_c;
- for (int k=0; k<d; ++k){
- e += mb_sqr<NT>(*p++-*c++);
- }
- return e;
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pop ()
- {
- --fsize;
- }
-
- template <typename CoordAccessor>
- bool Miniball<CoordAccessor>::push (Pit pit)
- {
- int i, j;
- NT eps = mb_sqr<NT>(std::numeric_limits<NT>::epsilon());
-
- Cit cit = coord_accessor(pit);
- Cit p = cit;
-
- if (fsize==0) {
- for (i=0; i<d; ++i)
- q0[i] = *p++;
- for (i=0; i<d; ++i)
- c[0][i] = q0[i];
- sqr_r[0] = nt0;
- }
- else {
- // set v_fsize to Q_fsize
- for (i=0; i<d; ++i)
- //v[fsize][i] = p[i]-q0[i];
- v[fsize][i] = *p++-q0[i];
-
- // compute the a_{fsize,i}, i< fsize
- for (i=1; i<fsize; ++i) {
- a[fsize][i] = nt0;
- for (j=0; j<d; ++j)
- a[fsize][i] += v[i][j] * v[fsize][j];
- a[fsize][i]*=(2/z[i]);
- }
-
- // update v_fsize to Q_fsize-\bar{Q}_fsize
- for (i=1; i<fsize; ++i) {
- for (j=0; j<d; ++j)
- v[fsize][j] -= a[fsize][i]*v[i][j];
- }
-
- // compute z_fsize
- z[fsize]=nt0;
- for (j=0; j<d; ++j)
- z[fsize] += mb_sqr<NT>(v[fsize][j]);
- z[fsize]*=2;
-
- // reject push if z_fsize too small
- if (z[fsize]<eps*current_sqr_r) {
- return false;
- }
-
- // update c, sqr_r
- p=cit;
- NT e = -sqr_r[fsize-1];
- for (i=0; i<d; ++i)
- e += mb_sqr<NT>(*p++-c[fsize-1][i]);
- f[fsize]=e/z[fsize];
-
- for (i=0; i<d; ++i)
- c[fsize][i] = c[fsize-1][i]+f[fsize]*v[fsize][i];
- sqr_r[fsize] = sqr_r[fsize-1] + e*f[fsize]/2;
- }
- current_c = c[fsize];
- current_sqr_r = sqr_r[fsize];
- ssize = ++fsize;
- return true;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::suboptimality () const
- {
- NT* l = new NT[d+1];
- NT min_l = nt0;
- l[0] = NT(1);
- for (int i=ssize-1; i>0; --i) {
- l[i] = f[i];
- for (int k=ssize-1; k>i; --k)
- l[i]-=a[k][i]*l[k];
- if (l[i] < min_l) min_l = l[i];
- l[0] -= l[i];
- }
- if (l[0] < min_l) min_l = l[0];
- delete[] l;
- if (min_l < nt0)
- return -min_l;
- return nt0;
- }
-} // namespace Miniball
-
-} // namespace Gudhi
-
-#endif // MINIBALL_HPP_
diff --git a/src/Cech_complex/include/gudhi/Sphere_circumradius.h b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
new file mode 100644
index 00000000..2f916c0a
--- /dev/null
+++ b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
@@ -0,0 +1,78 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Hind Montassif
+ *
+ * Copyright (C) 2021 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef SPHERE_CIRCUMRADIUS_H_
+#define SPHERE_CIRCUMRADIUS_H_
+
+#include <CGAL/Epick_d.h> // for CGAL::NT_converter, as #include <CGAL/NT_converter.h> does not compile on its own
+#include <CGAL/Lazy_exact_nt.h> // for CGAL::exact
+
+#include <cmath> // for std::sqrt
+#include <vector>
+
+namespace Gudhi {
+
+namespace cech_complex {
+
+/** \private @brief Compute the circumradius of the sphere passing through points given by a range of coordinates.
+ * The points are assumed to have the same dimension. */
+template<typename Kernel, typename Filtration_value>
+class Sphere_circumradius {
+ private:
+ Kernel kernel_;
+ const bool exact_;
+ public:
+ using FT = typename Kernel::FT;
+ using Point = typename Kernel::Point_d;
+ using Point_cloud = typename std::vector<Point>;
+
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+
+ /** \brief Circumradius of sphere passing through two points using CGAL.
+ *
+ * @param[in] point_1
+ * @param[in] point_2
+ * @return Sphere circumradius passing through two points.
+ * \tparam Point must be a Kernel::Point_d from CGAL.
+ *
+ */
+ Filtration_value operator()(const Point& point_1, const Point& point_2) const {
+ auto squared_dist_obj = kernel_.squared_distance_d_object()(point_1, point_2);
+ if(exact_) CGAL::exact(squared_dist_obj);
+ return std::sqrt(cast_to_fv(squared_dist_obj)) / 2.;
+ }
+
+ /** \brief Circumradius of sphere passing through point cloud using CGAL.
+ *
+ * @param[in] point_cloud The points.
+ * @return Sphere circumradius passing through the points.
+ * \tparam Point_cloud must be a range of Kernel::Point_d points from CGAL.
+ *
+ */
+ Filtration_value operator()(const Point_cloud& point_cloud) const {
+ auto squared_radius_obj = kernel_.compute_squared_radius_d_object()(point_cloud.begin(), point_cloud.end());
+ if(exact_) CGAL::exact(squared_radius_obj);
+ return std::sqrt(cast_to_fv(squared_radius_obj));
+ }
+
+ /** \brief Constructor
+ * @param[in] exact Option for exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
+ */
+ Sphere_circumradius(const bool exact = false) : exact_(exact) {}
+
+};
+
+} // namespace cech_complex
+
+} // namespace Gudhi
+
+#endif // SPHERE_CIRCUMRADIUS_H_
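A minimal usage sketch of this functor (an assumption for illustration: Epeck_d kernel and double filtration values, as in the tests below):

#include <gudhi/Sphere_circumradius.h>
#include <CGAL/Epeck_d.h>
#include <iostream>
#include <vector>

int main() {
  using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
  using Point = Kernel::Point_d;
  std::vector<Kernel::FT> c0 {0., 0.}, c1 {2., 0.}, c2 {1., 1.};
  Point p0(c0.begin(), c0.end()), p1(c1.begin(), c1.end()), p2(c2.begin(), c2.end());

  Gudhi::cech_complex::Sphere_circumradius<Kernel, double> circumradius;
  std::cout << circumradius(p0, p1) << "\n";                          // 1: half the distance between p0 and p1
  std::cout << circumradius(std::vector<Point>{p0, p1, p2}) << "\n";  // 1: circumradius of the triangle
  return 0;
}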
diff --git a/src/Cech_complex/test/CMakeLists.txt b/src/Cech_complex/test/CMakeLists.txt
index db510af3..2d736f27 100644
--- a/src/Cech_complex/test/CMakeLists.txt
+++ b/src/Cech_complex/test/CMakeLists.txt
@@ -1,14 +1,14 @@
-cmake_minimum_required(VERSION 2.6)
-project(Cech_complex_tests)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ include(GUDHI_boost_test)
-include(GUDHI_boost_test)
+ add_executable ( Cech_complex_test_unit test_cech_complex.cpp )
+ if (TBB_FOUND)
+ target_link_libraries(Cech_complex_test_unit ${TBB_LIBRARIES})
+ endif()
-add_executable ( Cech_complex_test_unit test_cech_complex.cpp )
-if (TBB_FOUND)
- target_link_libraries(Cech_complex_test_unit ${TBB_LIBRARIES})
-endif()
+ # Do not forget to copy test files in current binary dir
+ file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-# Do not forget to copy test files in current binary dir
-file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ gudhi_add_boost_test(Cech_complex_test_unit)
-gudhi_add_boost_test(Cech_complex_test_unit)
+endif()
diff --git a/src/Cech_complex/test/test_cech_complex.cpp b/src/Cech_complex/test/test_cech_complex.cpp
index c6b15d7f..f5980e6d 100644
--- a/src/Cech_complex/test/test_cech_complex.cpp
+++ b/src/Cech_complex/test/test_cech_complex.cpp
@@ -22,21 +22,20 @@
// to construct Cech_complex from a OFF file of points
#include <gudhi/Points_off_io.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/distance_functions.h>
#include <gudhi/Unitary_tests_utils.h>
-#include <gudhi/Miniball.hpp>
+
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
// Type definitions
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<Filtration_value>;
+using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+using FT = typename Kernel::FT;
+using Point = typename Kernel::Point_d;
+
using Point_cloud = std::vector<Point>;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
-
-using Point_iterator = Point_cloud::const_iterator;
-using Coordinate_iterator = Point::const_iterator;
-using Min_sphere = Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
+using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
// ----------------------------------------------------------------------------
@@ -45,20 +44,32 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
//
// ----------------------------------------------------------------------------
Point_cloud points;
- points.push_back({1., 0.}); // 0
- points.push_back({0., 1.}); // 1
- points.push_back({2., 1.}); // 2
- points.push_back({3., 2.}); // 3
- points.push_back({0., 3.}); // 4
- points.push_back({3. + std::sqrt(3.), 3.}); // 5
- points.push_back({1., 4.}); // 6
- points.push_back({3., 4.}); // 7
- points.push_back({2., 4. + std::sqrt(3.)}); // 8
- points.push_back({0., 4.}); // 9
- points.push_back({-0.5, 2.}); // 10
+
+ std::vector<FT> point0({1., 0.});
+ points.emplace_back(point0.begin(), point0.end());
+ std::vector<FT> point1({0., 1.});
+ points.emplace_back(point1.begin(), point1.end());
+ std::vector<FT> point2({2., 1.});
+ points.emplace_back(point2.begin(), point2.end());
+ std::vector<FT> point3({3., 2.});
+ points.emplace_back(point3.begin(), point3.end());
+ std::vector<FT> point4({0., 3.});
+ points.emplace_back(point4.begin(), point4.end());
+ std::vector<FT> point5({3. + std::sqrt(3.), 3.});
+ points.emplace_back(point5.begin(), point5.end());
+ std::vector<FT> point6({1., 4.});
+ points.emplace_back(point6.begin(), point6.end());
+ std::vector<FT> point7({3., 4.});
+ points.emplace_back(point7.begin(), point7.end());
+ std::vector<FT> point8({2., 4. + std::sqrt(3.)});
+ points.emplace_back(point8.begin(), point8.end());
+ std::vector<FT> point9({0., 4.});
+ points.emplace_back(point9.begin(), point9.end());
+ std::vector<FT> point10({-0.5, 2.});
+ points.emplace_back(point10.begin(), point10.end());
Filtration_value max_radius = 1.0;
- std::cout << "========== NUMBER OF POINTS = " << points.size() << " - Cech max_radius = " << max_radius
+ std::clog << "========== NUMBER OF POINTS = " << points.size() << " - Cech max_radius = " << max_radius
<< "==========" << std::endl;
Cech_complex cech_complex_for_doc(points, max_radius);
@@ -72,14 +83,14 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
const int DIMENSION_1 = 1;
Simplex_tree st;
cech_complex_for_doc.create_complex(st, DIMENSION_1);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION_1);
const int NUMBER_OF_VERTICES = 11;
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 27);
// Check filtration values of vertices is 0.0
@@ -91,16 +102,16 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) {
if (DIMENSION_1 == st.dimension(f_simplex)) {
std::vector<Point> vp;
- std::cout << "vertex = (";
+ std::clog << "vertex = (";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << ",";
+ std::clog << vertex << ",";
vp.push_back(points.at(vertex));
}
- std::cout << ") - distance =" << Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1))
+ std::clog << ") - distance =" << Gudhi::cech_complex::Sphere_circumradius<Kernel, Filtration_value>()(vp.at(0), vp.at(1))
<< " - filtration =" << st.filtration(f_simplex) << std::endl;
BOOST_CHECK(vp.size() == 2);
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex),
- Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1)));
+ Gudhi::cech_complex::Sphere_circumradius<Kernel, Filtration_value>()(vp.at(0), vp.at(1)));
}
}
@@ -112,48 +123,47 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
Simplex_tree st2;
cech_complex_for_doc.create_complex(st2, DIMENSION_2);
- std::cout << "st2.dimension()=" << st2.dimension() << std::endl;
+ std::clog << "st2.dimension()=" << st2.dimension() << std::endl;
BOOST_CHECK(st2.dimension() == DIMENSION_2);
- std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
+ std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
+ std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
BOOST_CHECK(st2.num_simplices() == 30);
Point_cloud points012;
for (std::size_t vertex = 0; vertex <= 2; vertex++) {
points012.push_back(cech_complex_for_doc.get_point(vertex));
}
- std::size_t dimension = points[0].end() - points[0].begin();
- Min_sphere ms012(dimension, points012.begin(), points012.end());
- Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
- std::cout << "f012= " << f012 << " | ms012_radius= " << std::sqrt(ms012.squared_radius()) << std::endl;
+ Kernel kern;
+ Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
+ std::clog << "f012= " << f012 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::sqrt(ms012.squared_radius()));
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::sqrt(cast_to_fv(kern.compute_squared_radius_d_object()(points012.begin(), points012.end()))));
Point_cloud points1410;
points1410.push_back(cech_complex_for_doc.get_point(1));
points1410.push_back(cech_complex_for_doc.get_point(4));
points1410.push_back(cech_complex_for_doc.get_point(10));
- Min_sphere ms1410(dimension, points1410.begin(), points1410.end());
- Simplex_tree::Filtration_value f1410 = st2.filtration(st2.find({1, 4, 10}));
- std::cout << "f1410= " << f1410 << " | ms1410_radius= " << std::sqrt(ms1410.squared_radius()) << std::endl;
+ Filtration_value f1410 = st2.filtration(st2.find({1, 4, 10}));
+ std::clog << "f1410= " << f1410 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f1410, std::sqrt(ms1410.squared_radius()));
+ // In this case, the computed circumsphere using CGAL kernel does not match the minimal enclosing ball; the filtration value check is therefore done against a hardcoded value
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f1410, 1.);
Point_cloud points469;
points469.push_back(cech_complex_for_doc.get_point(4));
points469.push_back(cech_complex_for_doc.get_point(6));
points469.push_back(cech_complex_for_doc.get_point(9));
- Min_sphere ms469(dimension, points469.begin(), points469.end());
- Simplex_tree::Filtration_value f469 = st2.filtration(st2.find({4, 6, 9}));
- std::cout << "f469= " << f469 << " | ms469_radius= " << std::sqrt(ms469.squared_radius()) << std::endl;
+ Filtration_value f469 = st2.filtration(st2.find({4, 6, 9}));
+ std::clog << "f469= " << f469 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f469, std::sqrt(ms469.squared_radius()));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f469, std::sqrt(cast_to_fv(kern.compute_squared_radius_d_object()(points469.begin(), points469.end()))));
BOOST_CHECK((st2.find({6, 7, 8}) == st2.null_simplex()));
BOOST_CHECK((st2.find({3, 5, 7}) == st2.null_simplex()));
@@ -178,35 +188,35 @@ BOOST_AUTO_TEST_CASE(Cech_complex_from_points) {
// ----------------------------------------------------------------------------
Cech_complex cech_complex_from_points(points, 2.0);
- std::cout << "========== cech_complex_from_points ==========" << std::endl;
+ std::clog << "========== cech_complex_from_points ==========" << std::endl;
Simplex_tree st;
const int DIMENSION = 3;
cech_complex_from_points.create_complex(st, DIMENSION);
// Another way to check num_simplices
- std::cout << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Cech complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
for (auto f_simplex : st.filtration_simplex_range()) {
num_simplices++;
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << st.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
}
BOOST_CHECK(num_simplices == 15);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 15);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION);
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == 4);
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
+ std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
switch (st.dimension(f_simplex)) {
case 0:
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0);
@@ -235,8 +245,8 @@ BOOST_AUTO_TEST_CASE(Cech_create_complex_throw) {
//
// ----------------------------------------------------------------------------
std::string off_file_name("alphacomplexdoc.off");
- double max_radius = 12.0;
- std::cout << "========== OFF FILE NAME = " << off_file_name << " - Cech max_radius=" << max_radius
+ Filtration_value max_radius = 12.0;
+ std::clog << "========== OFF FILE NAME = " << off_file_name << " - Cech max_radius=" << max_radius
<< "==========" << std::endl;
Gudhi::Points_off_reader<Point> off_reader(off_file_name);
@@ -245,7 +255,7 @@ BOOST_AUTO_TEST_CASE(Cech_create_complex_throw) {
Simplex_tree stree;
std::vector<int> simplex = {0, 1, 2};
stree.insert_simplex_and_subfaces(simplex);
- std::cout << "Check exception throw in debug mode" << std::endl;
+ std::clog << "Check exception throw in debug mode" << std::endl;
// throws an exception because stree is not empty
BOOST_CHECK_THROW(cech_complex_from_file.create_complex(stree, 1), std::invalid_argument);
}
diff --git a/src/Cech_complex/utilities/CMakeLists.txt b/src/Cech_complex/utilities/CMakeLists.txt
index 30b99729..64557cee 100644
--- a/src/Cech_complex/utilities/CMakeLists.txt
+++ b/src/Cech_complex/utilities/CMakeLists.txt
@@ -1,14 +1,33 @@
-cmake_minimum_required(VERSION 2.6)
-project(Cech_complex_utilities)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ project(Cech_complex_utilities)
-add_executable(cech_persistence cech_persistence.cpp)
-target_link_libraries(cech_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
+ if (TARGET Boost::program_options)
+ add_executable(cech_persistence cech_persistence.cpp)
+ target_link_libraries(cech_persistence Boost::program_options)
-if (TBB_FOUND)
- target_link_libraries(cech_persistence ${TBB_LIBRARIES})
-endif()
+ if (TBB_FOUND)
+ target_link_libraries(cech_persistence ${TBB_LIBRARIES})
+ endif()
+
+ add_test(NAME Cech_complex_utility_from_rips_on_tore_3D_safe COMMAND $<TARGET_FILE:cech_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3" "-o" "safe.pers")
+ add_test(NAME Cech_complex_utility_from_rips_on_tore_3D_fast COMMAND $<TARGET_FILE:cech_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3" "-o" "fast.pers" "-f")
+ add_test(NAME Cech_complex_utility_from_rips_on_tore_3D_exact COMMAND $<TARGET_FILE:cech_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3" "-o" "exact.pers" "-e")
-add_test(NAME Cech_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:cech_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
+ if (DIFF_PATH)
+ add_test(Cech_complex_utilities_diff_exact ${DIFF_PATH}
+ "exact.pers" "safe.pers")
+ set_tests_properties(Cech_complex_utilities_diff_exact PROPERTIES DEPENDS
+ "Cech_complex_utility_from_rips_on_tore_3D_safe;Cech_complex_utility_from_rips_on_tore_3D_exact")
-install(TARGETS cech_persistence DESTINATION bin)
+ add_test(Cech_complex_utilities_diff_fast ${DIFF_PATH}
+ "fast.pers" "safe.pers")
+ set_tests_properties(Cech_complex_utilities_diff_fast PROPERTIES DEPENDS
+ "Cech_complex_utility_from_rips_on_tore_3D_safe;Cech_complex_utility_from_rips_on_tore_3D_fast")
+ endif()
+
+ install(TARGETS cech_persistence DESTINATION bin)
+ endif()
+endif()
diff --git a/src/Cech_complex/utilities/cech_persistence.cpp b/src/Cech_complex/utilities/cech_persistence.cpp
index 8cfe018b..e6419f3d 100644
--- a/src/Cech_complex/utilities/cech_persistence.cpp
+++ b/src/Cech_complex/utilities/cech_persistence.cpp
@@ -9,13 +9,15 @@
*/
#include <gudhi/Cech_complex.h>
-#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Persistent_cohomology.h>
#include <gudhi/Points_off_io.h>
#include <boost/program_options.hpp>
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
+#include <CGAL/Epick_d.h> // For FAST version
+
#include <string>
#include <vector>
#include <limits> // infinity
@@ -23,41 +25,67 @@
// Types definition
using Simplex_tree = Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_fast_persistence>;
using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<double>;
-using Point_cloud = std::vector<Point>;
-using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
+
using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;
-void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
- Filtration_value& max_radius, int& dim_max, int& p, Filtration_value& min_persistence);
+void program_options(int argc, char* argv[], std::string& off_file_points, bool& exact, bool& fast,
+ std::string& filediag, Filtration_value& max_radius, int& dim_max, int& p,
+ Filtration_value& min_persistence);
+
+template<class Kernel>
+Simplex_tree create_simplex_tree(const std::string &off_file_points, bool exact_version,
+ Filtration_value max_radius, int dim_max) {
+ using Point = typename Kernel::Point_d;
+ using Points_off_reader = Gudhi::Points_off_reader<Point>;
+ using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
+
+ Simplex_tree stree;
+
+ Points_off_reader off_reader(off_file_points);
+ Cech_complex cech_complex_from_file(off_reader.get_point_cloud(), max_radius, exact_version);
+ cech_complex_from_file.create_complex(stree, dim_max);
+
+ return stree;
+}
int main(int argc, char* argv[]) {
std::string off_file_points;
std::string filediag;
+ bool exact_version = false;
+ bool fast_version = false;
Filtration_value max_radius;
int dim_max;
int p;
Filtration_value min_persistence;
- program_options(argc, argv, off_file_points, filediag, max_radius, dim_max, p, min_persistence);
+ program_options(argc, argv, off_file_points, exact_version, fast_version, filediag, max_radius, dim_max, p,
+ min_persistence);
- Points_off_reader off_reader(off_file_points);
- Cech_complex cech_complex_from_file(off_reader.get_point_cloud(), max_radius);
+ if ((exact_version) && (fast_version)) {
+ std::cerr << "You cannot set the exact and the fast version." << std::endl;
+ exit(-1);
+ }
- // Construct the Cech complex in a Simplex Tree
- Simplex_tree simplex_tree;
+ Simplex_tree stree;
+ if (fast_version) {
+ // WARNING: CGAL::Epick_d is fast but not safe (unlike CGAL::Epeck_d);
+ // robustness issues may appear, e.g. when the points lie on a grid
+ using Fast_kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
+ stree = create_simplex_tree<Fast_kernel>(off_file_points, exact_version, max_radius, dim_max);
+ } else {
+ using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+ stree = create_simplex_tree<Kernel>(off_file_points, exact_version, max_radius, dim_max);
+ }
- cech_complex_from_file.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
+ std::clog << "The complex contains " << stree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << stree.dimension() << " \n";
// Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ stree.initialize_filtration();
// Compute the persistence diagram of the complex
- Persistent_cohomology pcoh(simplex_tree);
+ Persistent_cohomology pcoh(stree);
// initializes the coefficient field for homology
pcoh.init_coefficients(p);
@@ -75,8 +103,9 @@ int main(int argc, char* argv[]) {
return 0;
}
-void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
- Filtration_value& max_radius, int& dim_max, int& p, Filtration_value& min_persistence) {
+void program_options(int argc, char* argv[], std::string& off_file_points, bool& exact, bool& fast,
+ std::string& filediag, Filtration_value& max_radius, int& dim_max, int& p,
+ Filtration_value& min_persistence) {
namespace po = boost::program_options;
po::options_description hidden("Hidden options");
hidden.add_options()("input-file", po::value<std::string>(&off_file_points),
@@ -84,8 +113,12 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
+ "exact,e", po::bool_switch(&exact),
+ "To activate exact version of Cech complex (default is false, not available if fast is set)")(
+ "fast,f", po::bool_switch(&fast),
+ "To activate fast version of Cech complex (default is false, not available if exact is set)")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-radius,r",
po::value<Filtration_value>(&max_radius)->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal length of an edge for the Cech complex construction.")(
@@ -108,17 +141,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Cech complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Cech complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Cech_complex/utilities/cechcomplex.md b/src/Cech_complex/utilities/cechcomplex.md
index f7817dbb..54c4e88d 100644
--- a/src/Cech_complex/utilities/cechcomplex.md
+++ b/src/Cech_complex/utilities/cechcomplex.md
@@ -1,3 +1,13 @@
+---
+layout: page
+title: "Čech complex"
+meta_title: "Čech complex"
+teaser: ""
+permalink: /cechcomplex/
+---
+{::comment}
+Leave the lines above as they are: they are required by the web site generator 'Jekyll'
+{:/comment}
# Čech complex #
@@ -16,18 +26,24 @@ a prime number).
**Usage**
-`cech_persistence [options] <OFF input file>`
+`cech_persistence [options] <input OFF file>`
+
+where
+`<input OFF file>` is the path to the input point cloud in
+[nOFF ASCII format]({{ site.officialurl }}/doc/latest/fileformats.html#FileFormatsOFF).
**Allowed options**
* `-h [ --help ]` Produce help message
* `-o [ --output-file ]` Name of file in which the persistence diagram is written. Default print in standard output.
-* `-r [ --max-edge-length ]` (default = inf) Maximal length of an edge for the Čech complex construction.
+* `-r [ --max-radius ]` (default = inf) Maximal radius for the Čech complex construction.
* `-d [ --cpx-dimension ]` (default = 1) Maximal dimension of the Čech complex we want to compute.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
+* `-e [ --exact ]` Use the exact computation version.
+* `-f [ --fast ]` Use the fast computation version.
-Beware: this program may use a lot of RAM and take a lot of time if `max-edge-length` is set to a large value.
+Beware: this program may use a lot of RAM and take a lot of time if `max-radius` is set to a large value.
**Example 1 with Z/2Z coefficients**
diff --git a/src/Collapse/doc/dominated_edge.png b/src/Collapse/doc/dominated_edge.png
new file mode 100644
index 00000000..5900a55a
--- /dev/null
+++ b/src/Collapse/doc/dominated_edge.png
Binary files differ
diff --git a/src/Collapse/doc/intro_edge_collapse.h b/src/Collapse/doc/intro_edge_collapse.h
new file mode 100644
index 00000000..12e909c8
--- /dev/null
+++ b/src/Collapse/doc/intro_edge_collapse.h
@@ -0,0 +1,81 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siddharth Pritam
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef DOC_EDGE_COLLAPSE_INTRO_EDGE_COLLAPSE_H_
+#define DOC_EDGE_COLLAPSE_INTRO_EDGE_COLLAPSE_H_
+
+namespace Gudhi {
+
+namespace collapse {
+
+/** \defgroup edge_collapse Edge collapse
+ *
+ * \author Siddharth Pritam and Marc Glisse
+ *
+ * @{
+ *
+ * This module implements edge collapse of a filtered flag complex as described in \cite edgecollapsearxiv, in
+ * particular it reduces a filtration of Vietoris-Rips complex represented by a graph to a smaller flag filtration with
+ * the same persistent homology.
+ *
+ * \section edge_collapse_definition Edge collapse definition
+ *
+ * An edge \f$e\f$ in a simplicial complex \f$K\f$ is called a <b>dominated edge</b> if the link of \f$e\f$ in
+ * \f$K\f$, \f$lk_K(e)\f$ is a simplicial cone, that is, there exists a vertex \f$v^{\prime} \notin e\f$ and a
+ * subcomplex \f$L\f$ in \f$K\f$, such that \f$lk_K(e) = v^{\prime}L\f$. We say that the vertex \f$v^{\prime}\f$
+ * \e dominates \f$e\f$ and \f$e\f$ is \e dominated by \f$v^{\prime}\f$.
+ * An <b> elementary edge collapse </b> is the removal of a dominated edge \f$e\f$ from \f$K\f$ (the cofaces of \f$e\f$
+ * are implicitly removed as well).
+ * Domination is used as a simple sufficient condition that ensures that this removal is a homotopy preserving
+ * operation.
+ *
+ * The dominated edges can be easily characterized as follows:
+ *
+ * -- For a general simplicial complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$,
+ * if and only if all the maximal simplices of \f$K\f$ that contain \f$e\f$ also contain \f$v^{\prime}\f$.
+ *
+ * -- For a flag complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$, if and only
+ * if all the vertices in \f$K\f$ that have an edge with both vertices of \f$e\f$ also have an edge with
+ * \f$v^{\prime}\f$. Notice that this only depends on the graph.
+ *
+ * In the context of a filtration, an edge collapse may translate into an increase of the filtration value of an edge,
+ * or its removal if it already had the largest filtration value.
+ * The algorithm to compute the smaller induced filtration is described in \cite edgecollapsearxiv.
+ * Edge collapse can be successfully employed to reduce any input filtration of flag complexes to a smaller induced
+ * filtration which preserves the persistent homology of the original filtration and is a flag complex as well.
+ *
+ * The algorithm implemented here does not produce a minimal filtration. Taking its output and applying the algorithm a
+ * second time may further simplify the filtration.
+ *
+ * \subsection edgecollapseexample Basic edge collapse
+ *
+ * This example calls `Gudhi::collapse::flag_complex_collapse_edges()` from a proximity graph represented as a list of
+ * `Filtered_edge`.
+ * Then it collapses edges and displays a new list of `Filtered_edge` (with fewer edges)
+ * that will preserve the persistence homology computation.
+ *
+ * \include edge_collapse_basic_example.cpp
+ *
+ * When launching the example:
+ *
+ * \code $> ./Edge_collapse_example_basic
+ * \endcode
+ *
+ * the program output could be:
+ *
+ * \include edge_collapse_example_basic.txt
+ */
+/** @} */ // end defgroup edge_collapse
+
+} // namespace collapse
+
+} // namespace Gudhi
+
+#endif // DOC_EDGE_COLLAPSE_INTRO_EDGE_COLLAPSE_H_
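As a complement to the characterization above, a self-contained sketch of the domination test for flag complexes (the adjacency-set representation and the names are illustrative, not the library's internals):

#include <set>

using Vertex = int;
using Neighbours = std::set<Vertex>;

// An edge {u, v} is dominated by a vertex w (distinct from u and v) when w is a common
// neighbour of u and v and every other common neighbour of u and v is also a neighbour of w.
bool dominates(const Neighbours& nbr_u, const Neighbours& nbr_v,
               Vertex w, const Neighbours& nbr_w) {
  if (nbr_u.count(w) == 0 || nbr_v.count(w) == 0) return false;
  for (Vertex x : nbr_u) {
    if (x == w || nbr_v.count(x) == 0) continue;  // keep common neighbours other than w
    if (nbr_w.count(x) == 0) return false;        // a common neighbour that w does not see
  }
  return true;
}

int main() {
  // Complete graph on the square 0-1-2-3 with both diagonals, as in the example below:
  Neighbours n0 {1, 2, 3}, n1 {0, 2, 3}, n2 {0, 1, 3}, n3 {0, 1, 2};
  // The diagonal {0, 2} is dominated by vertex 1 (and, symmetrically, by vertex 3).
  return dominates(n0, n2, 1, n1) ? 0 : 1;
}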
diff --git a/src/Collapse/example/CMakeLists.txt b/src/Collapse/example/CMakeLists.txt
new file mode 100644
index 00000000..4456a844
--- /dev/null
+++ b/src/Collapse/example/CMakeLists.txt
@@ -0,0 +1,28 @@
+project(Edge_collapse_examples)
+
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+
+ # Point cloud
+ add_executable ( Edge_collapse_example_basic edge_collapse_basic_example.cpp )
+
+ if (TBB_FOUND)
+ target_link_libraries(Edge_collapse_example_basic ${TBB_LIBRARIES})
+ endif()
+
+ add_test(NAME Edge_collapse_example_basic COMMAND $<TARGET_FILE:Edge_collapse_example_basic>)
+
+ # Point cloud
+ add_executable ( Edge_collapse_conserve_persistence edge_collapse_conserve_persistence.cpp )
+
+ if (TBB_FOUND)
+ target_link_libraries(Edge_collapse_conserve_persistence ${TBB_LIBRARIES})
+ endif()
+
+ add_test(NAME Edge_collapse_conserve_persistence_1 COMMAND $<TARGET_FILE:Edge_collapse_conserve_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "0.2")
+
+ add_test(NAME Edge_collapse_conserve_persistence_2 COMMAND $<TARGET_FILE:Edge_collapse_conserve_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "1.8")
+
+endif()
\ No newline at end of file
diff --git a/src/Collapse/example/edge_collapse_basic_example.cpp b/src/Collapse/example/edge_collapse_basic_example.cpp
new file mode 100644
index 00000000..1b3dc1b5
--- /dev/null
+++ b/src/Collapse/example/edge_collapse_basic_example.cpp
@@ -0,0 +1,36 @@
+#include <gudhi/Flag_complex_edge_collapser.h>
+
+#include <iostream>
+#include <vector>
+#include <tuple>
+
+int main() {
+ // Type definitions
+ using Filtration_value = float;
+ using Vertex_handle = short;
+ using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
+ using Filtered_edge_list = std::vector<Filtered_edge>;
+
+ // 1 2
+ // o---o
+ // |\ /|
+ // | x |
+ // |/ \|
+ // o---o
+ // 0 3
+ Filtered_edge_list graph = {{0, 1, 1.},
+ {1, 2, 1.},
+ {2, 3, 1.},
+ {3, 0, 1.},
+ {0, 2, 2.},
+ {1, 3, 2.}};
+
+ auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(graph);
+
+ for (auto filtered_edge_from_collapse : remaining_edges) {
+ std::cout << "fn[" << std::get<0>(filtered_edge_from_collapse) << ", " << std::get<1>(filtered_edge_from_collapse)
+ << "] = " << std::get<2>(filtered_edge_from_collapse) << std::endl;
+ }
+
+ return 0;
+}
diff --git a/src/Collapse/example/edge_collapse_conserve_persistence.cpp b/src/Collapse/example/edge_collapse_conserve_persistence.cpp
new file mode 100644
index 00000000..19960597
--- /dev/null
+++ b/src/Collapse/example/edge_collapse_conserve_persistence.cpp
@@ -0,0 +1,159 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Flag_complex_edge_collapser.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/distance_functions.h>
+#include <gudhi/Points_off_io.h>
+#include <gudhi/graph_simplicial_complex.h>
+
+#include <boost/range/adaptor/transformed.hpp>
+
+#include <utility> // for std::pair
+#include <vector>
+#include <tuple>
+#include <iostream>
+#include <algorithm> // for std::sort
+#include <cstdlib> // for atof
+
+// Types definition
+
+using Simplex_tree = Gudhi::Simplex_tree<>;
+using Filtration_value = Simplex_tree::Filtration_value;
+using Vertex_handle = Simplex_tree::Vertex_handle;
+using Point = std::vector<Filtration_value>;
+using Vector_of_points = std::vector<Point>;
+
+using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
+
+using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
+using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;
+
+using Persistence_interval = std::tuple<int, Filtration_value, Filtration_value>;
+/*
+ * Compare two persistence intervals by length, longest first.
+ */
+struct cmp_intervals_by_length {
+ explicit cmp_intervals_by_length(Simplex_tree * sc)
+ : sc_(sc) { }
+
+ template<typename Persistent_interval>
+ bool operator()(const Persistent_interval & p1, const Persistent_interval & p2) {
+ return (sc_->filtration(get<1>(p1)) - sc_->filtration(get<0>(p1))
+ > sc_->filtration(get<1>(p2)) - sc_->filtration(get<0>(p2)));
+ }
+ Simplex_tree* sc_;
+};
+
+std::vector<Persistence_interval> get_persistence_intervals(Simplex_tree& st, int ambient_dim) {
+ std::vector<Persistence_interval> persistence_intervals;
+ st.expansion(ambient_dim);
+
+ // Sort the simplices in the order of the filtration
+ st.initialize_filtration();
+ // Compute the persistence diagram of the complex
+ Persistent_cohomology pcoh(st);
+ // initializes the coefficient field for homology - must be a prime number
+ int p = 11;
+ pcoh.init_coefficients(p);
+
+ // Default min_interval_length = 0.
+ pcoh.compute_persistent_cohomology();
+ // Sort the persistent pairs, longest interval first
+ cmp_intervals_by_length cmp(&st);
+ auto persistent_pairs = pcoh.get_persistent_pairs();
+ std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp);
+ for (auto pair : persistent_pairs) {
+ persistence_intervals.emplace_back(st.dimension(get<0>(pair)),
+ st.filtration(get<0>(pair)),
+ st.filtration(get<1>(pair)));
+ }
+ return persistence_intervals;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc != 3) {
+ std::cerr << "This program requires an OFF file and minimal threshold value as parameter\n";
+ std::cerr << "For instance: ./Edge_collapse_conserve_persistence ../../data/points/tore3D_300.off 1.\n";
+ exit(-1); // ----- >>
+ }
+
+ std::string off_file_points {argv[1]};
+ double threshold {atof(argv[2])};
+
+ Gudhi::Points_off_reader<Point> off_reader(off_file_points);
+ if (!off_reader.is_valid()) {
+ std::cerr << "Unable to read file " << off_file_points << "\n";
+ exit(-1); // ----- >>
+ }
+
+ Vector_of_points point_vector = off_reader.get_point_cloud();
+ if (point_vector.size() <= 0) {
+ std::cerr << "Empty point cloud." << std::endl;
+ exit(-1); // ----- >>
+ }
+
+ Proximity_graph proximity_graph = Gudhi::compute_proximity_graph<Simplex_tree>(off_reader.get_point_cloud(),
+ threshold,
+ Gudhi::Euclidean_distance());
+
+ if (num_edges(proximity_graph) <= 0) {
+ std::cerr << "Total number of edges is zero." << std::endl;
+ exit(-1);
+ }
+
+ int ambient_dim = point_vector[0].size();
+
+ // ***** Simplex tree from a flag complex built after collapse *****
+ auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(
+ boost::adaptors::transform(edges(proximity_graph), [&](auto&&edge){
+ return std::make_tuple(static_cast<Vertex_handle>(source(edge, proximity_graph)),
+ static_cast<Vertex_handle>(target(edge, proximity_graph)),
+ get(Gudhi::edge_filtration_t(), proximity_graph, edge));
+ })
+ );
+
+ Simplex_tree stree_from_collapse;
+ for (Vertex_handle vertex = 0; static_cast<std::size_t>(vertex) < point_vector.size(); vertex++) {
+ // insert the vertex with a 0. filtration value just like a Rips
+ stree_from_collapse.insert_simplex({vertex}, 0.);
+ }
+ for (auto remaining_edge : remaining_edges) {
+ stree_from_collapse.insert_simplex({std::get<0>(remaining_edge), std::get<1>(remaining_edge)},
+ std::get<2>(remaining_edge));
+ }
+
+ std::vector<Persistence_interval> persistence_intervals_from_collapse = get_persistence_intervals(stree_from_collapse, ambient_dim);
+
+ // ***** Simplex tree from the complete flag complex *****
+ Simplex_tree stree_wo_collapse;
+ stree_wo_collapse.insert_graph(proximity_graph);
+
+ std::vector<Persistence_interval> persistence_intervals_wo_collapse = get_persistence_intervals(stree_wo_collapse, ambient_dim);
+
+ // ***** Comparison *****
+ if (persistence_intervals_wo_collapse.size() != persistence_intervals_from_collapse.size()) {
+ std::cerr << "Number of persistence pairs with collapse is " << persistence_intervals_from_collapse.size() << std::endl;
+ std::cerr << "Number of persistence pairs without collapse is " << persistence_intervals_wo_collapse.size() << std::endl;
+ exit(-1);
+ }
+
+ int return_value = 0;
+ auto ppwoc_ptr = persistence_intervals_wo_collapse.begin();
+ for (auto ppfc: persistence_intervals_from_collapse) {
+ if (ppfc != *ppwoc_ptr) {
+ return_value++;
+ std::cerr << "Without collapse: "
+ << std::get<0>(*ppwoc_ptr) << " " << std::get<1>(*ppwoc_ptr) << " " << std::get<2>(*ppwoc_ptr)
+ << " - With collapse: "
+ << std::get<0>(ppfc) << " " << std::get<1>(ppfc) << " " << std::get<2>(ppfc) << std::endl;
+ }
+ ppwoc_ptr++;
+ }
+ return return_value;
+}
diff --git a/src/Collapse/example/edge_collapse_example_basic.txt b/src/Collapse/example/edge_collapse_example_basic.txt
new file mode 100644
index 00000000..acecacaf
--- /dev/null
+++ b/src/Collapse/example/edge_collapse_example_basic.txt
@@ -0,0 +1,5 @@
+fn[0, 1] = 1
+fn[1, 2] = 1
+fn[2, 3] = 1
+fn[3, 0] = 1
+fn[0, 2] = 2
diff --git a/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
new file mode 100644
index 00000000..d0b3fe4a
--- /dev/null
+++ b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
@@ -0,0 +1,337 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siddharth Pritam, Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - 2020/03 Vincent Rouvreau: integration to the gudhi library
+ * - 2021 Marc Glisse: complete rewrite
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FLAG_COMPLEX_EDGE_COLLAPSER_H_
+#define FLAG_COMPLEX_EDGE_COLLAPSER_H_
+
+#include <gudhi/Debug_utils.h>
+
+#include <boost/container/flat_map.hpp>
+#include <boost/container/flat_set.hpp>
+
+#ifdef GUDHI_USE_TBB
+#include <tbb/parallel_sort.h>
+#endif
+
+#include <utility>
+#include <vector>
+#include <tuple>
+#include <algorithm>
+#include <limits>
+
+namespace Gudhi {
+
+namespace collapse {
+
+/** \private
+ *
+ * \brief Flag complex sparse matrix data structure.
+ *
+ * \tparam Vertex type must be an integer type.
+ * \tparam Filtration type for the value of the filtration function.
+ */
+template<typename Vertex, typename Filtration_value>
+struct Flag_complex_edge_collapser {
+ using Filtered_edge = std::tuple<Vertex, Vertex, Filtration_value>;
+ typedef std::pair<Vertex,Vertex> Edge;
+ struct Cmpi { template<class T, class U> bool operator()(T const&a, U const&b)const{return b<a; } };
+ typedef boost::container::flat_map<Vertex, Filtration_value> Ngb_list;
+ typedef std::vector<Ngb_list> Neighbors;
+ Neighbors neighbors; // closed neighborhood
+ std::size_t num_vertices;
+ std::vector<std::tuple<Vertex, Vertex, Filtration_value>> res;
+
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ // Minimal matrix interface
+ // Using this matrix generally helps performance, but the memory use may be excessive for a very sparse graph
+ // (and in extreme cases the constant initialization of the matrix may start to dominate the running time).
+ // Are there cases where the matrix is too big but a hash table would help?
+ std::vector<Filtration_value> neighbors_data;
+ void init_neighbors_dense(){
+ neighbors_data.clear();
+ neighbors_data.resize(num_vertices*num_vertices, std::numeric_limits<Filtration_value>::infinity());
+ }
+ Filtration_value& neighbors_dense(Vertex i, Vertex j){return neighbors_data[num_vertices*j+i];}
+#endif
+
+ // This does not touch the events list, only the adjacency matrix(es)
+ void delay_neighbor(Vertex u, Vertex v, Filtration_value f) {
+ neighbors[u][v]=f;
+ neighbors[v][u]=f;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ void remove_neighbor(Vertex u, Vertex v) {
+ neighbors[u].erase(v);
+ neighbors[v].erase(u);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=std::numeric_limits<Filtration_value>::infinity();
+ neighbors_dense(v,u)=std::numeric_limits<Filtration_value>::infinity();
+#endif
+ }
+
+ template<class FilteredEdgeRange>
+ void read_edges(FilteredEdgeRange const&r){
+ neighbors.resize(num_vertices);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ init_neighbors_dense();
+#endif
+ // Use the raw sequence to avoid maintaining the order
+ std::vector<typename Ngb_list::sequence_type> neighbors_seq(num_vertices);
+ for(auto&&e : r){
+ using std::get;
+ Vertex u = get<0>(e);
+ Vertex v = get<1>(e);
+ Filtration_value f = get<2>(e);
+ neighbors_seq[u].emplace_back(v, f);
+ neighbors_seq[v].emplace_back(u, f);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ for(std::size_t i=0;i<neighbors_seq.size();++i){
+ neighbors_seq[i].emplace_back(i, -std::numeric_limits<Filtration_value>::infinity());
+ neighbors[i].adopt_sequence(std::move(neighbors_seq[i])); // calls sort
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(i,i)=-std::numeric_limits<Filtration_value>::infinity();
+#endif
+ }
+ }
+
+ // Open neighborhood
+ // At some point it helped gcc to add __attribute__((noinline)) here, otherwise we had +50% on the running time
+ // on one example. It looks ok now, or I forgot which example that was.
+ void common_neighbors(boost::container::flat_set<Vertex>& e_ngb,
+ std::vector<std::pair<Filtration_value, Vertex>>& e_ngb_later,
+ Vertex u, Vertex v, Filtration_value f_event){
+ // Using neighbors_dense here seems to hurt, even if we loop on the smaller of nu and nv.
+ Ngb_list const&nu = neighbors[u];
+ Ngb_list const&nv = neighbors[v];
+ auto ui = nu.begin();
+ auto ue = nu.end();
+ auto vi = nv.begin();
+ auto ve = nv.end();
+ assert(ui != ue && vi != ve);
+ while(ui != ue && vi != ve){
+ Vertex w = ui->first;
+ if(w < vi->first) { ++ui; continue; }
+ if(w > vi->first) { ++vi; continue; }
+ // nu and nv are closed, so we need to exclude e here.
+ if(w != u && w != v) {
+ Filtration_value f = std::max(ui->second, vi->second);
+ if(f > f_event)
+ e_ngb_later.emplace_back(f, w);
+ else
+ e_ngb.insert(e_ngb.end(), w);
+ }
+ ++ui; ++vi;
+ }
+ }
+
+ // Test if the neighborhood of e is included in the closed neighborhood of c
+ template<class Ngb>
+ bool is_dominated_by(Ngb const& e_ngb, Vertex c, Filtration_value f){
+ // The best strategy probably depends on the distribution, how sparse / dense the adjacency matrix is,
+ // how (un)balanced the sizes of e_ngb and nc are.
+ // Some efficient operations on sets work best with bitsets, although the need for a map complicates things.
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ for(auto v : e_ngb) {
+ // if(v==c)continue;
+ if(neighbors_dense(v,c) > f) return false;
+ }
+ return true;
+#else
+ auto&&nc = neighbors[c];
+ // if few neighbors, use dichotomy? Seems slower.
+ // I tried storing a copy of neighbors as a vector<absl::flat_hash_map> and using it for nc, but it was
+ // a bit slower here. It did help with neighbors[dominator].find(w) in the main function though,
+ // sometimes enough, sometimes not.
+ auto ci = nc.begin();
+ auto ce = nc.end();
+ auto eni = e_ngb.begin();
+ auto ene = e_ngb.end();
+ assert(eni != ene);
+ assert(ci != ce);
+ // if(*eni == c && ++eni == ene) return true;
+ for(;;){
+ Vertex ve = *eni;
+ Vertex vc = ci->first;
+ while(ve > vc) {
+ // try a gallop strategy (exponential search)? Seems slower
+ if(++ci == ce) return false;
+ vc = ci->first;
+ }
+ if(ve < vc) return false;
+ // ve == vc
+ if(ci->second > f) return false;
+ if(++eni == ene)return true;
+ // If we stored an open neighborhood of c (excluding c), we would need to test for c here and before the loop
+ // if(*eni == c && ++eni == ene)return true;
+ if(++ci == ce) return false;
+ }
+#endif
+ }
+
+ template<class FilteredEdgeRange, class Delay>
+ void process_edges(FilteredEdgeRange const& edges, Delay&& delay) {
+ {
+ Vertex maxi = 0, maxj = 0;
+ for(auto& fe : edges) {
+ Vertex i = std::get<0>(fe);
+ Vertex j = std::get<1>(fe);
+ if (i > maxi) maxi = i;
+ if (j > maxj) maxj = j;
+ }
+ num_vertices = std::max(maxi, maxj) + 1;
+ }
+
+ read_edges(edges);
+
+ boost::container::flat_set<Vertex> e_ngb;
+ e_ngb.reserve(num_vertices);
+ std::vector<std::pair<Filtration_value, Vertex>> e_ngb_later;
+ for(auto&e:edges) {
+ {
+ Vertex u = std::get<0>(e);
+ Vertex v = std::get<1>(e);
+ Filtration_value input_time = std::get<2>(e);
+ auto time = delay(input_time);
+ auto start_time = time;
+ e_ngb.clear();
+ e_ngb_later.clear();
+ common_neighbors(e_ngb, e_ngb_later, u, v, time);
+ // If we identify a good candidate (the first common neighbor) for being a dominator of e until infinity,
+ // we could check that a bit more cheaply. It does not seem to help though.
+ auto cmp1=[](auto const&a, auto const&b){return a.first > b.first;};
+ auto e_ngb_later_begin=e_ngb_later.begin();
+ auto e_ngb_later_end=e_ngb_later.end();
+ bool heapified = false;
+
+ bool dead = false;
+ while(true) {
+ Vertex dominator = -1;
+ // special case for size 1
+ // if(e_ngb.size()==1){dominator=*e_ngb.begin();}else
+ // It is tempting to test the dominators in increasing order of filtration value, which is likely to reduce
+ // the number of calls to is_dominated_by before finding a dominator, but sorting, even partially / lazily,
+ // is very expensive.
+ for(auto c : e_ngb){
+ if(is_dominated_by(e_ngb, c, time)){
+ dominator = c;
+ break;
+ }
+ }
+ if(dominator==-1) break;
+ // Push as long as dominator remains a dominator.
+ // Iterate on times where at least one neighbor appears.
+ for (bool still_dominated = true; still_dominated; ) {
+ if(e_ngb_later_begin == e_ngb_later_end) {
+ dead = true; goto end_move;
+ }
+ if(!heapified) {
+ // Eagerly sorting can be slow
+ std::make_heap(e_ngb_later_begin, e_ngb_later_end, cmp1);
+ heapified=true;
+ }
+ time = e_ngb_later_begin->first; // first place it may become critical
+ // Update the neighborhood for this new time, while checking if any of the new neighbors break domination.
+ while (e_ngb_later_begin != e_ngb_later_end && e_ngb_later_begin->first <= time) {
+ Vertex w = e_ngb_later_begin->second;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ if (neighbors_dense(dominator,w) > e_ngb_later_begin->first)
+ still_dominated = false;
+#else
+ auto& ngb_dom = neighbors[dominator];
+ auto wit = ngb_dom.find(w); // neighborhood may be open or closed, it does not matter
+ if (wit == ngb_dom.end() || wit->second > e_ngb_later_begin->first)
+ still_dominated = false;
+#endif
+ e_ngb.insert(w);
+ std::pop_heap(e_ngb_later_begin, e_ngb_later_end--, cmp1);
+ }
+ } // this doesn't seem to help that much...
+ }
+end_move:
+ if(dead) {
+ remove_neighbor(u, v);
+ } else if(start_time != time) {
+ delay_neighbor(u, v, time);
+ res.emplace_back(u, v, time);
+ } else {
+ res.emplace_back(u, v, input_time);
+ }
+ }
+ }
+ }
+
+ std::vector<Filtered_edge> output() {
+ return std::move(res);
+ }
+
+};
+
+template<class R> R to_range(R&& r) { return std::move(r); }
+template<class R, class T> R to_range(T&& t) { R r; r.insert(r.end(), t.begin(), t.end()); return r; }
+
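+// Variant taking a `delay` functor: the domination test for an edge with filtration value f starts at time delay(f)
+// instead of f, so a surviving edge may come out with an increased filtration value, or be removed entirely.
+// Typically delay(f) >= f; the identity function (used by the overload documented below) yields the exact collapse.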
+template<class FilteredEdgeRange, class Delay>
+auto flag_complex_collapse_edges(FilteredEdgeRange&& edges, Delay&&delay) {
+ // Would it help to label the points according to some spatial sorting?
+ auto first_edge_itr = std::begin(edges);
+ using Vertex = std::decay_t<decltype(std::get<0>(*first_edge_itr))>;
+ using Filtration_value = std::decay_t<decltype(std::get<2>(*first_edge_itr))>;
+ using Edge_collapser = Flag_complex_edge_collapser<Vertex, Filtration_value>;
+ if (first_edge_itr != std::end(edges)) {
+ auto edges2 = to_range<std::vector<typename Edge_collapser::Filtered_edge>>(std::forward<FilteredEdgeRange>(edges));
+#ifdef GUDHI_USE_TBB
+ // I think this sorting is always negligible compared to the collapse, but parallelizing it shouldn't hurt.
+ tbb::parallel_sort(edges2.begin(), edges2.end(),
+ [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
+#else
+ std::sort(edges2.begin(), edges2.end(), [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
+#endif
+ Edge_collapser edge_collapser;
+ edge_collapser.process_edges(edges2, std::forward<Delay>(delay));
+ return edge_collapser.output();
+ }
+ return std::vector<typename Edge_collapser::Filtered_edge>();
+}
+
+/** \brief Implicitly constructs a flag complex from edges as an input, collapses edges while preserving the persistent
+ * homology and returns the remaining edges as a range. The filtration value of vertices is irrelevant to this function.
+ *
+ * \param[in] edges Range of Filtered edges. There is no need for the range to be sorted, as it will be done internally.
+ *
+ * \tparam FilteredEdgeRange Range of `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`
+ * where `Vertex_handle` is the type of a vertex index.
+ *
+ * \return Remaining edges after collapse as a range of
+ * `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`.
+ *
+ * \ingroup edge_collapse
+ *
+ * \note
+ * Advanced: Defining the macro GUDHI_COLLAPSE_USE_DENSE_ARRAY tells Gudhi to allocate a square table whose side is
+ * the maximum vertex index plus one. This usually speeds up the computation for dense graphs. However, for sparse
+ * graphs, the memory use may be problematic and initializing this large table may be slow.
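+ *
+ * For instance (a minimal sketch, with made-up filtration values):
+ * \code
+ * std::vector<std::tuple<int, int, float>> edges{{0, 1, 1.f}, {1, 2, 1.f}, {2, 0, 2.f}};
+ * auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
+ * \endcode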
+ */
+template<class FilteredEdgeRange> auto flag_complex_collapse_edges(const FilteredEdgeRange& edges) {
+ return flag_complex_collapse_edges(edges, [](auto const&d){return d;});
+}
+
+} // namespace collapse
+
+} // namespace Gudhi
+
+#endif // FLAG_COMPLEX_EDGE_COLLAPSER_H_
diff --git a/src/Collapse/test/CMakeLists.txt b/src/Collapse/test/CMakeLists.txt
new file mode 100644
index 00000000..c04199e2
--- /dev/null
+++ b/src/Collapse/test/CMakeLists.txt
@@ -0,0 +1,13 @@
+project(Collapse_tests)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+
+ include(GUDHI_boost_test)
+
+ add_executable ( Collapse_test_unit collapse_unit_test.cpp )
+ if (TBB_FOUND)
+ target_link_libraries(Collapse_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Collapse_test_unit)
+
+endif() \ No newline at end of file
diff --git a/src/Collapse/test/collapse_unit_test.cpp b/src/Collapse/test/collapse_unit_test.cpp
new file mode 100644
index 00000000..f41dbedd
--- /dev/null
+++ b/src/Collapse/test/collapse_unit_test.cpp
@@ -0,0 +1,198 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "collapse"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+#include <gudhi/Flag_complex_edge_collapser.h>
+#include <gudhi/distance_functions.h>
+#include <gudhi/graph_simplicial_complex.h>
+
+#include <iostream>
+#include <tuple>
+#include <vector>
+#include <array>
+#include <cmath>
+
+struct Simplicial_complex {
+ using Vertex_handle = short;
+ using Filtration_value = float;
+};
+
+using Vertex_handle = Simplicial_complex::Vertex_handle;
+using Filtration_value = Simplicial_complex::Filtration_value;
+using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
+using Filtered_edge_list = std::vector<Filtered_edge>;
+
+template<typename Filtered_edge_range>
+bool find_edge_in_list(const Filtered_edge& edge, const Filtered_edge_range& edge_list) {
+ for (auto edge_from_list : edge_list) {
+ if (edge_from_list == edge)
+ return true;
+ }
+ return false;
+}
+
+template<typename Filtered_edge_range>
+void trace_and_check_collapse(const Filtered_edge_range& filtered_edges, const Filtered_edge_list& removed_edges) {
+ std::cout << "BEFORE COLLAPSE - Total number of edges: " << filtered_edges.size() << std::endl;
+ BOOST_CHECK(filtered_edges.size() > 0);
+ for (auto filtered_edge : filtered_edges) {
+ std::cout << "f[" << std::get<0>(filtered_edge) << ", " << std::get<1>(filtered_edge) << "] = "
+ << std::get<2>(filtered_edge) << std::endl;
+ }
+
+ std::cout << "COLLAPSE - keep edges: " << std::endl;
+ auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(filtered_edges);
+
+ std::cout << "AFTER COLLAPSE - Total number of edges: " << remaining_edges.size() << std::endl;
+ BOOST_CHECK(remaining_edges.size() <= filtered_edges.size());
+ for (auto filtered_edge_from_collapse : remaining_edges) {
+ std::cout << "f[" << std::get<0>(filtered_edge_from_collapse) << ", " << std::get<1>(filtered_edge_from_collapse)
+ << "] = " << std::get<2>(filtered_edge_from_collapse) << std::endl;
+ // Check each edge from collapse is in the input
+ BOOST_CHECK(find_edge_in_list(filtered_edge_from_collapse, filtered_edges));
+ }
+
+ std::cout << "CHECK COLLAPSE - Total number of removed edges: " << removed_edges.size() << std::endl;
+ for (auto removed_filtered_edge : removed_edges) {
+ std::cout << "f[" << std::get<0>(removed_filtered_edge) << ", " << std::get<1>(removed_filtered_edge) << "] = "
+ << std::get<2>(removed_filtered_edge) << std::endl;
+ // Check each removed edge from collapse is in the input
+ BOOST_CHECK(!find_edge_in_list(removed_filtered_edge, remaining_edges));
+ }
+
+}
+
+BOOST_AUTO_TEST_CASE(collapse) {
+ std::cout << "***** COLLAPSE *****" << std::endl;
+ // 1 2
+ // o---o
+ // | |
+ // | |
+ // | |
+ // o---o
+ // 0 3
+ Filtered_edge_list edges {{0, 1, 1.},
+ {1, 2, 1.},
+ {2, 3, 1.},
+ {3, 0, 1.}};
+ trace_and_check_collapse(edges, {});
+
+ // 1 2
+ // o---o
+ // |\ /|
+ // | x |
+ // |/ \|
+ // o---o
+ // 0 3
+ edges.emplace_back(0, 2, 2.);
+ edges.emplace_back(1, 3, 2.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}});
+
+ // 1 2 4
+ // o---o---o
+ // |\ /| |
+ // | x | |
+ // |/ \| |
+ // o---o---o
+ // 0 3 5
+ edges.emplace_back(2, 4, 3.);
+ edges.emplace_back(4, 5, 3.);
+ edges.emplace_back(5, 3, 3.);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}});
+
+ // 1 2 4
+ // o---o---o
+ // |\ /|\ /|
+ // | x | x |
+ // |/ \|/ \|
+ // o---o---o
+ // 0 3 5
+ edges.emplace_back(2, 5, 4.);
+ edges.emplace_back(4, 3, 4.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}, {4, 3, 4.1}});
+
+ // 1 2 4
+ // o---o---o
+ // |\ /|\ /|
+ // | x | x | + [0,4] and [1,5]
+ // |/ \|/ \|
+ // o---o---o
+ // 0 3 5
+ edges.emplace_back(1, 5, 5.);
+ edges.emplace_back(0, 4, 5.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}, {4, 3, 4.1}, {0, 4, 5.1}});
+}
+
+BOOST_AUTO_TEST_CASE(collapse_from_array) {
+ std::cout << "***** COLLAPSE FROM ARRAY *****" << std::endl;
+ // 1 2
+ // o---o
+ // |\ /|
+ // | x |
+ // |/ \|
+ // o---o
+ // 0 3
+ std::array<Filtered_edge, 6> f_edge_array = {{{0, 1, 1.},
+ {1, 2, 1.},
+ {2, 3, 1.},
+ {3, 0, 1.},
+ {0, 2, 2.},
+ {1, 3, 2.1}}};
+ trace_and_check_collapse(f_edge_array, {{1, 3, 2.1}});
+}
+
+BOOST_AUTO_TEST_CASE(collapse_from_proximity_graph) {
+ std::cout << "***** COLLAPSE FROM PROXIMITY GRAPH *****" << std::endl;
+ // 1 2
+ // o---o
+ // |\ /|
+ // | x |
+ // |/ \|
+ // o---o
+ // 0 3
+ std::vector<std::vector<Filtration_value>> point_cloud = {{0., 0.},
+ {0., 1.},
+ {1., 0.},
+ {1., 1.} };
+
+ Filtration_value threshold = std::numeric_limits<Filtration_value>::infinity();
+ using Proximity_graph = Gudhi::Proximity_graph<Simplicial_complex>;
+ Proximity_graph proximity_graph = Gudhi::compute_proximity_graph<Simplicial_complex>(point_cloud,
+ threshold,
+ Gudhi::Euclidean_distance());
+
+ auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(
+ boost::adaptors::transform(edges(proximity_graph), [&](auto&&edge){
+ return std::make_tuple(static_cast<Vertex_handle>(source(edge, proximity_graph)),
+ static_cast<Vertex_handle>(target(edge, proximity_graph)),
+ get(Gudhi::edge_filtration_t(), proximity_graph, edge));
+ })
+ );
+
+ BOOST_CHECK(remaining_edges.size() == 5);
+
+ std::size_t filtration_is_edge_length_nb = 0;
+ std::size_t filtration_is_diagonal_length_nb = 0;
+ float epsilon = std::numeric_limits<Filtration_value>::epsilon();
+ for (auto filtered_edge : remaining_edges) {
+ if (std::get<2>(filtered_edge) == 1.)
+ filtration_is_edge_length_nb++;
+ if (std::fabs(std::get<2>(filtered_edge) - std::sqrt(2.)) <= epsilon)
+ filtration_is_diagonal_length_nb++;
+ }
+ BOOST_CHECK(filtration_is_edge_length_nb == 4);
+ BOOST_CHECK(filtration_is_diagonal_length_nb == 1);
+}
diff --git a/src/Collapse/utilities/CMakeLists.txt b/src/Collapse/utilities/CMakeLists.txt
new file mode 100644
index 00000000..bce99e90
--- /dev/null
+++ b/src/Collapse/utilities/CMakeLists.txt
@@ -0,0 +1,37 @@
+project(Collapse_utilities)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+ if (TARGET Boost::program_options)
+ # From a point cloud
+ add_executable ( point_cloud_edge_collapse_rips_persistence point_cloud_edge_collapse_rips_persistence.cpp )
+ target_link_libraries(point_cloud_edge_collapse_rips_persistence Boost::program_options)
+
+ if (TBB_FOUND)
+ target_link_libraries(point_cloud_edge_collapse_rips_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Edge_collapse_utilities_point_cloud_rips_persistence COMMAND $<TARGET_FILE:point_cloud_edge_collapse_rips_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3" "-o" "off_results.pers")
+
+ install(TARGETS point_cloud_edge_collapse_rips_persistence DESTINATION bin)
+
+ # From a distance matrix
+ add_executable ( distance_matrix_edge_collapse_rips_persistence distance_matrix_edge_collapse_rips_persistence.cpp )
+ target_link_libraries(distance_matrix_edge_collapse_rips_persistence Boost::program_options)
+
+ if (TBB_FOUND)
+ target_link_libraries(distance_matrix_edge_collapse_rips_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Edge_collapse_utilities_distance_matrix_rips_persistence COMMAND $<TARGET_FILE:distance_matrix_edge_collapse_rips_persistence>
+ "${CMAKE_SOURCE_DIR}/data/distance_matrix/tore3D_1307_distance_matrix.csv" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3" "-o" "csv_results.pers")
+
+ install(TARGETS distance_matrix_edge_collapse_rips_persistence DESTINATION bin)
+
+ if (DIFF_PATH)
+ add_test(Edge_collapse_utilities_diff_persistence ${DIFF_PATH}
+ "off_results.pers" "csv_results.pers")
+ set_tests_properties(Edge_collapse_utilities_diff_persistence PROPERTIES DEPENDS
+ "Edge_collapse_utilities_point_cloud_rips_persistence;Edge_collapse_utilities_distance_matrix_rips_persistence")
+ endif()
+ endif()
+
+endif() \ No newline at end of file
diff --git a/src/Collapse/utilities/collapse.md b/src/Collapse/utilities/collapse.md
new file mode 100644
index 00000000..1f41bb1f
--- /dev/null
+++ b/src/Collapse/utilities/collapse.md
@@ -0,0 +1,63 @@
+---
+layout: page
+title: "Collapse"
+meta_title: "Edge collapse"
+teaser: ""
+permalink: /collapse/
+---
+{::comment}
+Leave the lines above as it is required by the web site generator 'Jekyll'
+{:/comment}
+
+
+## point_cloud_edge_collapse_rips_persistence ##
+This program computes the Rips graph defined on a set of input points, using Euclidean distance, and collapses edges.
+It then computes the persistent homology, with coefficient field *Z/pZ*, of the Rips complex built on top of the edges remaining after collapse.
+The output diagram contains one bar per line, written with the convention:
+
+`p dim birth death`
+
+where `dim` is the dimension of the homological feature, `birth` and `death` are respectively the birth and death of the feature, and `p` is the characteristic of the field *Z/pZ* used for homology coefficients (`p` must be a prime number).
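+
+For instance, the line `11 1 0.25 0.5` (made-up values) describes a 1-dimensional feature, computed with *Z/11Z* coefficients, born at 0.25 and dying at 0.5.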
+
+**Usage**
+
+`point_cloud_edge_collapse_rips_persistence [options] <OFF input file>`
+
+**Allowed options**
+
+* `-h [ --help ]` Produce help message
+* `-o [ --output-file ]` Name of file in which the persistence diagram is written. Default print in standard output.
+* `-r [ --max-edge-length ]` (default = inf) Maximal length of an edge for the Rips complex construction.
+* `-d [ --cpx-dimension ]` (default = 1) Maximal dimension of the Rips complex we want to compute.
+* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
+* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
+* `-i [ --edge-collapse-iterations ]` (default = 1) Number of iterations edge collapse is performed.
+
+Beware: this program may use a lot of RAM and take a lot of time if `max-edge-length` is set to a large value.
+
+**Example 1 with Z/2Z coefficients**
+
+`point_cloud_edge_collapse_rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 2`
+
+**Example 2 with Z/3Z coefficients**
+
+`point_cloud_edge_collapse_rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3`
+
+
+## distance_matrix_edge_collapse_rips_persistence ##
+
+Same as `point_cloud_edge_collapse_rips_persistence` but taking a distance matrix as input.
+
+**Usage**
+
+`distance_matrix_edge_collapse_rips_persistence [options] <CSV input file>`
+
+where
+`<CSV input file>` is the path to the file containing a distance matrix. The matrix can be square or lower triangular. The separator is ';'.
+The code does not check that the input is a valid distance matrix; it is the user's responsibility to provide one.
+Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an example of such a file.
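+For instance, a lower triangular matrix for four points could look as follows (made-up values), where line `i` lists the distances from point `i` to points `0, ..., i-1`, so the first line is empty:
+
+```
+
+1.0
+1.2;0.8
+2.0;1.5;0.7
+```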
+
+**Example**
+
+`distance_matrix_edge_collapse_rips_persistence data/distance_matrix/full_square_distance_matrix.csv -r 15 -d 3 -p 3 -m 0`
+
diff --git a/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp b/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp
new file mode 100644
index 00000000..70b489b5
--- /dev/null
+++ b/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp
@@ -0,0 +1,152 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siddharth Pritam, Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Flag_complex_edge_collapser.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/reader_utils.h>
+#include <gudhi/graph_simplicial_complex.h>
+
+#include <boost/program_options.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+using Simplex_tree = Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_fast_persistence>;
+using Filtration_value = Simplex_tree::Filtration_value;
+using Vertex_handle = Simplex_tree::Vertex_handle;
+
+using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
+using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
+
+using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
+using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;
+using Distance_matrix = std::vector<std::vector<Filtration_value>>;
+
+void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::string& filediag,
+ Filtration_value& threshold, int& dim_max, int& p, int& edge_collapse_iter_nb,
+ Filtration_value& min_persistence);
+
+int main(int argc, char* argv[]) {
+ std::string csv_matrix_file;
+ std::string filediag;
+ Filtration_value threshold;
+ int dim_max = 2;
+ int p;
+ int edge_collapse_iter_nb;
+ Filtration_value min_persistence;
+
+ program_options(argc, argv, csv_matrix_file, filediag, threshold, dim_max, p, edge_collapse_iter_nb,
+ min_persistence);
+
+ Distance_matrix distances = Gudhi::read_lower_triangular_matrix_from_csv_file<Filtration_value>(csv_matrix_file);
+ std::cout << "Read the distance matrix successfully, of size: " << distances.size() << std::endl;
+
+ Proximity_graph proximity_graph = Gudhi::compute_proximity_graph<Simplex_tree>(boost::irange((size_t)0,
+ distances.size()),
+ threshold,
+ [&distances](size_t i, size_t j) {
+ return distances[j][i];
+ });
+
+ auto edges_from_graph = boost::adaptors::transform(edges(proximity_graph), [&](auto&&edge){
+ return std::make_tuple(source(edge, proximity_graph),
+ target(edge, proximity_graph),
+ get(Gudhi::edge_filtration_t(), proximity_graph, edge));
+ });
+ std::vector<Filtered_edge> edges_list(edges_from_graph.begin(), edges_from_graph.end());
+ for (int iter = 0; iter < edge_collapse_iter_nb; iter++) {
+ // Each iteration collapses the remaining edge list further; the result replaces the input list.
+ edges_list = Gudhi::collapse::flag_complex_collapse_edges(edges_list);
+ }
+
+ Simplex_tree stree;
+ for (Vertex_handle vertex = 0; static_cast<std::size_t>(vertex) < distances.size(); vertex++) {
+ // insert the vertex with a 0. filtration value just like a Rips
+ stree.insert_simplex({vertex}, 0.);
+ }
+ for (auto filtered_edge : edges_list) {
+ stree.insert_simplex({std::get<0>(filtered_edge), std::get<1>(filtered_edge)}, std::get<2>(filtered_edge));
+ }
+
+ stree.expansion(dim_max);
+
+ std::cout << "The complex contains " << stree.num_simplices() << " simplices after collapse. \n";
+ std::cout << " and has dimension " << stree.dimension() << " \n";
+
+ // Sort the simplices in the order of the filtration
+ stree.initialize_filtration();
+ // Compute the persistence diagram of the complex
+ Persistent_cohomology pcoh(stree);
+ // initializes the coefficient field for homology
+ pcoh.init_coefficients(p);
+
+ pcoh.compute_persistent_cohomology(min_persistence);
+ if (filediag.empty()) {
+ pcoh.output_diagram();
+ } else {
+ std::ofstream out(filediag);
+ pcoh.output_diagram(out);
+ out.close();
+ }
+ return 0;
+}
+
+void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::string& filediag,
+ Filtration_value& threshold, int& dim_max, int& p, int& edge_collapse_iter_nb,
+ Filtration_value& min_persistence) {
+ namespace po = boost::program_options;
+ po::options_description hidden("Hidden options");
+ hidden.add_options()(
+ "input-file", po::value<std::string>(&csv_matrix_file),
+ "Name of file containing a distance matrix. Can be square or lower triangular matrix. Separator is ';'.");
+
+ po::options_description visible("Allowed options", 100);
+ visible.add_options()("help,h", "produce help message")(
+ "output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
+ "max-edge-length,r",
+ po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
+ "Maximal length of an edge for the Rips complex construction.")(
+ "cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
+ "Maximal dimension of the Rips complex we want to compute.")(
+ "field-charac,p", po::value<int>(&p)->default_value(11),
+ "Characteristic p of the coefficient field Z/pZ for computing homology.")(
+ "edge-collapse-iterations,i", po::value<int>(&edge_collapse_iter_nb)->default_value(1),
+ "Number of iterations edge collapse is performed.")(
+ "min-persistence,m", po::value<Filtration_value>(&min_persistence),
+ "Minimal lifetime of homology feature to be recorded. Default is 0. Enter a negative value to see zero length "
+ "intervals");
+
+ po::positional_options_description pos;
+ pos.add("input-file", 1);
+
+ po::options_description all;
+ all.add(visible).add(hidden);
+
+ po::variables_map vm;
+ po::store(po::command_line_parser(argc, argv).options(all).positional(pos).run(), vm);
+ po::notify(vm);
+
+ if (vm.count("help") || !vm.count("input-file")) {
+ std::cout << std::endl;
+ std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::cout << "of a Rips complex after edge collapse defined on a set of distance matrix.\n \n";
+ std::cout << "The output diagram contains one bar per line, written with the convention: \n";
+ std::cout << " p dim b d \n";
+ std::cout << "where dim is the dimension of the homological feature,\n";
+ std::cout << "b and d are respectively the birth and death of the feature and \n";
+ std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::cout << visible << std::endl;
+ exit(-1);
+ }
+}
diff --git a/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp b/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp
new file mode 100644
index 00000000..a8fd6f14
--- /dev/null
+++ b/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp
@@ -0,0 +1,181 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siddharth Pritam, Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Flag_complex_edge_collapser.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/distance_functions.h>
+#include <gudhi/Points_off_io.h>
+#include <gudhi/graph_simplicial_complex.h>
+
+#include <boost/program_options.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+#include<utility> // for std::pair
+#include<vector>
+#include<tuple>
+
+// Types definition
+
+using Simplex_tree = Gudhi::Simplex_tree<>;
+using Filtration_value = Simplex_tree::Filtration_value;
+using Vertex_handle = Simplex_tree::Vertex_handle;
+using Point = std::vector<Filtration_value>;
+using Vector_of_points = std::vector<Point>;
+
+using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
+using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
+
+using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
+using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;
+
+void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
+ Filtration_value& threshold, int& dim_max, int& p, int& edge_collapse_iter_nb,
+ Filtration_value& min_persistence);
+
+int main(int argc, char* argv[]) {
+ std::string off_file_points;
+ std::string filediag;
+ double threshold;
+ int dim_max;
+ int p;
+ int edge_collapse_iter_nb;
+ double min_persistence;
+
+ program_options(argc, argv, off_file_points, filediag, threshold, dim_max, p, edge_collapse_iter_nb, min_persistence);
+
+ std::cout << "The current input values to run the program is: " << std::endl;
+ std::cout << "min_persistence, threshold, max_complex_dimension, off_file_points, filediag"
+ << std::endl;
+ std::cout << min_persistence << ", " << threshold << ", " << dim_max
+ << ", " << off_file_points << ", " << filediag << std::endl;
+
+ Gudhi::Points_off_reader<Point> off_reader(off_file_points);
+ if (!off_reader.is_valid()) {
+ std::cerr << "Unable to read file " << off_file_points << "\n";
+ exit(-1); // ----- >>
+ }
+
+ Vector_of_points point_vector = off_reader.get_point_cloud();
+ if (point_vector.size() <= 0) {
+ std::cerr << "Empty point cloud." << std::endl;
+ exit(-1); // ----- >>
+ }
+
+ std::cout << "Successfully read " << point_vector.size() << " point_vector.\n";
+ std::cout << "Ambient dimension is " << point_vector[0].size() << ".\n";
+
+ Proximity_graph proximity_graph = Gudhi::compute_proximity_graph<Simplex_tree>(point_vector,
+ threshold,
+ Gudhi::Euclidean_distance());
+
+ if (num_edges(proximity_graph) <= 0) {
+ std::cerr << "Total number of edges is zero." << std::endl;
+ exit(-1);
+ }
+
+ auto edges_from_graph = boost::adaptors::transform(edges(proximity_graph), [&](auto&&edge){
+ return std::make_tuple(source(edge, proximity_graph),
+ target(edge, proximity_graph),
+ get(Gudhi::edge_filtration_t(), proximity_graph, edge));
+ });
+ std::vector<Filtered_edge> edges_list(edges_from_graph.begin(), edges_from_graph.end());
+
+ for (int iter = 0; iter < edge_collapse_iter_nb; iter++) {
+ // Each iteration collapses the remaining edge list further; the result replaces the input list.
+ edges_list = Gudhi::collapse::flag_complex_collapse_edges(edges_list);
+ }
+
+ Simplex_tree stree;
+ for (Vertex_handle vertex = 0; static_cast<std::size_t>(vertex) < point_vector.size(); vertex++) {
+ // insert the vertex with a 0. filtration value just like a Rips
+ stree.insert_simplex({vertex}, 0.);
+ }
+
+ for (auto filtered_edge : edges_list) {
+ stree.insert_simplex({std::get<0>(filtered_edge), std::get<1>(filtered_edge)}, std::get<2>(filtered_edge));
+ }
+
+ stree.expansion(dim_max);
+
+ std::cout << "The complex contains " << stree.num_simplices() << " simplices after collapse. \n";
+ std::cout << " and has dimension " << stree.dimension() << " \n";
+
+ // Sort the simplices in the order of the filtration
+ stree.initialize_filtration();
+ // Compute the persistence diagram of the complex
+ Persistent_cohomology pcoh(stree);
+ // initializes the coefficient field for homology
+ pcoh.init_coefficients(p);
+
+ pcoh.compute_persistent_cohomology(min_persistence);
+ if (filediag.empty()) {
+ pcoh.output_diagram();
+ } else {
+ std::ofstream out(filediag);
+ pcoh.output_diagram(out);
+ out.close();
+ }
+
+ return 0;
+}
+
+void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
+ Filtration_value& threshold, int& dim_max, int& p, int& edge_collapse_iter_nb,
+ Filtration_value& min_persistence) {
+ namespace po = boost::program_options;
+ po::options_description hidden("Hidden options");
+ hidden.add_options()("input-file", po::value<std::string>(&off_file_points),
+ "Name of an OFF file containing a point set.\n");
+
+ po::options_description visible("Allowed options", 100);
+ visible.add_options()("help,h", "produce help message")(
+ "output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
+ "max-edge-length,r",
+ po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
+ "Maximal length of an edge for the Rips complex construction.")(
+ "cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
+ "Maximal dimension of the Rips complex we want to compute.")(
+ "field-charac,p", po::value<int>(&p)->default_value(11),
+ "Characteristic p of the coefficient field Z/pZ for computing homology.")(
+ "edge-collapse-iterations,i", po::value<int>(&edge_collapse_iter_nb)->default_value(1),
+ "Number of iterations edge collapse is performed.")(
+ "min-persistence,m", po::value<Filtration_value>(&min_persistence),
+ "Minimal lifetime of homology feature to be recorded. Default is 0. Enter a negative value to see zero length "
+ "intervals");
+
+ po::positional_options_description pos;
+ pos.add("input-file", 1);
+
+ po::options_description all;
+ all.add(visible).add(hidden);
+
+ po::variables_map vm;
+ po::store(po::command_line_parser(argc, argv).options(all).positional(pos).run(), vm);
+ po::notify(vm);
+
+ if (vm.count("help") || !vm.count("input-file")) {
+ std::cout << std::endl;
+ std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::cout << "of a Rips complex, after edge collapse, defined on a set of input points.\n \n";
+ std::cout << "The output diagram contains one bar per line, written with the convention: \n";
+ std::cout << " p dim b d \n";
+ std::cout << "where dim is the dimension of the homological feature,\n";
+ std::cout << "b and d are respectively the birth and death of the feature and \n";
+ std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::cout << visible << std::endl;
+ exit(-1);
+ }
+}
diff --git a/src/Contraction/doc/so3.svg b/src/Contraction/doc/so3.svg
index adea3f38..f10cab98 100644
--- a/src/Contraction/doc/so3.svg
+++ b/src/Contraction/doc/so3.svg
@@ -177,7 +177,7 @@
x="309.4176"
y="300.58682"
id="tspan4515-4"
- style="text-align:center;text-anchor:middle">Rips complex built uppon these points</tspan><tspan
+ style="text-align:center;text-anchor:middle">Rips complex built upon these points</tspan><tspan
sodipodi:role="line"
x="309.4176"
y="308.96704"
diff --git a/src/Contraction/example/CMakeLists.txt b/src/Contraction/example/CMakeLists.txt
index f0dc885d..c5d31aca 100644
--- a/src/Contraction/example/CMakeLists.txt
+++ b/src/Contraction/example/CMakeLists.txt
@@ -4,7 +4,6 @@ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
add_executable(RipsContraction Rips_contraction.cpp)
add_executable(GarlandHeckbert Garland_heckbert.cpp)
- target_link_libraries(GarlandHeckbert ${Boost_TIMER_LIBRARY})
add_test(NAME Contraction_example_tore3D_0.2 COMMAND $<TARGET_FILE:RipsContraction>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "0.2")
diff --git a/src/Contraction/example/Garland_heckbert.cpp b/src/Contraction/example/Garland_heckbert.cpp
index 9c0b5205..489ef5d0 100644
--- a/src/Contraction/example/Garland_heckbert.cpp
+++ b/src/Contraction/example/Garland_heckbert.cpp
@@ -147,7 +147,7 @@ int main(int argc, char *argv[]) {
return EXIT_FAILURE;
}
- std::cout << "Load complex with " << complex.num_vertices() << " vertices" << std::endl;
+ std::clog << "Load complex with " << complex.num_vertices() << " vertices" << std::endl;
int num_contractions = atoi(argv[3]);
@@ -158,10 +158,10 @@ int main(int argc, char *argv[]) {
Gudhi::contraction::make_link_valid_contraction<EdgeProfile>(),
new GH_visitor(complex));
- std::cout << "Contract " << num_contractions << " edges" << std::endl;
+ std::clog << "Contract " << num_contractions << " edges" << std::endl;
contractor.contract_edges(num_contractions);
- std::cout << "Final complex has " <<
+ std::clog << "Final complex has " <<
complex.num_vertices() << " vertices, " <<
complex.num_edges() << " edges and " <<
complex.num_triangles() << " triangles." << std::endl;
diff --git a/src/Contraction/example/Garland_heckbert/Error_quadric.h b/src/Contraction/example/Garland_heckbert/Error_quadric.h
index 49250d7a..ae46232c 100644
--- a/src/Contraction/example/Garland_heckbert/Error_quadric.h
+++ b/src/Contraction/example/Garland_heckbert/Error_quadric.h
@@ -29,7 +29,7 @@ template <typename Point> class Error_quadric {
* Quadric corresponding to the L2 distance to the plane.
*
* According to the notation of Garland Heckbert, they
- * denote a quadric symetric matrix as :
+ * denote a quadric symmetric matrix as :
* Q = [ q11 q12 q13 q14]
* [ q12 q22 q23 q24]
* [ q13 q23 q33 q34]
diff --git a/src/Contraction/example/Rips_contraction.cpp b/src/Contraction/example/Rips_contraction.cpp
index b5ce06c1..547c290e 100644
--- a/src/Contraction/example/Rips_contraction.cpp
+++ b/src/Contraction/example/Rips_contraction.cpp
@@ -39,7 +39,7 @@ void build_rips(ComplexType& complex, double offset) {
int main(int argc, char *argv[]) {
if (argc != 3) {
std::cerr << "Usage " << argv[0] << " ../../../data/meshes/SO3_10000.off 0.3 to load the file " <<
- "../../data/SO3_10000.off and contract the Rips complex built with paremeter 0.3.\n";
+ "../../data/SO3_10000.off and contract the Rips complex built with parameter 0.3.\n";
return -1;
}
@@ -52,13 +52,13 @@ int main(int argc, char *argv[]) {
return EXIT_FAILURE;
}
- std::cout << "Build the Rips complex with " << complex.num_vertices() << " vertices" << std::endl;
+ std::clog << "Build the Rips complex with " << complex.num_vertices() << " vertices" << std::endl;
build_rips(complex, atof(argv[2]));
Gudhi::Clock contraction_chrono("Time to simplify and enumerate simplices");
- std::cout << "Initial complex has " <<
+ std::clog << "Initial complex has " <<
complex.num_vertices() << " vertices and " <<
complex.num_edges() << " edges" << std::endl;
@@ -69,16 +69,16 @@ int main(int argc, char *argv[]) {
Gudhi::contraction::make_remove_popable_blockers_visitor<Profile>());
contractor.contract_edges();
- std::cout << "Counting final number of simplices \n";
+ std::clog << "Counting final number of simplices \n";
unsigned num_simplices = std::distance(complex.complex_simplex_range().begin(), complex.complex_simplex_range().end());
- std::cout << "Final complex has " <<
+ std::clog << "Final complex has " <<
complex.num_vertices() << " vertices, " <<
complex.num_edges() << " edges, " <<
complex.num_blockers() << " blockers and " <<
num_simplices << " simplices" << std::endl;
- std::cout << contraction_chrono;
+ std::clog << contraction_chrono;
return EXIT_SUCCESS;
}
diff --git a/src/Contraction/include/gudhi/Edge_contraction.h b/src/Contraction/include/gudhi/Edge_contraction.h
index 6058d64b..dff6dc14 100644
--- a/src/Contraction/include/gudhi/Edge_contraction.h
+++ b/src/Contraction/include/gudhi/Edge_contraction.h
@@ -26,6 +26,7 @@ namespace contraction {
/** \defgroup contr Edge contraction
+@{
\author David Salinas
@@ -45,9 +46,9 @@ the operations needed for edge contraction algorithms have polynomial complexity
Therefore, the simplification can be done without enumerating the set of simplices, which is often intractable in high dimension, and is then very efficient
(sub-linear with regard to the number of simplices in practice).
-A typical application of this package is homology group computation. It is illustrated in the next figure where a Rips complex is built uppon a set of high-dimensional points and
+A typical application of this package is homology group computation. It is illustrated in the next figure where a Rips complex is built upon a set of high-dimensional points and
simplified with edge contractions.
-It has initially a big number of simplices (around 20 millions) but simplifying it to a much reduced form with only 15 vertices (and 714 simplices) takes only few seconds on a desktop machine (see the example bellow).
+Initially it has a large number of simplices (around 20 million), but simplifying it to a much reduced form with only 15 vertices (and 714 simplices) takes only a few seconds on a desktop machine (see the example below).
One can then compute homology groups on a simplicial complex having very few simplices, instead of running the homology algorithm on the much bigger initial set of
simplices, which would take much more time and memory.
@@ -64,7 +65,7 @@ This class design is policy based and heavily inspired from the similar edge col
Four policies can be customized in this package:
\li Cost_policy: specifies the cost of contracting a given edge. The edge with lowest cost is iteratively picked and contracted if valid.
-\li Valid_contraction_policy: specify if a given edge contraction is valid. For instance, this policy can check the link condition which ensures that the homotopy type is preserved afer the edge contraction.
+\li Valid_contraction_policy: specify if a given edge contraction is valid. For instance, this policy can check the link condition which ensures that the homotopy type is preserved after the edge contraction.
\li Placement_policy: every time an edge is contracted, its points are merged into one point specified by this policy. This may be the middle of the edge or some more sophisticated point such as the minimum of a cost as in
\cite Garland.
@@ -91,7 +92,7 @@ Despite this package is able to deal with \a arbitrary simplicial complexes (any
it is still \a 65% faster than the CGAL package which is focused on 2-manifolds.
The main reason is that few blockers appear during the simplification and hence,
the algorithm only has to deal with the graph and not higher-dimensional simplices
-(in this case triangles). However, we recall that higher-dimensional simplices are \a implicitely
+(in this case triangles). However, we recall that higher-dimensional simplices are \a implicitly
stored in the \ref skbl data-structure. Hence, one has to store
simplices in an external map if some information needs to be associated with them (information that could be a filtration value or
an orientation for instance).
@@ -152,7 +153,7 @@ void build_rips(ComplexType& complex, double offset){
int main (int argc, char *argv[])
{
if (argc!=3){
- std::cerr << "Usage "<<argv[0]<<" ../../data/SO3_10000.off 0.3 to load the file ../../data/SO3_10000.off and contract the Rips complex built with paremeter 0.3.\n";
+ std::cerr << "Usage "<<argv[0]<<" ../../data/SO3_10000.off 0.3 to load the file ../../data/SO3_10000.off and contract the Rips complex built with parameter 0.3.\n";
return -1;
}
@@ -164,13 +165,13 @@ int main (int argc, char *argv[])
std::cerr << "Unable to read file:"<<argv[1]<<std::endl;
return EXIT_FAILURE;
}
- std::cout << "Build the Rips complex"<<std::endl;
+ std::clog << "Build the Rips complex"<<std::endl;
build_rips(complex,atof(argv[2]));
boost::timer::auto_cpu_timer t;
- std::cout << "Initial complex has "<<
+ std::clog << "Initial complex has "<<
complex.num_vertices()<<" vertices and "<<
complex.num_edges()<<" edges"<<std::endl;
@@ -181,21 +182,20 @@ int main (int argc, char *argv[])
contraction::make_remove_popable_blockers_visitor<Profile>());
contractor.contract_edges();
- std::cout << "Counting final number of simplices \n";
+ std::clog << "Counting final number of simplices \n";
unsigned num_simplices = std::distance(complex.star_simplex_range().begin(),complex.star_simplex_range().end());
- std::cout << "Final complex has "<<
+ std::clog << "Final complex has "<<
complex.num_vertices()<<" vertices, "<<
complex.num_edges()<<" edges, "<<
complex.num_blockers()<<" blockers and "<<
num_simplices<<" simplices"<<std::endl;
- std::cout << "Time to simplify and enumerate simplices:\n";
+ std::clog << "Time to simplify and enumerate simplices:\n";
return EXIT_SUCCESS;
}
-}
\endcode
\verbatim
diff --git a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
index a0d9f2b2..6911ca2e 100644
--- a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
+++ b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
@@ -171,8 +171,13 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
Self const* algorithm_;
};
+#if CGAL_VERSION_NR < 1050500000
typedef CGAL::Modifiable_priority_queue<Edge_handle, Compare_cost, Undirected_edge_id> PQ;
- typedef typename PQ::handle pq_handle;
+#else
+ typedef CGAL::Modifiable_priority_queue<Edge_handle, Compare_cost, Undirected_edge_id, CGAL::CGAL_BOOST_PENDING_RELAXED_HEAP> PQ;
+#endif
+
+ typedef bool pq_handle;
// An Edge_data is associated with EVERY edge in the complex (collapsible or not).
@@ -196,7 +201,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
bool is_in_PQ() const {
- return PQHandle_ != PQ::null_handle();
+ return PQHandle_ != false;
}
void set_PQ_handle(pq_handle h) {
@@ -204,7 +209,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void reset_PQ_handle() {
- PQHandle_ = PQ::null_handle();
+ PQHandle_ = false;
}
private:
@@ -238,16 +243,22 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void insert_in_PQ(Edge_handle edge, Edge_data& data) {
- data.set_PQ_handle(heap_PQ_->push(edge));
+ heap_PQ_->push(edge);
+ data.set_PQ_handle(true);
++current_num_edges_heap_;
}
void update_in_PQ(Edge_handle edge, Edge_data& data) {
+#if CGAL_VERSION_NR < 1050500000
data.set_PQ_handle(heap_PQ_->update(edge, data.PQ_handle()));
+#else
+ heap_PQ_->update(edge);
+#endif
}
void remove_from_PQ(Edge_handle edge, Edge_data& data) {
- data.set_PQ_handle(heap_PQ_->erase(edge, data.PQ_handle()));
+ heap_PQ_->erase(edge);
+ data.set_PQ_handle(false);
--current_num_edges_heap_;
}
@@ -280,7 +291,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
std::size_t id = 0;
- // xxx do a parralel for
+ // xxx do a parallel for
for (auto edge : complex_.edge_range()) {
complex_[edge].index() = id++;
Profile const& profile = create_profile(edge);
@@ -474,7 +485,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void update_changed_edges() {
- // xxx do a parralel for
+ // xxx do a parallel for
DBG("update edges");
// sequential loop
@@ -530,7 +541,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
// by definition of a blocker
// todo: only useful for the link condition
- // laisser a l'utilisateur ? booleen update_heap_on_removed_blocker?
+ // leave it to the user? boolean update_heap_on_removed_blocker?
Simplex blocker_copy(*blocker);
for (auto x = blocker_copy.begin(); x != blocker_copy.end(); ++x) {
for (auto y = x; ++y != blocker_copy.end();) {
diff --git a/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h b/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h
new file mode 100644
index 00000000..210d804e
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h
@@ -0,0 +1,46 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_FUNCTION_FOR_IMPLICIT_MANIFOLD_H_
+#define CONCEPT_COXETER_TRIANGULATION_FUNCTION_FOR_IMPLICIT_MANIFOLD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept FunctionForImplicitManifold describes the requirements
+ * for a type to implement an implicit function class used for example in Manifold_tracing.
+ */
+struct FunctionForImplicitManifold {
+ /** \brief Value of the function at a specified point 'p'.
+ * @param[in] p The input point given by its Cartesian coordinates.
+ * Its size needs to be equal to amb_d().
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const;
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const;
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const;
+
+ /** \brief Returns a point on the zero-set of the function. */
+ Eigen::VectorXd seed() const;
+};
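+
+/* A minimal sketch of a model of this concept (hypothetical, for illustration):
+ * the unit circle in the plane, given as the zero-set of F(x, y) = x^2 + y^2 - 1.
+ *
+ * struct Unit_circle_function {
+ *   Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ *     Eigen::VectorXd result(1);
+ *     result(0) = p(0) * p(0) + p(1) * p(1) - 1.;
+ *     return result;
+ *   }
+ *   std::size_t amb_d() const { return 2; }
+ *   std::size_t cod_d() const { return 1; }
+ *   Eigen::VectorXd seed() const { return Eigen::Vector2d(1., 0.); }
+ * };
+ */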
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/IntersectionOracle.h b/src/Coxeter_triangulation/concept/IntersectionOracle.h
new file mode 100644
index 00000000..e4e397fa
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/IntersectionOracle.h
@@ -0,0 +1,104 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_INTERSECTION_ORACLE_H_
+#define CONCEPT_COXETER_TRIANGULATION_INTERSECTION_ORACLE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept IntersectionOracle describes the requirements
+ * for a type to implement an intersection oracle class used for example in Manifold_tracing.
+ *
+ */
+struct IntersectionOracle {
+ /** \brief Returns the domain (ambient) dimension of the underlying manifold. */
+ std::size_t amb_d() const;
+
+ /** \brief Returns the codomain dimension of the underlying manifold. */
+ std::size_t cod_d() const;
+
+ /** \brief Intersection query with the relative interior of the manifold.
+ *
+ * \details The returned structure Query_result contains a boolean value
+ * that is true only if an intersection point of the query simplex and
+ * the relative interior of the manifold exists, together with the
+ * intersection point itself and the face of the query simplex that
+ * contains the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the manifold
+ * (the codomain dimension of the function).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects(const Simplex_handle& simplex, const Triangulation& triangulation) const;
+
+ /** \brief Intersection query with the boundary of the manifold.
+ *
+ * \details The returned structure Query_result contains a boolean value
+ * that is true only if an intersection point of the query simplex and
+ * the boundary of the manifold exists, together with the intersection
+ * point itself and the face of the query simplex that contains the
+ * intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the boundary of the manifold
+ * (the codomain dimension of the function + 1).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects_boundary(const Simplex_handle& simplex,
+ const Triangulation& triangulation) const;
+
+ /** \brief Returns true if the input point lies inside the piecewise-linear
+ * domain induced by the given ambient triangulation that defines the relative
+ * interior of the piecewise-linear approximation of the manifold.
+ *
+ * @param p The input point. Needs to have the same dimension as the ambient
+ * dimension of the manifold (the domain dimension of the function).
+ * @param triangulation The ambient triangulation. Needs to have the same
+ * dimension as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Triangulation>
+ bool lies_in_domain(const Eigen::VectorXd& p, const Triangulation& triangulation) const {
+ Eigen::VectorXd pl_p = make_pl_approximation(domain_fun_, triangulation)(p);
+ return pl_p(0) < 0;
+ }
+
+ /** \brief Returns the function that defines the interior of the manifold */
+ const Function_& function() const;
+};
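+
+/* A usage sketch (the member names of Query_result are assumptions for illustration;
+ * the concept only guarantees that it holds a success flag, the intersection point
+ * and the face of the query simplex):
+ *
+ * auto qr = oracle.intersects(simplex, triangulation);
+ * if (qr.success) {
+ *   // qr.intersection is the intersection point,
+ *   // qr.face is the face of 'simplex' that contains it
+ * }
+ */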
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h b/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h
new file mode 100644
index 00000000..dac8e66d
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h
@@ -0,0 +1,81 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_SIMPLEX_IN_COXETER_TRIANGULATION_H_
+#define CONCEPT_COXETER_TRIANGULATION_SIMPLEX_IN_COXETER_TRIANGULATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <gudhi/Permutahedral_representation.h>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept SimplexInCoxeterTriangulation describes the requirements
+ * for a type to implement a representation of simplices in Freudenthal_triangulation
+ * or in Coxeter_triangulation.
+ */
+struct SimplexInCoxeterTriangulation {
+ /** \brief Type of the vertex. */
+ typedef Vertex_ Vertex;
+
+ /** \brief Type of the ordered partition. */
+ typedef Ordered_set_partition_ OrderedSetPartition;
+
+ /** \brief Dimension of the simplex. */
+ unsigned dimension() const;
+
+ /** \brief Type of a range of vertices, each of type Vertex. */
+ typedef Vertex_range;
+
+ /** \brief Returns a range of vertices of the simplex.
+ */
+ Vertex_range vertex_range() const;
+
+ /** \brief Type of a range of faces, each of type that
+ * is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ typedef Face_range;
+
+ /** \brief Returns a range of permutahedral representations of k-dimensional faces
+ * of the simplex for some given integer parameter 'k'.
+ */
+ Face_range face_range(std::size_t k) const;
+
+ /** \brief Returns a range of permutahedral representations of facets of the simplex.
+ * The dimension of the simplex must be strictly positive.
+ */
+ Face_range facet_range() const;
+
+ /** \brief Type of a range of cofaces, each of type that
+ * is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ typedef Coface_range;
+
+ /** \brief Returns a range of permutahedral representations of k-dimensional cofaces
+ * of the simplex for some given integer parameter 'k'.
+ */
+ Coface_range coface_range(std::size_t k) const;
+
+ /** \brief Returns a range of permutahedral representations of cofacets of the simplex.
+ * The dimension of the simplex must be strictly different from the ambient dimension.
+ */
+ Coface_range cofacet_range() const;
+
+ /** \brief Returns true, if the simplex is a face of other simplex. */
+ bool is_face_of(const Permutahedral_representation& other) const;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h b/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h
new file mode 100644
index 00000000..2b5d568c
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h
@@ -0,0 +1,56 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_TRIANGULATION_FOR_MANIFOLD_TRACING_H_
+#define CONCEPT_COXETER_TRIANGULATION_TRIANGULATION_FOR_MANIFOLD_TRACING_H_
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept TriangulationForManifoldTracing describes the requirements
+ * for a type to implement a triangulation class used for example in Manifold_tracing.
+ */
+struct TriangulationForManifoldTracing {
+ /** \brief Type of the simplices in the triangulation.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation. */
+ typedef Simplex_handle;
+
+ /** \brief Type of the vertices in the triangulation.
+ * Needs to be a random-access range of integer values. */
+ typedef Vertex_handle;
+
+ /** \brief Returns the permutahedral representation of the simplex in the
+ * triangulation that contains the given query point 'point'.
+ * \tparam Point_d A class that represents a point in d-dimensional Euclidean space.
+ * The coordinates should be random-accessible. Needs to provide the method size().
+ * @param[in] point The query point.
+ */
+ template <class Point_d>
+ Simplex_handle locate_point(const Point_d& point) const;
+
+ /** \brief Returns the Cartesian coordinates of the given vertex 'v'.
+ * @param[in] v The input vertex.
+ */
+ Eigen::VectorXd cartesian_coordinates(const Vertex_handle& v) const;
+
+ /** \brief Returns the Cartesian coordinates of the barycenter of a given simplex 's'.
+ * @param[in] s The input simplex given by permutahedral representation.
+ */
+ Eigen::VectorXd barycenter(const Simplex_handle& s) const;
+};
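+
+/* A usage sketch (illustrative only, for a triangulation 'tr' of R^3):
+ *
+ * Eigen::VectorXd p = Eigen::VectorXd::Zero(3);
+ * auto s = tr.locate_point(p);           // simplex of the triangulation containing p
+ * Eigen::VectorXd b = tr.barycenter(s);  // Cartesian coordinates of its barycenter
+ */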
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/doc/custom_function.png b/src/Coxeter_triangulation/doc/custom_function.png
new file mode 100644
index 00000000..8bb8ba9a
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/custom_function.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png b/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png
new file mode 100644
index 00000000..338b39fe
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h b/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h
new file mode 100644
index 00000000..395996c9
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h
@@ -0,0 +1,240 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
+#define DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
+
+// needs namespaces for Doxygen to link on classes
+namespace Gudhi {
+namespace coxeter_triangulation {
+
+/** \defgroup coxeter_triangulation Coxeter triangulation
+
+\author Siargey Kachanovich
+
+@{
+
+\section overview Module overview
+
+The Coxeter triangulation module is designed to provide tools for constructing a piecewise-linear approximation of an
+\f$m\f$-dimensional smooth manifold embedded in \f$ \mathbb{R}^d \f$ using an ambient triangulation.
+For a more detailed description of the module see \cite KachanovichThesis.
+
+\section manifoldtracing Manifold tracing algorithm
+The central piece of the module is the manifold tracing algorithm represented by the class
+\ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing".
+The manifold tracing algorithm takes as input a manifold of some dimension \f$m\f$ embedded in \f$\mathbb{R}^d\f$
+represented by an intersection oracle (see Section \ref intersectionoracle "Intersection oracle"), a point on the
+manifold and an ambient triangulation (see Section \ref ambienttriangulations "Ambient triangulations").
+The output consists of one map (or two maps in the case of manifolds with boundary) from the \f$(d-m)\f$-dimensional
+(and \f$(d-m+1)\f$-dimensional in the case of manifolds with boundary) simplices in the ambient triangulation that
+intersect the manifold to their intersection points.
+From this output, it is possible to construct the cell complex of the piecewise-linear approximation of the input
+manifold.
+
+There are two methods that execute the manifold tracing algorithm: the method
+\ref Gudhi::coxeter_triangulation::Manifold_tracing::manifold_tracing_algorithm() "Manifold_tracing::manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map)"
+for manifolds without boundary and
+\ref Gudhi::coxeter_triangulation::Manifold_tracing::manifold_tracing_algorithm() "Manifold_tracing::manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map, boundary_simplex_map)"
+for manifolds with boundary. The algorithm works as follows. It starts at the specified seed points and inserts into
+the output the \f$(d-m)\f$-dimensional simplices near each seed point that intersect the manifold. Starting from
+these simplices, the algorithm propagates the search for other \f$(d-m)\f$-dimensional simplices that intersect the
+manifold by marching from simplex to neighbouring simplices via their common cofaces.
+
+The class \ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing" has one template parameter
+`Triangulation_`, which specifies the ambient triangulation used by the algorithm.
+The template type `Triangulation_` has to be a model of the concept
+\ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
+
+The module also provides two static methods:
+\ref Gudhi::coxeter_triangulation::manifold_tracing_algorithm() "manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map)"
+for manifolds without boundary and
+\ref manifold_tracing_algorithm() "manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map, boundary_simplex_map)"
+for manifolds with boundary. For these static methods it is not necessary to specify any template arguments.
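+
+As a minimal sketch for a manifold without boundary (assuming `oracle` and `triangulation` are defined as described
+in the sections below):
+
+\code{.cpp}
+using MT = Manifold_tracing<Coxeter_triangulation<> >;
+MT::Out_simplex_map out_simplex_map;
+std::vector<Eigen::VectorXd> seed_points(1, oracle.seed());
+manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map);
+// out_simplex_map now maps each intersecting (d-m)-dimensional simplex to its intersection point
+\endcode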
+
+\section ambienttriangulations Ambient triangulations
+
+The ambient triangulations supported by the manifold tracing algorithm have to be models of the concept
+\ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
+This module offers two such models: the class
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" and the derived class
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation "Coxeter_triangulation".
+
+Both these classes encode affine transformations of the so-called Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$.
+The Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$ is defined as the simplicial subdivision of the unit cubic
+partition of \f$\mathbb{R}^d\f$.
+Each simplex is encoded using the permutahedral representation, which consists of an integer-valued vector \f$y\f$ that
+positions the simplex in a specific cube in the cubical partition and an ordered partition \f$\omega\f$ of the set
+\f$\{1,\ldots,d+1\}\f$, which positions the simplex in the simplicial subdivision of the cube.
+The default constructor
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation::Freudenthal_triangulation(std::size_t)
+"Freudenthal_triangulation(d)" the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$. The class
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" can also encode any affine
+transformation of the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$ using an invertible matrix \f$\Lambda\f$ and
+an offset vector \f$b\f$ that can be specified in the constructor and which can be changed using the methods
+change_matrix and change_offset. The class
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation "Coxeter_triangulation" is derived from
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" and its default constructor
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation::Coxeter_triangulation(std::size_t) "Coxeter_triangulation(d)"
+builds a Coxeter triangulation of type \f$\tilde{A}_d\f$, which has the best simplex quality of all linear
+transformations of the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$.
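+
+As a sketch, constructing a Coxeter triangulation and applying an affine change (the values are illustrative; the
+same calls appear in the examples of Section \ref example):
+
+\code{.cpp}
+Coxeter_triangulation<> cox_tr(4);                 // ambient dimension d = 4
+cox_tr.change_offset(Eigen::VectorXd::Random(4));  // translate by a random vector
+cox_tr.change_matrix(0.2 * cox_tr.matrix());       // rescale for a finer triangulation
+\endcode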
+
+\image html two_triangulations.png "Coxeter (on the left) and Freudenthal-Kuhn triangulation (on the right)"
+
+
+\section intersectionoracle Intersection oracle
+
+The input \f$m\f$-dimensional manifold in \f$\mathbb{R}^d\f$ needs to be given via the intersection oracle that answers
+the following query: given a \f$(d-m)\f$-dimensional simplex, does it intersect the manifold?
+The concept \ref Gudhi::coxeter_triangulation::IntersectionOracle "IntersectionOracle" describes all requirements for
+an intersection oracle class to be compatible with the class
+\ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing".
+This module offers one model of the concept
+\ref Gudhi::coxeter_triangulation::IntersectionOracle "IntersectionOracle", which is the class
+\ref Gudhi::coxeter_triangulation::Implicit_manifold_intersection_oracle "Implicit_manifold_intersection_oracle".
+This class represents a manifold given as the zero-set of a specified function
+\f$F: \mathbb{R}^d \rightarrow \mathbb{R}^{d-m}\f$.
+The function \f$F\f$ is given by a class which is a model of the concept
+\ref Gudhi::coxeter_triangulation::FunctionForImplicitManifold "FunctionForImplicitManifold".
+There are multiple function classes already implemented in this module, listed below; a short instantiation sketch follows the list.
+
+\li \ref Gudhi::coxeter_triangulation::Constant_function(std::size_t, std::size_t, Eigen::VectorXd)
+"Constant_function(d,k,v)" defines a constant function \f$F\f$ such that for all \f$x \in \mathbb{R}^d\f$, we have
+ \f$F(x) = v \in \mathbb{R}^k\f$.
+ The class Constant_function does not define an implicit manifold, but is useful as the domain function when defining
+ boundaryless implicit manifolds.
+\li \ref Gudhi::coxeter_triangulation::Function_affine_plane_in_Rd(N,b) "Function_affine_plane_in_Rd(N,b)" defines an
+ \f$m\f$-dimensional implicit affine plane in the \f$d\f$-dimensional Euclidean space given by a normal matrix \f$N\f$
+ and an offset vector \f$b\f$.
+\li \ref Gudhi::coxeter_triangulation::Function_Sm_in_Rd(r,m,d,center) "Function_Sm_in_Rd(r,m,d,center)" defines an
+ \f$m\f$-dimensional implicit sphere embedded in the \f$d\f$-dimensional Euclidean space of radius \f$r\f$ centered at
+ the point 'center'.
+\li \ref Gudhi::coxeter_triangulation::Function_moment_curve_in_Rd(r,d) "Function_moment_curve_in_Rd(r,d)" defines the moment
+ curve in the \f$d\f$-dimensional Euclidean space of radius \f$r\f$ given as the parameterized curve (but implemented
+ as an implicit curve):
+ \f[ (r, rt, \ldots, rt^{d-1}) \in \mathbb{R}^d,\text{ for $t \in \mathbb{R}$.} \f]
+\li \ref Gudhi::coxeter_triangulation::Function_torus_in_R3(R, r) "Function_torus_in_R3(R, r)" defines a torus in
+ \f$\mathbb{R}^3\f$ with the outer radius \f$R\f$ and the inner radius \f$r\f$, given by the equation:
+ \f[ z^2 + (\sqrt{x^2 + y^2} - r)^2 - R^2 = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_chair_in_R3(a, b, k) "Function_chair_in_R3(a, b, k)" defines the
+ \"Chair\" surface in \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ (x^2 + y^2 + z^2 - ak^2)^2 - b((z-k)^2 - 2x^2)((z+k)^2 - 2y^2) = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_iron_in_R3() "Function_iron_in_R3()" defines the \"Iron\" surface in
+ \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ \frac{-x^6-y^6-z^6}{300} + \frac{xy^2z}{2.1} + y^2 + (z-2)^2 = 1. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_lemniscate_revolution_in_R3(a) "Function_lemniscate_revolution_in_R3(a)"
+ defines a revolution surface in \f$\mathbb{R}^3\f$ obtained from the lemniscate of Bernoulli defined by the equation:
+ \f[ (x^2 + y^2 + z^2)^2 - 2a^2(x^2 - y^2 - z^2) = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_whitney_umbrella_in_R3() "Function_whitney_umbrella_in_R3()" defines
+ the Whitney umbrella surface in \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ x^2 - y^2z = 0. \f]
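+
+For instance, a minimal sketch instantiating one of the functions above and wrapping it into an intersection oracle
+(the radius value is illustrative):
+
+\code{.cpp}
+Function_Sm_in_Rd fun_sphere(1.0, 2);   // 2-sphere of radius 1 embedded in R^3
+auto oracle = make_oracle(fun_sphere);  // model of the concept IntersectionOracle
+\endcode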
+
+The base function classes above can be composed or modified into new functions using the following classes and methods (a combined sketch follows the list):
+
+\li \ref Gudhi::coxeter_triangulation::Cartesian_product "Cartesian_product(functions...)" expresses the Cartesian
+ product \f$F_1^{-1}(0) \times \ldots \times F_k^{-1}(0)\f$ of multiple implicit manifolds as an implicit manifold.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_product_function() "make_product_function(functions...)" is provided that
+ takes a pack of function-typed objects as the argument.
+\li \ref Gudhi::coxeter_triangulation::Embed_in_Rd "Embed_in_Rd(F, d)" expresses an implicit manifold given as the
+ zero-set of a function \f$F\f$ embedded in a higher-dimensional Euclidean space \f$\mathbb{R}^d\f$.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::make_embedding() "make_embedding(F, d)" is
+ provided.
+\li \ref Gudhi::coxeter_triangulation::Linear_transformation "Linear_transformation(F, M)" applies a linear
+ transformation given by a matrix \f$M\f$ on an implicit manifold given as the zero-set of the function \f$F\f$.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_linear_transformation() "make_linear_transformation(F, M)" is provided.
+\li \ref Gudhi::coxeter_triangulation::Translate "Translate(F, v)" translates an implicit manifold given as the
+ zero-set of the function \f$F\f$ by a vector \f$v\f$.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::translate() "translate(F, v)" is provided.
+\li \ref Gudhi::coxeter_triangulation::Negation() "Negation(F)" defines the negative of the given function \f$F\f$.
+ This class is useful for defining the complement of a given domain, when defining a manifold with boundary.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::negation() "negation(F)" is provided.
+\li \ref Gudhi::coxeter_triangulation::PL_approximation "PL_approximation(F, T)" defines a piecewise-linear
+ approximation of a given function \f$F\f$ induced by an ambient triangulation \f$T\f$.
+ The purpose of this class is to define a piecewise-linear function that is compatible with the requirements for the
+ domain function \f$D\f$ when defining a manifold with boundary.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_pl_approximation() "make_pl_approximation(F, T)" is provided.
+ The type of \f$T\f$ is required to be a model of the concept
+ \ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
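+
+A combined sketch of several of the modifiers above (dimensions and constants are illustrative):
+
+\code{.cpp}
+Function_Sm_in_Rd fun_circle(1.0, 1);                            // circle S^1 in R^2
+auto fun_torus = make_product_function(fun_circle, fun_circle);  // flat torus S^1 x S^1 in R^4
+auto fun_embedded = make_embedding(fun_torus, 6);                // embed it in R^6
+Eigen::VectorXd v = Eigen::VectorXd::Ones(6);
+auto fun_translated = translate(fun_embedded, v);                // translate by the vector v
+\endcode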
+
+It is also possible to implement your own function, as detailed in \ref exampleswithcustomfunction.
+
+\section cellcomplex Cell complex construction
+
+The output of the manifold tracing algorithm can be transformed into the Hasse diagram of a cell complex that
+approximates the input manifold using the class \ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex".
+The type of the cells in the Hasse diagram is
+\ref Gudhi::Hasse_diagram::Hasse_diagram_cell "Hasse_cell<int, double, bool>" provided by the module Hasse diagram.
+The cells in the cell complex given by an object of the class
+\ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex" are accessed through several maps, which are returned
+by the following methods (a short access sketch follows the list).
+
+\li The method
+\ref Gudhi::coxeter_triangulation::Cell_complex::interior_simplex_cell_maps() "interior_simplex_cell_maps()"
+returns a vector of maps from the cells of various dimensions in the interior of the cell complex to the permutahedral
+representations of the corresponding simplices in the ambient triangulation.
+Each individual map for cells of a specific dimension \f$l\f$ can be accessed using the method
+\ref Gudhi::coxeter_triangulation::Cell_complex::interior_simplex_cell_map() "interior_simplex_cell_map(l)".
+\li The method
+\ref Gudhi::coxeter_triangulation::Cell_complex::boundary_simplex_cell_maps() "boundary_simplex_cell_maps()"
+returns a vector of maps from the cells of various dimensions on the boundary of the cell complex to the permutahedral
+representations of the corresponding simplices in the ambient triangulation.
+Each individual map for cells of a specific dimension \f$l\f$ can be accessed using the method
+\ref Gudhi::coxeter_triangulation::Cell_complex::boundary_simplex_cell_map() "boundary_simplex_cell_map(l)".
+\li The method \ref Gudhi::coxeter_triangulation::Cell_complex::cell_simplex_map() "cell_simplex_map()" returns a map
+from the cells in the cell complex to the permutahedral representations of the corresponding simplices in the ambient
+triangulation.
+\li The method \ref Gudhi::coxeter_triangulation::Cell_complex::cell_point_map() "cell_point_map()" returns a map from
+the vertex cells in the cell complex to their Cartesian coordinates.
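+
+A short access sketch for a constructed cell complex (the same pattern is used in the basic example below):
+
+\code{.cpp}
+for (const auto& cp_pair : cell_complex.cell_point_map())
+  std::clog << cp_pair.second.transpose() << "\n";  // Cartesian coordinates of a vertex cell
+for (const auto& sc_pair : cell_complex.interior_simplex_cell_map(1))
+  std::clog << sc_pair.second->get_boundary().size() << "\n";  // an edge cell has two boundary vertices
+\endcode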
+
+The use and interfaces of this \ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex" are limited to the
+Coxeter_triangulation implementation.
+
+\section example Examples
+
+\subsection examplewithoutboundaries Basic example without boundaries
+\include cell_complex_from_basic_circle_manifold.cpp
+
+The program output is:
+
+\include cell_complex_from_basic_circle_manifold_for_doc.txt
+
+\subsection exampleswithboundaries Example with boundaries
+
+Here is an example of constructing a piecewise-linear approximation of a flat torus embedded in \f$\mathbb{R}^4\f$,
+rotated by a random rotation in \f$\mathbb{R}^4\f$ and cut by a hyperplane.
+
+\include manifold_tracing_flat_torus_with_boundary.cpp
+
+The output in <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">medit</a> is:
+
+\image html "flat_torus_with_boundary.png" "Output from the example of a flat torus with boundary"
+
+\subsection exampleswithcustomfunction Example with a custom function
+
+In the following more complex example, we define a custom function for the implicit manifold.
+
+\include manifold_tracing_custom_function.cpp
+
+The output in <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">medit</a> looks as follows:
+
+\image html "custom_function.png" "Output from the example with a custom function"
+
+
+ */
+/** @} */ // end defgroup coxeter_triangulation
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
diff --git a/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png b/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png
new file mode 100644
index 00000000..04912729
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/two_triangulations.png b/src/Coxeter_triangulation/doc/two_triangulations.png
new file mode 100644
index 00000000..055d93e7
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/two_triangulations.png
Binary files differ
diff --git a/src/Coxeter_triangulation/example/CMakeLists.txt b/src/Coxeter_triangulation/example/CMakeLists.txt
new file mode 100644
index 00000000..7f81c599
--- /dev/null
+++ b/src/Coxeter_triangulation/example/CMakeLists.txt
@@ -0,0 +1,19 @@
+project(Coxeter_triangulation_example)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+ # because of random_orthogonal_matrix inclusion
+ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_executable ( Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example manifold_tracing_flat_torus_with_boundary.cpp )
+ target_link_libraries(Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example ${CGAL_LIBRARY})
+ add_test(NAME Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example>)
+ endif()
+
+ add_executable ( Coxeter_triangulation_manifold_tracing_custom_function_example manifold_tracing_custom_function.cpp )
+ add_test(NAME Coxeter_triangulation_manifold_tracing_custom_function_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_manifold_tracing_custom_function_example>)
+
+ add_executable ( Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example cell_complex_from_basic_circle_manifold.cpp )
+ add_test(NAME Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example>)
+endif() \ No newline at end of file
diff --git a/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp
new file mode 100644
index 00000000..dfaaffa8
--- /dev/null
+++ b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp
@@ -0,0 +1,55 @@
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h> // for Gudhi::coxeter_triangulation::make_oracle
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+int main(int argc, char** argv) {
+ // Oracle is a circle of radius 1
+ double radius = 1.;
+ auto oracle = make_oracle(Function_Sm_in_Rd(radius, 1));
+
+ // Define a Coxeter triangulation.
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // Theory forbids that a vertex of the triangulation lies exactly on the circle.
+ // Add some offset to avoid algorithm degeneracies.
+ cox_tr.change_offset(-Eigen::VectorXd::Random(oracle.amb_d()));
+ // For a better manifold approximation, one can change the circle radius value or change the linear transformation
+ // matrix.
+ // The number of points and edges will increase with a better resolution.
+ //cox_tr.change_matrix(0.5 * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using Out_simplex_map = typename Manifold_tracing<Coxeter_triangulation<> >::Out_simplex_map;
+
+ std::vector<Eigen::VectorXd> seed_points(1, oracle.seed());
+ Out_simplex_map interior_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map);
+
+ // Map from Hasse_cell pointers to vertex indices, used to output the edges by their endpoint indices
+ std::map<Cell_complex<Out_simplex_map>::Hasse_cell*, std::size_t> vi_map;
+ std::size_t index = 0;
+
+ std::clog << "Vertices:" << std::endl;
+ for (const auto& cp_pair : cell_complex.cell_point_map()) {
+ std::clog << index << " : (" << cp_pair.second(0) << ", " << cp_pair.second(1) << ")" << std::endl;
+ vi_map.emplace(cp_pair.first, index++);
+ }
+
+ std::clog << "Edges:" << std::endl;
+ for (const auto& sc_pair : cell_complex.interior_simplex_cell_map(1)) {
+ Cell_complex<Out_simplex_map>::Hasse_cell* edge_cell = sc_pair.second;
+ for (const auto& vi_pair : edge_cell->get_boundary()) std::clog << vi_map[vi_pair.first] << " ";
+ std::clog << std::endl;
+ }
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt
new file mode 100644
index 00000000..b323cca3
--- /dev/null
+++ b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt
@@ -0,0 +1,26 @@
+Vertices:
+0 : (-0.680375, 0.523483)
+1 : (0.147642, 0.887879)
+2 : (-0.847996, 0.30801)
+3 : (-0.881369, 0.0951903)
+4 : (0.638494, -0.550215)
+5 : (0.415344, 0.843848)
+6 : (0.812453, -0.0815816)
+7 : (0.319625, -0.7709)
+8 : (0.319625, 0.889605)
+9 : (0.579487, 0.638553)
+10 : (-0.680375, -0.461325)
+11 : (-0.364269, -0.760962)
+Edges:
+3 2
+3 10
+10 11
+11 7
+7 4
+2 0
+0 1
+6 9
+6 4
+1 8
+8 5
+5 9
diff --git a/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp b/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp
new file mode 100644
index 00000000..fe2051bb
--- /dev/null
+++ b/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp
@@ -0,0 +1,87 @@
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/Linear_transformation.h>
+
+#include <gudhi/IO/build_mesh_from_cell_complex.h>
+#include <gudhi/IO/output_meshes_to_medit.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+/* A function that defines a 2d surface embedded in R^4 which naturally
+ * lives on the complex projective plane.
+ * In terms of homogeneous coordinates [x:y:z] of points on the complex projective plane,
+ * the equation of the manifold is x^3*y + y^3*z + z^3*x = 0.
+ * The embedding consists of restricting the manifold to the affine chart z = 1.
+ */
+struct Function_surface_on_CP2_in_R4 {
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ // The real and imaginary parts of the variables x and y
+ double xr = p(0), xi = p(1), yr = p(2), yi = p(3);
+ Eigen::VectorXd result(cod_d());
+
+ // Squares and cubes of real and imaginary parts used in the computations
+ double xr2 = xr * xr, xi2 = xi * xi, yr2 = yr * yr, yi2 = yi * yi, xr3 = xr2 * xr, xi3 = xi2 * xi, yr3 = yr2 * yr,
+ yi3 = yi2 * yi;
+
+ // The first coordinate of the output is Re(x^3*y + y^3 + x)
+ result(0) = xr3 * yr - 3 * xr * xi2 * yr - 3 * xr2 * xi * yi + xi3 * yi + yr3 - 3 * yr * yi2 + xr;
+ // The second coordinate of the output is Im(x^3*y + y^3 + x)
+ result(1) = 3 * xr2 * xi * yr + xr3 * yi - 3 * xr * xi2 * yi - xi3 * yr + 3 * yr2 * yi - yi3 + xi;
+ return result;
+ }
+
+ std::size_t amb_d() const { return 4; };
+ std::size_t cod_d() const { return 2; };
+
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(4);
+ return result;
+ }
+
+ Function_surface_on_CP2_in_R4() {}
+};
+
+int main(int argc, char** argv) {
+ // The function for the (non-compact) manifold
+ Function_surface_on_CP2_in_R4 fun;
+
+ // Seed of the function
+ Eigen::VectorXd seed = fun.seed();
+
+ // Creating the function that defines the boundary of a compact region on the manifold
+ double radius = 3.0;
+ Function_Sm_in_Rd fun_sph(radius, 3, seed);
+
+ // Defining the intersection oracle
+ auto oracle = make_oracle(fun, fun_sph);
+
+ // Define a Coxeter triangulation scaled by a factor lambda.
+ // The triangulation is translated by a random vector to avoid violating the genericity hypothesis.
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ // Output the cell complex to a file readable by medit
+ output_meshes_to_medit(3, "manifold_on_CP2_with_boundary",
+ build_mesh_from_cell_complex(cell_complex, Configuration(true, true, true, 1, 5, 3),
+ Configuration(true, true, true, 2, 13, 14)));
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp b/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp
new file mode 100644
index 00000000..59fe2e2b
--- /dev/null
+++ b/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp
@@ -0,0 +1,72 @@
+// workaround for the annoying boost message in boost 1.69
+#define BOOST_PENDING_INTEGER_LOG2_HPP
+#include <boost/integer/integer_log2.hpp>
+// end workaround
+
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_affine_plane_in_Rd.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Cartesian_product.h>
+#include <gudhi/Functions/Linear_transformation.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/random_orthogonal_matrix.h> // requires CGAL
+
+#include <gudhi/IO/build_mesh_from_cell_complex.h>
+#include <gudhi/IO/output_meshes_to_medit.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+int main(int argc, char** argv) {
+ // Creating a circle S1 in R2 of specified radius
+ double radius = 1.0;
+ Function_Sm_in_Rd fun_circle(radius, 1);
+
+ // Creating a flat torus S1xS1 in R4 from two circle functions
+ auto fun_flat_torus = make_product_function(fun_circle, fun_circle);
+
+ // Apply a random rotation in R4
+ auto matrix = random_orthogonal_matrix(4);
+ auto fun_flat_torus_rotated = make_linear_transformation(fun_flat_torus, matrix);
+
+ // Computing the seed of the rotated flat torus function fun_flat_torus_rotated
+ Eigen::VectorXd seed = fun_flat_torus_rotated.seed();
+
+ // Defining a domain function whose zero-set gives the boundary: a hyperplane orthogonal to the direction of the
+ // seed point, offset by half the seed vector.
+ Eigen::MatrixXd normal_matrix = Eigen::MatrixXd::Zero(4, 1);
+ for (std::size_t i = 0; i < 4; i++) normal_matrix(i, 0) = -seed(i);
+ Function_affine_plane_in_Rd fun_bound(normal_matrix, -seed / 2);
+
+ // Defining the intersection oracle
+ auto oracle = make_oracle(fun_flat_torus_rotated, fun_bound);
+
+ // Define a Coxeter triangulation scaled by a factor lambda.
+ // The triangulation is translated by a random vector to avoid violating the genericity hypothesis.
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ // Output the cell complex to a file readable by medit
+ output_meshes_to_medit(3, "flat_torus_with_boundary",
+ build_mesh_from_cell_complex(cell_complex, Configuration(true, true, true, 1, 5, 3),
+ Configuration(true, true, true, 2, 13, 14)));
+
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h
new file mode 100644
index 00000000..de68acb6
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h
@@ -0,0 +1,77 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef COXETER_TRIANGULATION_H_
+#define COXETER_TRIANGULATION_H_
+
+#include <vector>
+#include <cmath> // for std::sqrt
+
+#include <boost/range/iterator_range.hpp>
+#include <boost/graph/graph_traits.hpp>
+#include <boost/graph/adjacency_list.hpp>
+
+#include <Eigen/Eigenvalues>
+#include <Eigen/Sparse>
+#include <Eigen/SVD>
+
+#include <gudhi/Freudenthal_triangulation.h>
+#include <gudhi/Permutahedral_representation.h>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Coxeter_triangulation
+ * \brief A class that stores Coxeter triangulation of type \f$\tilde{A}_d\f$.
+ * This triangulation has the greatest simplex quality out of all linear transformations
+ * of the Freudenthal-Kuhn triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \tparam Permutahedral_representation_ Type of a simplex given by a permutahedral representation.
+ * Needs to be a model of SimplexInCoxeterTriangulation.
+ */
+template <class Permutahedral_representation_ =
+ Permutahedral_representation<std::vector<int>, std::vector<std::vector<std::size_t> > > >
+class Coxeter_triangulation : public Freudenthal_triangulation<Permutahedral_representation_> {
+ using Matrix = Eigen::MatrixXd;
+
+  // Computes the matrix that transforms the Freudenthal-Kuhn triangulation into
+  // the Coxeter triangulation of type A~_d.
+  Matrix root_matrix(unsigned d) {
+    // Tridiagonal symmetric matrix with 1 on the diagonal and -0.5 next to it
+    Matrix cartan(Matrix::Identity(d, d));
+    for (unsigned i = 1; i < d; i++) {
+      cartan(i - 1, i) = -0.5;
+      cartan(i, i - 1) = -0.5;
+    }
+    Eigen::SelfAdjointEigenSolver<Matrix> saes(cartan);
+    // Square roots of the eigenvalues (the matrix above is positive definite)
+    Eigen::VectorXd sqrt_diag(d);
+    for (unsigned i = 0; i < d; ++i) sqrt_diag(i) = std::sqrt(saes.eigenvalues()[i]);
+
+    // Lower-triangular matrix filled with ones
+    Matrix lower(Matrix::Ones(d, d));
+    lower = lower.triangularView<Eigen::Lower>();
+
+    Matrix result = (lower * saes.eigenvectors() * sqrt_diag.asDiagonal()).inverse();
+    return result;
+  }
+
+ public:
+ /** \brief Constructor of Coxeter triangulation of a given dimension.
+ * @param[in] dimension The dimension of the triangulation.
+ */
+ Coxeter_triangulation(std::size_t dimension)
+ : Freudenthal_triangulation<Permutahedral_representation_>(dimension, root_matrix(dimension)) {}
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h
new file mode 100644
index 00000000..de342ecc
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h
@@ -0,0 +1,340 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CELL_COMPLEX_H_
+#define CELL_COMPLEX_H_
+
+#include <Eigen/Dense>
+
+#include <vector>
+#include <map>
+#include <utility> // for std::make_pair
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/Permutahedral_representation/Simplex_comparator.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h> // for Hasse_cell
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Cell_complex
+ * \brief A class that constructs the cell complex from the output provided by the class
+ * \ref Gudhi::coxeter_triangulation::Manifold_tracing.
+ *
+ * The use and interfaces of this cell complex are limited to the \ref coxeter_triangulation implementation.
+ *
+ * \tparam Out_simplex_map_ The type of a map from a simplex type that is a
+ * model of SimplexInCoxeterTriangulation to Eigen::VectorXd.
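+ *
+ * A typical construction, as a sketch (mirrors the examples of the module):
+ * \code{.cpp}
+ * Cell_complex<Out_simplex_map> cell_complex(intrinsic_dimension);
+ * cell_complex.construct_complex(out_simplex_map);
+ * \endcode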
+ */
+template <class Out_simplex_map_>
+class Cell_complex {
+ public:
+ /** \brief Type of a simplex in the ambient triangulation.
+ * Is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ using Simplex_handle = typename Out_simplex_map_::key_type;
+ /** \brief Type of a cell in the cell complex.
+ * Always is Gudhi::Hasse_cell from the Hasse diagram module.
+ * The additional information is the boolean that is true if and only if the cell lies
+ * on the boundary.
+ */
+ using Hasse_cell = Gudhi::Hasse_diagram::Hasse_diagram_cell<int, double, bool>;
+ /** \brief Type of a map from permutahedral representations of simplices in the
+ * ambient triangulation to the corresponding cells in the cell complex of some
+ * specific dimension.
+ */
+ using Simplex_cell_map = std::map<Simplex_handle, Hasse_cell*, Simplex_comparator<Simplex_handle> >;
+ /** \brief Type of a vector of maps from permutahedral representations of simplices in the
+ * ambient triangulation to the corresponding cells in the cell complex of various dimensions.
+ */
+ using Simplex_cell_maps = std::vector<Simplex_cell_map>;
+
+ /** \brief Type of a map from cells in the cell complex to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ using Cell_simplex_map = std::map<Hasse_cell*, Simplex_handle>;
+
+ /** \brief Type of a map from vertex cells in the cell complex to the permutahedral representations
+ * of their Cartesian coordinates.
+ */
+ using Cell_point_map = std::map<Hasse_cell*, Eigen::VectorXd>;
+
+ private:
+ Hasse_cell* insert_cell(const Simplex_handle& simplex, std::size_t cell_d, bool is_boundary) {
+ Simplex_cell_maps& simplex_cell_maps = (is_boundary ? boundary_simplex_cell_maps_ : interior_simplex_cell_maps_);
+#ifdef DEBUG_TRACES
+ CC_detail_list& cc_detail_list =
+ (is_boundary ? cc_boundary_detail_lists[cell_d] : cc_interior_detail_lists[cell_d]);
+ cc_detail_list.emplace_back(simplex);
+#endif
+ Simplex_cell_map& simplex_cell_map = simplex_cell_maps[cell_d];
+ auto map_it = simplex_cell_map.find(simplex);
+ if (map_it == simplex_cell_map.end()) {
+ hasse_cells_.push_back(new Hasse_cell(is_boundary, cell_d));
+ Hasse_cell* new_cell = hasse_cells_.back();
+ simplex_cell_map.emplace(simplex, new_cell);
+ cell_simplex_map_.emplace(new_cell, simplex);
+#ifdef DEBUG_TRACES
+ cc_detail_list.back().status_ = CC_detail_info::Result_type::inserted;
+#endif
+ return new_cell;
+ }
+#ifdef DEBUG_TRACES
+ CC_detail_info& cc_info = cc_detail_list.back();
+ cc_info.trigger_ = to_string(map_it->first);
+ cc_info.status_ = CC_detail_info::Result_type::self;
+#endif
+ return map_it->second;
+ }
+
+ void expand_level(std::size_t cell_d) {
+ bool is_manifold_with_boundary = boundary_simplex_cell_maps_.size() > 0;
+ for (auto& sc_pair : interior_simplex_cell_maps_[cell_d - 1]) {
+ const Simplex_handle& simplex = sc_pair.first;
+ Hasse_cell* cell = sc_pair.second;
+ for (Simplex_handle coface : simplex.coface_range(cod_d_ + cell_d)) {
+ Hasse_cell* new_cell = insert_cell(coface, cell_d, false);
+ new_cell->get_boundary().emplace_back(cell, 1);
+ }
+ }
+
+ if (is_manifold_with_boundary) {
+ for (auto& sc_pair : boundary_simplex_cell_maps_[cell_d - 1]) {
+ const Simplex_handle& simplex = sc_pair.first;
+ Hasse_cell* cell = sc_pair.second;
+ if (cell_d != intr_d_)
+ for (Simplex_handle coface : simplex.coface_range(cod_d_ + cell_d + 1)) {
+ Hasse_cell* new_cell = insert_cell(coface, cell_d, true);
+ new_cell->get_boundary().emplace_back(cell, 1);
+ }
+ auto map_it = interior_simplex_cell_maps_[cell_d].find(simplex);
+ if (map_it == interior_simplex_cell_maps_[cell_d].end())
+ std::cerr << "Cell_complex::expand_level error: A boundary cell does not have an interior counterpart.\n";
+ else {
+ Hasse_cell* i_cell = map_it->second;
+ i_cell->get_boundary().emplace_back(cell, 1);
+ }
+ }
+ }
+ }
+
+ void construct_complex_(const Out_simplex_map_& out_simplex_map) {
+#ifdef DEBUG_TRACES
+ cc_interior_summary_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_prejoin_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_detail_lists.resize(interior_simplex_cell_maps_.size());
+#endif
+ for (auto& os_pair : out_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, false);
+ cell_point_map_.emplace(new_cell, point);
+ }
+ for (std::size_t cell_d = 1;
+ cell_d < interior_simplex_cell_maps_.size() && !interior_simplex_cell_maps_[cell_d - 1].empty(); ++cell_d) {
+ expand_level(cell_d);
+ }
+ }
+
+ void construct_complex_(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map) {
+#ifdef DEBUG_TRACES
+ cc_interior_summary_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_prejoin_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_detail_lists.resize(interior_simplex_cell_maps_.size());
+ cc_boundary_summary_lists.resize(boundary_simplex_cell_maps_.size());
+ cc_boundary_prejoin_lists.resize(boundary_simplex_cell_maps_.size());
+ cc_boundary_detail_lists.resize(boundary_simplex_cell_maps_.size());
+#endif
+ for (auto& os_pair : boundary_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, true);
+ cell_point_map_.emplace(new_cell, point);
+ }
+ for (auto& os_pair : interior_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, false);
+ cell_point_map_.emplace(new_cell, point);
+ }
+#ifdef DEBUG_TRACES
+ for (const auto& sc_pair : interior_simplex_cell_maps_[0])
+ cc_interior_summary_lists[0].push_back(CC_summary_info(sc_pair));
+ for (const auto& sc_pair : boundary_simplex_cell_maps_[0])
+ cc_boundary_summary_lists[0].push_back(CC_summary_info(sc_pair));
+#endif
+
+ for (std::size_t cell_d = 1;
+ cell_d < interior_simplex_cell_maps_.size() && !interior_simplex_cell_maps_[cell_d - 1].empty(); ++cell_d) {
+ expand_level(cell_d);
+
+#ifdef DEBUG_TRACES
+ for (const auto& sc_pair : interior_simplex_cell_maps_[cell_d])
+ cc_interior_summary_lists[cell_d].push_back(CC_summary_info(sc_pair));
+ if (cell_d < boundary_simplex_cell_maps_.size())
+ for (const auto& sc_pair : boundary_simplex_cell_maps_[cell_d])
+ cc_boundary_summary_lists[cell_d].push_back(CC_summary_info(sc_pair));
+#endif
+ }
+ }
+
+ public:
+ /**
+ * \brief Constructs the cell complex that approximates an \f$m\f$-dimensional manifold
+ * without boundary embedded in the \f$ d \f$-dimensional Euclidean space
+ * from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] out_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ */
+ void construct_complex(const Out_simplex_map_& out_simplex_map) {
+ interior_simplex_cell_maps_.resize(intr_d_ + 1);
+ if (!out_simplex_map.empty()) cod_d_ = out_simplex_map.begin()->first.dimension();
+ construct_complex_(out_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the skeleton of the cell complex that approximates
+ * an \f$m\f$-dimensional manifold without boundary embedded
+ * in the \f$d\f$-dimensional Euclidean space
+ * up to a limit dimension from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] out_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] limit_dimension The dimension of the constructed skeleton.
+ */
+ void construct_complex(const Out_simplex_map_& out_simplex_map, std::size_t limit_dimension) {
+ interior_simplex_cell_maps_.resize(limit_dimension + 1);
+ if (!out_simplex_map.empty()) cod_d_ = out_simplex_map.begin()->first.dimension();
+ construct_complex_(out_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the cell complex that approximates an \f$m\f$-dimensional manifold
+ * with boundary embedded in the \f$ d \f$-dimensional Euclidean space
+ * from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] interior_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] boundary_simplex_map A map from simplices of dimension \f$(d-m+1)\f$
+ * in the ambient triangulation that intersect the boundary of the manifold
+ * to the intersection points.
+ */
+ void construct_complex(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map) {
+ interior_simplex_cell_maps_.resize(intr_d_ + 1);
+ boundary_simplex_cell_maps_.resize(intr_d_);
+ if (!interior_simplex_map.empty()) cod_d_ = interior_simplex_map.begin()->first.dimension();
+ construct_complex_(interior_simplex_map, boundary_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the skeleton of the cell complex that approximates
+ * an \f$m\f$-dimensional manifold with boundary embedded
+ * in the \f$d\f$-dimensional Euclidean space
+ * up to a limit dimension from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] interior_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] boundary_simplex_map A map from simplices of dimension \f$(d-m+1)\f$
+ * in the ambient triangulation that intersect the boundary of the manifold
+ * to the intersection points.
+ * \param[in] limit_dimension The dimension of the constructed skeleton.
+ */
+ void construct_complex(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map,
+ std::size_t limit_dimension) {
+ interior_simplex_cell_maps_.resize(limit_dimension + 1);
+ boundary_simplex_cell_maps_.resize(limit_dimension);
+ if (!interior_simplex_map.empty()) cod_d_ = interior_simplex_map.begin()->first.dimension();
+ construct_complex_(interior_simplex_map, boundary_simplex_map);
+ }
+
+ /**
+ * \brief Returns the intrinsic dimension of the cell complex.
+ */
+ std::size_t intrinsic_dimension() const { return intr_d_; }
+
+ /**
+ * \brief Returns a vector of maps from the cells of various dimensions in the interior
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ const Simplex_cell_maps& interior_simplex_cell_maps() const { return interior_simplex_cell_maps_; }
+
+ /**
+ * \brief Returns a vector of maps from the cells of various dimensions on the boundary
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ const Simplex_cell_maps& boundary_simplex_cell_maps() const { return boundary_simplex_cell_maps_; }
+
+ /**
+ * \brief Returns a map from the cells of a given dimension in the interior
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ *
+ * \param[in] cell_d The dimension of the cells.
+ */
+ const Simplex_cell_map& interior_simplex_cell_map(std::size_t cell_d) const {
+ return interior_simplex_cell_maps_[cell_d];
+ }
+
+ /**
+ * \brief Returns a map from the cells of a given dimension on the boundary
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ *
+ * \param[in] cell_d The dimension of the cells.
+ */
+ const Simplex_cell_map& boundary_simplex_cell_map(std::size_t cell_d) const {
+ return boundary_simplex_cell_maps_[cell_d];
+ }
+
+ /**
+ * \brief Returns a map from the cells in the cell complex of type Gudhi::Hasse_cell
+ * to the permutahedral representations of the corresponding simplices in the
+ * ambient triangulation.
+ */
+ const Cell_simplex_map& cell_simplex_map() const { return cell_simplex_map_; }
+
+ /**
+ * \brief Returns a map from the vertex cells in the cell complex of type Gudhi::Hasse_cell
+ * to their Cartesian coordinates.
+ */
+ const Cell_point_map& cell_point_map() const { return cell_point_map_; }
+
+ /**
+ * \brief Constructor for the class Cell_complex.
+ *
+ * \param[in] intrinsic_dimension The dimension of the cell complex.
+ */
+ Cell_complex(std::size_t intrinsic_dimension) : intr_d_(intrinsic_dimension) {}
+
+ ~Cell_complex() {
+ for (Hasse_cell* hs_ptr : hasse_cells_) delete hs_ptr;
+ }
+
+ private:
+ std::size_t intr_d_, cod_d_;
+ Simplex_cell_maps interior_simplex_cell_maps_, boundary_simplex_cell_maps_;
+ Cell_simplex_map cell_simplex_map_;
+ Cell_point_map cell_point_map_;
+ std::vector<Hasse_cell*> hasse_cells_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
new file mode 100644
index 00000000..9b57da3c
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
@@ -0,0 +1,285 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2017 Swansea University UK
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef HASSE_DIAGRAM_CELL_H
+#define HASSE_DIAGRAM_CELL_H
+
+#include <vector>
+#include <utility> // for std::pair
+#include <ostream>
+#include <string>
+#include <type_traits> // for std::is_same
+#include <cstdlib> // for std::size_t
+
+namespace Gudhi {
+namespace Hasse_diagram {
+
+template <typename Cell_type>
+class Hasse_diagram;
+
+/**
+ * \class Hasse_diagram_cell
+ * \brief Data structure to store a cell in a Hasse diagram.
+ *
+ * \ingroup Hasse_diagram
+ *
+ * \details
+ * The use and interfaces of this Hasse diagram cell are limited to the \ref coxeter_triangulation implementation.
+ *
+ * This is a data structure to store a cell in a general Hasse diagram data structure. It stores the following
+ * information about the cell: references to the boundary and coboundary elements, the dimension of the cell and its
+ * filtration value. It can also store additional information of type Additional_information, which is a template
+ * parameter of the class (set by default to void).
+ *
+ * The cell is a template class requiring the following parameters:
+ * Incidence_type_ - the type of the incidence coefficients. Use integers in the most general case.
+ * Filtration_type_ - the type of the filtration values of the cells.
+ * Additional_information_ (set by default to void) - the type of any additional
+ * information stored in the cells of the Hasse diagram.
+ *
+ */
+template <typename Incidence_type_, typename Filtration_type_, typename Additional_information_ = void>
+class Hasse_diagram_cell {
+ public:
+ typedef Incidence_type_ Incidence_type;
+ typedef Filtration_type_ Filtration_type;
+ typedef Additional_information_ Additional_information;
+ using Cell_range = std::vector<std::pair<Hasse_diagram_cell*, Incidence_type> >;
+
+ /**
+ * Default constructor.
+ **/
+ Hasse_diagram_cell() : dimension(0), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim.
+ **/
+ Hasse_diagram_cell(int dim) : dimension(dim), position(0), deleted_(false) {}
+
+ /**
+   * Constructor of a cell of dimension dim with a given filtration value.
+ **/
+ Hasse_diagram_cell(int dim, Filtration_type filt_)
+ : dimension(dim), position(0), deleted_(false), filtration(filt_) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, int dim)
+ : dimension(dim), boundary(boundary_), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary and coboundary.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, const Cell_range& coboundary_, int dim)
+ : dimension(dim), boundary(boundary_), coBoundary(coboundary_), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary, coboundary and
+ * additional information.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, const Cell_range& coboundary_, const Additional_information& ai,
+ int dim)
+ : dimension(dim),
+ boundary(boundary_),
+ coBoundary(coboundary_),
+ additional_info(ai),
+ position(0),
+ deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim having given additional information.
+ **/
+ Hasse_diagram_cell(Additional_information ai, int dim)
+ : dimension(dim), additional_info(ai), position(0), deleted_(false) {}
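+
+  /* A minimal construction sketch (illustration only): an edge cell whose boundary
+   * consists of two vertex cells with incidence coefficients +1 and -1. A non-void
+   * third template argument is used here as a placeholder payload. Ownership of
+   * such cells is normally taken by the enclosing Hasse diagram structure.
+   * \code
+   * using Cell = Hasse_diagram_cell<int, double, bool>;
+   * Cell* v0 = new Cell(0);                     // vertex, dimension 0
+   * Cell* v1 = new Cell(0);
+   * Cell::Cell_range bd = {{v0, 1}, {v1, -1}};
+   * Cell* e = new Cell(bd, 1);                  // edge, dimension 1
+   * \endcode
+   */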
+
+ /**
+   * Procedure to get the boundary of a given cell. The output format
+ * is a vector of pairs of pointers to boundary elements and incidence
+ * coefficients.
+ **/
+ inline Cell_range& get_boundary() { return this->boundary; }
+
+ /**
+   * Procedure to get the coboundary of a given cell. The output format
+ * is a vector of pairs of pointers to coboundary elements and incidence
+ * coefficients.
+ **/
+ inline Cell_range& get_coBoundary() { return this->coBoundary; }
+
+ /**
+ * Procedure to get the dimension of a cell.
+ **/
+ inline int& get_dimension() { return this->dimension; }
+
+ /**
+   * Procedure to get the additional information stored in the cell.
+ **/
+ inline Additional_information& get_additional_information() { return this->additional_info; }
+
+ /**
+ * Procedure to retrieve the position of the cell in the structure. It is used in
+ * the implementation of Hasse diagram and set by it. Note that removal of
+ * cell and subsequent call of clean_up_the_structure will change those
+ * positions.
+ **/
+ inline unsigned& get_position() { return this->position; }
+
+ /**
+ * Accessing the filtration of the cell.
+ **/
+ inline Filtration_type& get_filtration() {
+ return this->filtration;
+ }
+
+ /**
+   * A procedure used to check if the cell is deleted. It is used by the
+   * subsequent implementation of the Hasse diagram, which is based on lazy
+   * deletion.
+ **/
+ inline bool deleted() { return this->deleted_; }
+
+ template <typename Cell_type>
+ friend class Hasse_diagram;
+
+ template <typename Cell_type>
+ friend class is_before_in_filtration;
+
+ template <typename Complex_type, typename Cell_type>
+ friend std::vector<Cell_type*> convert_to_vector_of_Cell_type(Complex_type& cmplx);
+
+ /**
+ * Procedure to remove deleted boundary and coboundary elements from the
+ * vectors of boundary and coboundary elements of this cell.
+ **/
+ void remove_deleted_elements_from_boundary_and_coboundary() {
+ Cell_range new_boundary;
+ new_boundary.reserve(this->boundary.size());
+ for (std::size_t bd = 0; bd != this->boundary.size(); ++bd) {
+ if (!this->boundary[bd].first->deleted()) {
+ new_boundary.push_back(this->boundary[bd]);
+ }
+ }
+ this->boundary.swap(new_boundary);
+
+ Cell_range new_coBoundary;
+ new_coBoundary.reserve(this->coBoundary.size());
+ for (std::size_t cbd = 0; cbd != this->coBoundary.size(); ++cbd) {
+ if (!this->coBoundary[cbd].first->deleted()) {
+ new_coBoundary.push_back(this->coBoundary[cbd]);
+ }
+ }
+ this->coBoundary.swap(new_coBoundary);
+ }
+
+ /**
+ * Writing to a stream operator.
+ **/
+ friend std::ostream& operator<<(
+ std::ostream& out, const Hasse_diagram_cell<Incidence_type, Filtration_type, Additional_information>& c) {
+ out << c.position << " " << c.dimension << " " << c.filtration << std::endl;
+ for (std::size_t bd = 0; bd != c.boundary.size(); ++bd) {
+      // do not write out the cells that have been deleted
+ if (c.boundary[bd].first->deleted()) continue;
+ out << c.boundary[bd].first->position << " " << c.boundary[bd].second << " ";
+ }
+ out << std::endl;
+ return out;
+ }
+
+ /**
+   * Procedure that returns a vector of pointers to the boundary elements of a given cell.
+ **/
+ inline std::vector<Hasse_diagram_cell*> get_list_of_boundary_elements() {
+ std::vector<Hasse_diagram_cell*> result;
+ std::size_t size_of_boundary = this->boundary.size();
+ result.reserve(size_of_boundary);
+ for (std::size_t bd = 0; bd != size_of_boundary; ++bd) {
+ result.push_back(this->boundary[bd].first);
+ }
+ return result;
+ }
+
+ /**
+   * Procedure that returns a vector of positions of the boundary elements of a given cell.
+ **/
+ inline std::vector<unsigned> get_list_of_positions_of_boundary_elements() {
+ std::vector<unsigned> result;
+ std::size_t size_of_boundary = this->boundary.size();
+ result.reserve(size_of_boundary);
+ for (std::size_t bd = 0; bd != size_of_boundary; ++bd) {
+ result.push_back(this->boundary[bd].first->position);
+ }
+ return result;
+ }
+
+ /**
+   * Function that returns a string signature of the structure.
+ * Used mainly for debugging purposes.
+ **/
+ std::string full_signature_of_the_structure() {
+ std::string result;
+ result += "dimension: ";
+ result += std::to_string(this->dimension);
+ result += " filtration: ";
+ result += std::to_string(this->filtration);
+ result += " position: ";
+ result += std::to_string(this->position);
+ result += " deleted_: ";
+ result += std::to_string(this->deleted_);
+
+    // if the Additional_information is not void, add it to
+    // the signature as well.
+    if (!std::is_same<Additional_information, void>::value) {
+ result += " Additional_information: ";
+ result += std::to_string(this->additional_info);
+ }
+ result += " boundary ";
+ for (std::size_t bd = 0; bd != this->boundary.size(); ++bd) {
+ result += "( " + std::to_string(this->boundary[bd].first->position);
+ result += " " + std::to_string(this->boundary[bd].second);
+ result += ") ";
+ }
+
+ result += " coBoundary ";
+ for (std::size_t cbd = 0; cbd != this->coBoundary.size(); ++cbd) {
+ result += "( " + std::to_string(this->coBoundary[cbd].first->position);
+ result += " " + std::to_string(this->coBoundary[cbd].second);
+ result += ") ";
+ }
+
+ return result;
+ }
+
+ protected:
+ Cell_range boundary;
+ Cell_range coBoundary;
+ int dimension;
+ Additional_information additional_info;
+ unsigned position;
+ bool deleted_;
+ Filtration_type filtration;
+
+ /**
+   * A procedure to delete a cell. It is a protected function of the Hasse_diagram_cell
+   * class, since the Hasse_diagram class needs to control the removal of cells.
+   * Therefore, to remove a cell please use remove_cell in the Hasse_diagram
+   * structure.
+ **/
+ void delete_cell() { this->deleted_ = true; }
+}; // Hasse_diagram_cell
+
+} // namespace Hasse_diagram
+} // namespace Gudhi
+
+#endif  // HASSE_DIAGRAM_CELL_H
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h
new file mode 100644
index 00000000..5543c2fb
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h
@@ -0,0 +1,40 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef QUERY_RESULT_H_
+#define QUERY_RESULT_H_
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Query_result
+ * \brief The result of a query by an oracle such as Implicit_manifold_intersection_oracle.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Simplex_handle>
+struct Query_result {
+ /** \brief The intersection point. */
+ Eigen::VectorXd intersection;
+ /** \brief True if the query simplex intersects the manifold. */
+ bool success;
+};
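+
+/* A minimal usage sketch (illustration only): `oracle`, `simplex` and
+ * `triangulation` are hypothetical, as the oracle interface is defined elsewhere
+ * in this module.
+ * \code
+ * Query_result<Simplex_handle> qr = oracle.intersects(simplex, triangulation);
+ * if (qr.success) std::cout << qr.intersection.transpose() << "\n";
+ * \endcode
+ */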
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h b/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h
new file mode 100644
index 00000000..873c5c9b
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h
@@ -0,0 +1,219 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FREUDENTHAL_TRIANGULATION_H_
+#define FREUDENTHAL_TRIANGULATION_H_
+
+#include <vector>
+#include <algorithm> // for std::sort
+#include <cmath> // for std::floor
+#include <numeric> // for std::iota
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Eigenvalues>
+#include <Eigen/SVD>
+
+#include <gudhi/Permutahedral_representation.h>
+#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Freudenthal_triangulation
+ * \brief A class that stores any affine transformation of the Freudenthal-Kuhn
+ * triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \details The data structure is a record that consists of a matrix
+ * that represents the linear transformation of the Freudenthal-Kuhn triangulation
+ * and a vector that represents the offset.
+ *
+ * \tparam Permutahedral_representation_ Type of a simplex given by a permutahedral representation.
+ * Needs to be a model of SimplexInCoxeterTriangulation.
+ */
+template <class Permutahedral_representation_ =
+ Permutahedral_representation<std::vector<int>, std::vector<std::vector<std::size_t> > > >
+class Freudenthal_triangulation {
+ using Matrix = Eigen::MatrixXd;
+ using Vector = Eigen::VectorXd;
+
+ public:
+ /** \brief Type of the simplices in the triangulation. */
+ using Simplex_handle = Permutahedral_representation_;
+
+ /** \brief Type of the vertices in the triangulation. */
+ using Vertex_handle = typename Permutahedral_representation_::Vertex;
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension.
+ * @param[in] dimension The dimension of the triangulation.
+ */
+ Freudenthal_triangulation(std::size_t dimension)
+ : Freudenthal_triangulation(dimension, Matrix::Identity(dimension, dimension), Vector::Zero(dimension)) {
+ is_freudenthal_ = true;
+ }
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension under
+ * a linear transformation by a given matrix.
+ * @param[in] dimension The dimension of the triangulation.
+ * @param[in] matrix The matrix that defines the linear transformation.
+ * Needs to be invertible.
+ */
+ Freudenthal_triangulation(std::size_t dimension, const Matrix& matrix)
+ : Freudenthal_triangulation(dimension, matrix, Vector::Zero(dimension)) {}
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension under
+ * an affine transformation by a given matrix and a translation vector.
+ * @param[in] dimension The dimension of the triangulation.
+ * @param[in] matrix The matrix that defines the linear transformation.
+ * Needs to be invertible.
+ * @param[in] offset The offset vector.
+ *
+ * @exception std::invalid_argument In debug mode, if offset size is different from dimension.
+ */
+ Freudenthal_triangulation(unsigned dimension, const Matrix& matrix, const Vector& offset)
+ : dimension_(dimension),
+ matrix_(matrix),
+ offset_(offset),
+ colpivhouseholderqr_(matrix_.colPivHouseholderQr()),
+ is_freudenthal_(false) {
+ GUDHI_CHECK(dimension == offset_.size(), std::invalid_argument("Offset must be of size 'dimension'"));
+ }
+
+ /** \brief Dimension of the triangulation. */
+ unsigned dimension() const { return dimension_; }
+
+ /** \brief Matrix that defines the linear transformation of the triangulation. */
+ const Matrix& matrix() const { return matrix_; }
+
+ /** \brief Vector that defines the offset of the triangulation. */
+ const Vector& offset() const { return offset_; }
+
+ /** \brief Change the linear transformation matrix to a given value.
+ * @param[in] matrix New value of the linear transformation matrix.
+ */
+ void change_matrix(const Eigen::MatrixXd& matrix) {
+ matrix_ = matrix;
+ colpivhouseholderqr_ = matrix.colPivHouseholderQr();
+ is_freudenthal_ = false;
+ }
+
+ /** \brief Change the offset vector to a given value.
+ * @param[in] offset New value of the offset vector.
+ */
+ void change_offset(const Eigen::VectorXd& offset) {
+ offset_ = offset;
+ is_freudenthal_ = false;
+ }
+
+ /** \brief Returns the permutahedral representation of the simplex in the
+ * triangulation that contains a given query point.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the point location in a subdivided
+ * triangulation.
+ * The returned simplex is always minimal by inclusion.
+ *
+ * \tparam Point_d A class that represents a point in d-dimensional Euclidean space.
+ * The coordinates should be random-accessible. Needs to provide the method size().
+ *
+ * @param[in] point The query point.
+ * @param[in] scale The scale of the triangulation.
+ *
+ * @exception std::invalid_argument In debug mode, if point dimension is different from triangulation one.
+ */
+ template <class Point_d>
+ Simplex_handle locate_point(const Point_d& point, double scale = 1) const {
+ using Ordered_set_partition = typename Simplex_handle::OrderedSetPartition;
+ using Part = typename Ordered_set_partition::value_type;
+ unsigned d = point.size();
+ GUDHI_CHECK(d == dimension_,
+ std::invalid_argument("The point must be of the same dimension as the triangulation"));
+ double error = 1e-9;
+ Simplex_handle output;
+ std::vector<double> z;
+ if (is_freudenthal_) {
+ for (std::size_t i = 0; i < d; i++) {
+ double x_i = scale * point[i];
+ int y_i = std::floor(x_i);
+ output.vertex().push_back(y_i);
+ z.push_back(x_i - y_i);
+ }
+ } else {
+ Eigen::VectorXd p_vect(d);
+ for (std::size_t i = 0; i < d; i++) p_vect(i) = point[i];
+ Eigen::VectorXd x_vect = colpivhouseholderqr_.solve(p_vect - offset_);
+ for (std::size_t i = 0; i < d; i++) {
+ double x_i = scale * x_vect(i);
+ int y_i = std::floor(x_i);
+ output.vertex().push_back(y_i);
+ z.push_back(x_i - y_i);
+ }
+ }
+ z.push_back(0);
+ Part indices(d + 1);
+ std::iota(indices.begin(), indices.end(), 0);
+ std::sort(indices.begin(), indices.end(), [&z](std::size_t i1, std::size_t i2) { return z[i1] > z[i2]; });
+
+ output.partition().push_back(Part(1, indices[0]));
+ for (std::size_t i = 1; i <= d; ++i)
+ if (z[indices[i - 1]] > z[indices[i]] + error)
+ output.partition().push_back(Part(1, indices[i]));
+ else
+ output.partition().back().push_back(indices[i]);
+ return output;
+ }
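+
+  /* A minimal usage sketch of point location (illustration only):
+   * \code
+   * Freudenthal_triangulation<> ft(3);
+   * Eigen::VectorXd q(3);
+   * q << 0.1, 0.25, 0.7;
+   * auto s = ft.locate_point(q);  // minimal simplex of the triangulation containing q
+   * \endcode
+   */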
+
+ /** \brief Returns the Cartesian coordinates of the given vertex.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the computation of Cartesian coordinates
+ * of a vertex in a subdivided triangulation.
+ * @param[in] vertex The query vertex.
+ * @param[in] scale The scale of the triangulation.
+ */
+ Eigen::VectorXd cartesian_coordinates(const Vertex_handle& vertex, double scale = 1) const {
+ Eigen::VectorXd v_vect(dimension_);
+ for (std::size_t j = 0; j < dimension_; j++) v_vect(j) = vertex[j] / scale;
+ return matrix_ * v_vect + offset_;
+ }
+
+ /** \brief Returns the Cartesian coordinates of the barycenter of a given simplex.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the computation of Cartesian coordinates
+ * of the barycenter of a simplex in a subdivided triangulation.
+ * @param[in] simplex The query simplex.
+ * @param[in] scale The scale of the triangulation.
+ */
+ Eigen::VectorXd barycenter(const Simplex_handle& simplex, double scale = 1) const {
+ Eigen::VectorXd res_vector(dimension_);
+ res_vector.setZero(dimension_, 1);
+ for (auto v : simplex.vertex_range()) {
+ res_vector += cartesian_coordinates(v, scale);
+ }
+ return (1. / (simplex.dimension() + 1)) * res_vector;
+ }
+
+ protected:
+ unsigned dimension_;
+ Matrix matrix_;
+ Vector offset_;
+ Eigen::ColPivHouseholderQR<Matrix> colpivhouseholderqr_;
+ bool is_freudenthal_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h b/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h
new file mode 100644
index 00000000..0533bb83
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h
@@ -0,0 +1,157 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_CARTESIAN_PRODUCT_H_
+#define FUNCTIONS_CARTESIAN_PRODUCT_H_
+
+#include <cstdlib>
+#include <tuple>
+#include <type_traits> // for std::enable_if
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/* Get the domain dimension of the tuple of functions.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), std::size_t>::type get_amb_d(const std::tuple<T...>& tuple) {
+ return 0;
+}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), std::size_t>::type get_amb_d(const std::tuple<T...>& tuple) {
+ return std::get<I>(tuple).amb_d() + get_amb_d<I + 1, T...>(tuple);
+}
+
+/* Get the codomain dimension of the tuple of functions.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), std::size_t>::type get_cod_d(const std::tuple<T...>& tuple) {
+ return 0;
+}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), std::size_t>::type get_cod_d(const std::tuple<T...>& tuple) {
+ return std::get<I>(tuple).cod_d() + get_cod_d<I + 1, T...>(tuple);
+}
+
+/* Get the seed of the tuple of functions.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), void>::type get_seed(const std::tuple<T...>& tuple,
+ Eigen::VectorXd& point, std::size_t i = 0) {}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), void>::type get_seed(const std::tuple<T...>& tuple,
+ Eigen::VectorXd& point, std::size_t i = 0) {
+ const auto& f = std::get<I>(tuple);
+ std::size_t n = f.amb_d();
+ Eigen::VectorXd seed = f.seed();
+ for (std::size_t j = 0; j < n; ++j) point(i + j) = seed(j);
+ get_seed<I + 1, T...>(tuple, point, i + n);
+}
+
+/* Get the value of the tuple of functions at a point: each function is evaluated
+ * on its own block of coordinates and the results are concatenated.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), void>::type get_value(const std::tuple<T...>& tuple,
+ const Eigen::VectorXd& x,
+ Eigen::VectorXd& point, std::size_t i = 0,
+ std::size_t j = 0) {}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), void>::type get_value(const std::tuple<T...>& tuple,
+ const Eigen::VectorXd& x,
+ Eigen::VectorXd& point, std::size_t i = 0,
+ std::size_t j = 0) {
+ const auto& f = std::get<I>(tuple);
+ std::size_t n = f.amb_d();
+ std::size_t k = f.cod_d();
+ Eigen::VectorXd x_i(n);
+ for (std::size_t l = 0; l < n; ++l) x_i(l) = x(i + l);
+ Eigen::VectorXd res = f(x_i);
+ for (std::size_t l = 0; l < k; ++l) point(j + l) = res(l);
+ get_value<I + 1, T...>(tuple, x, point, i + n, j + k);
+}
+
+/**
+ * \class Cartesian_product
+ * \brief Constructs the function the zero-set of which is the Cartesian product
+ * of the zero-sets of some given functions.
+ *
+ * \tparam Functions A pack template parameter for functions. All functions should be models of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class... Functions>
+struct Cartesian_product {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result(cod_d_);
+ get_value(function_tuple_, p, result, 0, 0);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return amb_d_; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return cod_d_; }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result(amb_d_);
+ get_seed(function_tuple_, result, 0);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the Cartesian product function.
+ *
+ * @param[in] functions The functions the zero-sets of which are factors in the
+ * Cartesian product of the resulting function.
+ */
+ Cartesian_product(const Functions&... functions) : function_tuple_(std::make_tuple(functions...)) {
+ amb_d_ = get_amb_d(function_tuple_);
+ cod_d_ = get_cod_d(function_tuple_);
+ }
+
+ private:
+ std::tuple<Functions...> function_tuple_;
+ std::size_t amb_d_, cod_d_;
+};
+
+/**
+ * \brief Static constructor of a Cartesian product function.
+ *
+ * @param[in] functions The functions the zero-sets of which are factors in the
+ * Cartesian product of the resulting function.
+ *
+ * \tparam Functions A pack template parameter for functions. All functions should be models of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <typename... Functions>
+Cartesian_product<Functions...> make_product_function(const Functions&... functions) {
+ return Cartesian_product<Functions...>(functions...);
+}
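+
+/* A minimal usage sketch (illustration only): the product of two circles, a torus
+ * in R^4. Function_Sm_in_Rd is defined in gudhi/Functions/Function_Sm_in_Rd.h.
+ * \code
+ * Function_Sm_in_Rd circle(1., 1);                    // S^1 of radius 1 in R^2
+ * auto torus = make_product_function(circle, circle);
+ * Eigen::VectorXd p = torus.seed();                   // a point on the torus in R^4
+ * \endcode
+ */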
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h b/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h
new file mode 100644
index 00000000..0603afd8
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h
@@ -0,0 +1,64 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_CONSTANT_FUNCTION_H_
+#define FUNCTIONS_CONSTANT_FUNCTION_H_
+
+#include <cstdlib>    // for std::size_t
+#include <stdexcept>  // for std::logic_error
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Constant_function
+ * \brief A class that encodes a constant function from R^d to R^k.
+ * This function does not correspond to any implicit manifold.
+ */
+struct Constant_function {
+ /** \brief Value of the function at a specified point. The value is constant.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ return value_;
+ }
+
+  /** \brief Returns the domain dimension. */
+  std::size_t amb_d() const { return d_; }
+
+  /** \brief Returns the codomain dimension. */
+  std::size_t cod_d() const { return k_; }
+
+  /** \brief No seed point is available. Throws an exception on invocation. */
+  Eigen::VectorXd seed() const { throw std::logic_error("Seed invoked on a constant function."); }
+
+  Constant_function() : d_(0), k_(0) {}
+
+ /**
+   * \brief Constructor of a constant function from R^d to R^k.
+ *
+ * @param[in] d The domain dimension.
+ * @param[in] k The codomain dimension.
+ * @param[in] value The constant value of the function.
+ */
+ Constant_function(std::size_t d, std::size_t k, const Eigen::VectorXd& value) : d_(d), k_(k), value_(value) {}
+
+ private:
+ std::size_t d_, k_;
+ Eigen::VectorXd value_;
+};
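+
+/* A minimal usage sketch (illustration only): a function from R^3 to R^2 with
+ * constant value (1, 2).
+ * \code
+ * Eigen::VectorXd v(2);
+ * v << 1., 2.;
+ * Constant_function f(3, 2, v);
+ * Eigen::VectorXd y = f(Eigen::VectorXd::Zero(3));  // y == v
+ * \endcode
+ */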
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h
new file mode 100644
index 00000000..e1fe868f
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h
@@ -0,0 +1,93 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_EMBED_IN_RD_H_
+#define FUNCTIONS_EMBED_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Embed_in_Rd
+ * \brief Embedding of an implicit manifold in a higher dimension.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Embed_in_Rd {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd x = p;
+ Eigen::VectorXd x_k(fun_.amb_d()), x_rest(d_ - fun_.amb_d());
+ for (std::size_t i = 0; i < fun_.amb_d(); ++i) x_k(i) = x(i);
+ for (std::size_t i = fun_.amb_d(); i < d_; ++i) x_rest(i - fun_.amb_d()) = x(i);
+ Eigen::VectorXd result = fun_(x_k);
+ result.conservativeResize(this->cod_d());
+ for (std::size_t i = fun_.cod_d(); i < this->cod_d(); ++i) result(i) = x_rest(i - fun_.cod_d());
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return d_; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return d_ - (fun_.amb_d() - fun_.cod_d()); }
+
+ /** \brief Returns a point on the zero-set of the embedded function. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result.conservativeResize(d_);
+ for (std::size_t l = fun_.amb_d(); l < d_; ++l) result(l) = 0;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the embedding function.
+ *
+ * @param[in] function The function to be embedded in higher dimension.
+ * @param[in] d Embedding dimension.
+ */
+ Embed_in_Rd(const Function_& function, std::size_t d) : fun_(function), d_(d) {}
+
+ private:
+ Function_ fun_;
+ std::size_t d_;
+};
+
+/**
+ * \brief Static constructor of an embedding function.
+ *
+ * @param[in] function The function to be embedded in higher dimension.
+ * @param[in] d Embedding dimension.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Embed_in_Rd<Function_> make_embedding(const Function_& function, std::size_t d) {
+ return Embed_in_Rd<Function_>(function, d);
+}
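+
+/* A minimal usage sketch (illustration only): a circle S^1, given as the zero-set
+ * of Function_Sm_in_Rd in R^2, embedded into R^5.
+ * \code
+ * Function_Sm_in_Rd circle(1., 1);            // S^1 in R^2
+ * auto embedded = make_embedding(circle, 5);  // the same circle, seen in R^5
+ * Eigen::VectorXd s = embedded.seed();        // (1, 0, 0, 0, 0)
+ * \endcode
+ */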
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h
new file mode 100644
index 00000000..8911f990
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h
@@ -0,0 +1,110 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_SM_IN_RD_H_
+#define FUNCTIONS_FUNCTION_SM_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_Sm_in_Rd
+ * \brief A class for the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space.
+ */
+struct Function_Sm_in_Rd {
+ /** \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd x = p;
+ for (std::size_t i = 0; i < d_; ++i) x(i) -= center_[i];
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(k_);
+ for (std::size_t i = 0; i < m_ + 1; ++i) result(0) += x(i) * x(i);
+ result(0) -= r_ * r_;
+ for (std::size_t j = 1; j < k_; ++j) result(j) = x(m_ + j);
+ return result;
+ }
+
+ /** \brief Returns the domain dimension. Same as the ambient dimension of the sphere. */
+ std::size_t amb_d() const { return d_; };
+
+ /** \brief Returns the codomain dimension. Same as the codimension of the sphere. */
+ std::size_t cod_d() const { return k_; };
+
+ /** \brief Returns a point on the sphere. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(d_);
+ result(0) += r_;
+ for (std::size_t i = 0; i < d_; ++i) result(i) += center_[i];
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] d The ambient dimension of the sphere.
+ * @param[in] center The center of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, std::size_t d, Eigen::VectorXd center)
+ : m_(m), k_(d - m), d_(d), r_(r), center_(center) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space centered at the origin.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] d The ambient dimension of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, std::size_t d)
+ : m_(m), k_(d - m), d_(d), r_(r), center_(Eigen::VectorXd::Zero(d_)) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the (m+1)-dimensional Euclidean space.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] center The center of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, Eigen::VectorXd center)
+ : m_(m), k_(1), d_(m_ + 1), r_(r), center_(center) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the (m+1)-dimensional Euclidean space centered at the origin.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m) : m_(m), k_(1), d_(m_ + 1), r_(r), center_(Eigen::VectorXd::Zero(d_)) {}
+
+ Function_Sm_in_Rd(const Function_Sm_in_Rd& rhs) : Function_Sm_in_Rd(rhs.r_, rhs.m_, rhs.d_, rhs.center_) {}
+
+ private:
+ std::size_t m_, k_, d_;
+ double r_;
+ Eigen::VectorXd center_;
+};
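+
+/* A minimal sanity-check sketch (illustration only): the unit 2-sphere in R^3;
+ * the seed lies on the zero-set.
+ * \code
+ * Function_Sm_in_Rd sphere(1., 2);    // r = 1, m = 2, ambient dimension d = m + 1 = 3
+ * Eigen::VectorXd s = sphere.seed();  // (1, 0, 0)
+ * double v = sphere(s)(0);            // numerically zero
+ * \endcode
+ */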
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
new file mode 100644
index 00000000..a9e2d507
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
@@ -0,0 +1,90 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_AFFINE_PLANE_IN_RD_H_
+#define FUNCTIONS_FUNCTION_AFFINE_PLANE_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_affine_plane_in_Rd
+ * \brief A class for the function that defines an m-dimensional implicit affine plane
+ * embedded in d-dimensional Euclidean space.
+ */
+struct Function_affine_plane_in_Rd {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = normal_matrix_.transpose() * (p - off_);
+ return result;
+ }
+
+  /** \brief Returns the domain dimension. Same as the ambient dimension of the plane. */
+  std::size_t amb_d() const { return d_; }
+
+  /** \brief Returns the codomain dimension. Same as the codimension of the plane. */
+  std::size_t cod_d() const { return k_; }
+
+ /** \brief Returns a point on the affine plane. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = off_;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit affine
+ * plane in the d-dimensional Euclidean space.
+ *
+ * @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
+ * correspond to the ambient dimension, the number of columns should correspond to
+ * the size of the normal basis (codimension).
+ * @param[in] offset The offset vector of the affine plane.
+ * The dimension of the vector should be the ambient dimension of the manifold.
+ */
+ Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix, const Eigen::VectorXd& offset)
+ : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), off_(offset) {
+ normal_matrix_.colwise().normalize();
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit affine
+   * plane in the d-dimensional Euclidean space that passes through the origin.
+ *
+ * @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
+ * correspond to the ambient dimension, the number of columns should correspond to
+ * the size of the normal basis (codimension).
+ */
+ Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix)
+ : normal_matrix_(normal_matrix),
+ d_(normal_matrix.rows()),
+ k_(normal_matrix.cols()),
+ off_(Eigen::VectorXd::Zero(d_)) {
+ normal_matrix_.colwise().normalize();
+ }
+
+ private:
+ Eigen::MatrixXd normal_matrix_;
+ std::size_t d_, k_;
+ Eigen::VectorXd off_;
+};
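+
+/* A minimal usage sketch (illustration only): the z-axis in R^3, seen as an
+ * affine plane of codimension 2 with normal basis {e_1, e_2}.
+ * \code
+ * Eigen::MatrixXd normal(3, 2);
+ * normal << 1, 0,
+ *           0, 1,
+ *           0, 0;
+ * Function_affine_plane_in_Rd z_axis(normal);  // passes through the origin
+ * \endcode
+ */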
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h
new file mode 100644
index 00000000..620446da
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h
@@ -0,0 +1,80 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_CHAIR_IN_R3_H_
+#define FUNCTIONS_FUNCTION_CHAIR_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::pow
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_chair_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a so-called
+ * "chair" surface embedded in R^3.
+ */
+struct Function_chair_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = std::pow(x * x + y * y + z * z - a_ * k_ * k_, 2) -
+ b_ * ((z - k_) * (z - k_) - 2 * x * x) * ((z + k_) * (z + k_) - 2 * y * y);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; }
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+ double t1 = a_ - b_;
+ double discr = t1 * t1 - (1.0 - b_) * (a_ * a_ - b_);
+ double z0 = k_ * std::sqrt((t1 + std::sqrt(discr)) / (1 - b_));
+ Eigen::Vector3d result(off_[0], off_[1], z0 + off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines the 'chair' surface
+ * embedded in R^3.
+ *
+ * @param[in] a A numerical parameter.
+ * @param[in] b A numerical parameter.
+ * @param[in] k A numerical parameter.
+ * @param[in] off Offset vector.
+ */
+ Function_chair_in_R3(double a = 0.8, double b = 0.4, double k = 1.0, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : a_(a), b_(b), k_(k), off_(off) {}
+
+ protected:
+ double a_, b_, k_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h
new file mode 100644
index 00000000..f73c4280
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h
@@ -0,0 +1,69 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_IRON_IN_R3_H_
+#define FUNCTIONS_FUNCTION_IRON_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::pow
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_iron_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a surface
+ * embedded in R^3 that resembles an iron.
+ */
+struct Function_iron_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+    double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = -std::pow(x, 6) / 300. - std::pow(y, 6) / 300. - std::pow(z, 6) / 300. + x * y * y * z / 2.1 + y * y +
+ std::pow(z - 2, 4) - 1;
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+    Eigen::Vector3d result(std::pow(4500, 1. / 6) + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a surface embedded in R^3
+   * that resembles an iron.
+ *
+ * @param[in] off Offset vector.
+ */
+ Function_iron_in_R3(Eigen::Vector3d off = Eigen::Vector3d::Zero()) : off_(off) {}
+
+ private:
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h
new file mode 100644
index 00000000..beb41e00
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h
@@ -0,0 +1,85 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_LEMNISCATE_REVOLUTION_IN_R3_H_
+#define FUNCTIONS_FUNCTION_LEMNISCATE_REVOLUTION_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::sqrt
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_lemniscate_revolution_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a surface of revolution
+ * around the x axis based on the lemniscate of Bernoulli embedded in R^3.
+ */
+struct Function_lemniscate_revolution_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ double x2 = x * x, y2 = y * y, z2 = z * z, a2 = a_ * a_;
+ double t1 = x2 + y2 + z2;
+ result(0) = t1 * t1 - 2 * a2 * (x2 - y2 - z2);
+ return result;
+ }
+
+  /** \brief Returns the domain (ambient) dimension. */
+  std::size_t amb_d() const { return 3; }
+
+  /** \brief Returns the codomain dimension. */
+  std::size_t cod_d() const { return 1; }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed2() for the other point.
+ */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(std::sqrt(2 * a_) + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed() for the other point.
+ */
+ Eigen::VectorXd seed2() const {
+ Eigen::Vector3d result(-std::sqrt(2 * a_) + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a surface of revolution
+ * around the x axis based on the lemniscate of Bernoulli embedded in R^3.
+ *
+ * @param[in] a A numerical parameter.
+ * @param[in] off Offset vector.
+ */
+ Function_lemniscate_revolution_in_R3(double a = 1, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : a_(a), off_(off) {}
+
+ private:
+ double a_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
new file mode 100644
index 00000000..f315d794
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
@@ -0,0 +1,84 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_MOMENT_CURVE_IN_RD_H_
+#define FUNCTIONS_FUNCTION_MOMENT_CURVE_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_moment_curve_in_Rd
+ * \brief A class for the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ */
+struct Function_moment_curve_in_Rd {
+ /** \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result(k_);
+    for (std::size_t i = 1; i < d_; ++i)
+      result(i - 1) = (p(i) - off_(i)) - (p(0) - off_(0)) * (p(i - 1) - off_(i - 1));
+ return result;
+ }
+
+  /** \brief Returns the domain (ambient) dimension. */
+  std::size_t amb_d() const { return d_; }
+
+  /** \brief Returns the codomain dimension. */
+  std::size_t cod_d() const { return k_; }
+
+ /** \brief Returns a point on the moment curve. */
+ Eigen::VectorXd seed() const {
+    Eigen::VectorXd result = off_;
+ return result;
+ }
+
+  /** \brief Returns the radius of the moment curve. */
+  double get_radius() const {
+ return r_;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r Numerical parameter.
+ * @param[in] d The ambient dimension.
+ */
+  Function_moment_curve_in_Rd(double r, std::size_t d) : k_(d - 1), d_(d), r_(r), off_(Eigen::VectorXd::Zero(d)) {}
+
+ /**
+ * \brief Constructor of the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r Numerical parameter.
+ * @param[in] d The ambient dimension.
+ * @param[in] offset The offset of the moment curve.
+ */
+  Function_moment_curve_in_Rd(double r, std::size_t d, const Eigen::VectorXd& offset)
+ : k_(d - 1), d_(d), r_(r), off_(offset) {}
+
+ private:
+ std::size_t k_, d_;
+ double r_;
+ Eigen::VectorXd off_;
+};
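+
+/* A minimal sanity-check sketch (illustration only): points of the moment curve
+ * (t, t^2, ..., t^d) satisfy p_i = p_0 * p_{i-1}, so the function vanishes there.
+ * \code
+ * Function_moment_curve_in_Rd mc(1., 3);
+ * double t = 0.5;
+ * Eigen::VectorXd p(3);
+ * p << t, t * t, t * t * t;
+ * Eigen::VectorXd v = mc(p);  // numerically the zero vector of R^2
+ * \endcode
+ */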
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h
new file mode 100644
index 00000000..b54d3c74
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h
@@ -0,0 +1,71 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_TORUS_IN_R3_H_
+#define FUNCTIONS_FUNCTION_TORUS_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::sqrt
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_torus_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a torus
+ * surface embedded in R^3.
+ */
+struct Function_torus_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = (z * z + (std::sqrt(x * x + y * y) - r_) * (std::sqrt(x * x + y * y) - r_) - R_ * R_);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(R_ + r_ + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a torus embedded in R^3.
+ *
+   * @param[in] R The tube radius of the torus.
+   * @param[in] r The distance from the center of the tube to the center of the torus.
+ * @param[in] off Offset vector.
+ */
+ Function_torus_in_R3(double R = 1, double r = 0.5, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : R_(R), r_(r), off_(off) {}
+
+ private:
+ double R_, r_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h
new file mode 100644
index 00000000..df1f1eec
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h
@@ -0,0 +1,78 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_WHITNEY_UMBRELLA_IN_R3_H_
+#define FUNCTIONS_FUNCTION_WHITNEY_UMBRELLA_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_whitney_umbrella_in_R3
+ * \brief A class that encodes the function, the zero-set of which is the Whitney umbrella
+ * surface embedded in R^3.
+ */
+struct Function_whitney_umbrella_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = x * x - y * y * z;
+ return result;
+ }
+
+  /** \brief Returns the domain (ambient) dimension. */
+  std::size_t amb_d() const { return 3; }
+
+  /** \brief Returns the codomain dimension. */
+  std::size_t cod_d() const { return 1; }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed2() for the other point.
+ */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(1 + off_[0], 1 + off_[1], 1 + off_[2]);
+ return result;
+ }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed() for the other point.
+ */
+ Eigen::VectorXd seed2() const {
+ Eigen::Vector3d result(-1 + off_[0], -1 + off_[1], 1 + off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines the Whitney umbrella in R^3.
+ *
+ * @param[in] off Offset vector.
+ */
+ Function_whitney_umbrella_in_R3(Eigen::Vector3d off = Eigen::Vector3d::Zero()) : off_(off) {}
+
+ private:
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h b/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h
new file mode 100644
index 00000000..82e25bb9
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h
@@ -0,0 +1,88 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_LINEAR_TRANSFORMATION_H_
+#define FUNCTIONS_LINEAR_TRANSFORMATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Linear_transformation
+ * \brief Transforms the zero-set of the function by a given linear transformation.
+ * The underlying function corresponds to f(M^{-1}*x), where M is the transformation matrix.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Linear_transformation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = fun_(matrix_.householderQr().solve(p));
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result = matrix_ * result;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of a linearly transformed function.
+ *
+ * @param[in] function The function to be linearly transformed.
+ * @param[in] matrix The transformation matrix. Its dimension should be d*d,
+ * where d is the domain (ambient) dimension of 'function'.
+ */
+ Linear_transformation(const Function_& function, const Eigen::MatrixXd& matrix) : fun_(function), matrix_(matrix) {}
+
+ private:
+ Function_ fun_;
+ Eigen::MatrixXd matrix_;
+};
+
+/**
+ * \brief Static constructor of a linearly transformed function.
+ *
+ * @param[in] function The function to be linearly transformed.
+ * @param[in] matrix The transformation matrix. Its dimension should be d*d,
+ * where d is the domain (ambient) dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Linear_transformation<Function_> make_linear_transformation(const Function_& function, const Eigen::MatrixXd& matrix) {
+ return Linear_transformation<Function_>(function, matrix);
+}
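+
+/* Usage sketch (illustrative only; 'fun' stands for any model of
+ * FunctionForImplicitManifold, e.g. one of the Function_*_in_R3 classes, and any
+ * invertible d*d matrix can be used as the transformation):
+ *
+ *   Eigen::MatrixXd matrix = random_orthogonal_matrix(fun.amb_d());
+ *   auto transformed = make_linear_transformation(fun, matrix);
+ *   Eigen::VectorXd p = transformed.seed();  // lies on the transformed zero-set
+ *   Eigen::VectorXd v = transformed(p);      // approximately the zero vector
+ */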
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h b/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h
new file mode 100644
index 00000000..fdf07f27
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h
@@ -0,0 +1,84 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_NEGATION_H_
+#define FUNCTIONS_NEGATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Negation
+ * \brief Constructs the "minus" function. The zero-set is the same, but
+ * the values at other points are the negative of their original value.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Negation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = -fun_(p);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the negative function.
+ *
+ * @param[in] function The function to be negated.
+ */
+ Negation(const Function_& function) : fun_(function) {}
+
+ private:
+ Function_ fun_;
+};
+
+/**
+ * \brief Static constructor of the negative function.
+ *
+ * @param[in] function The function to be negated.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Negation<Function_> negation(const Function_& function) {
+ return Negation<Function_>(function);
+}
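+
+/* Usage sketch (illustrative only; 'fun' stands for any model of
+ * FunctionForImplicitManifold):
+ *
+ *   auto neg = negation(fun);
+ *   // neg(p) == -fun(p) for every point p, while neg.seed() == fun.seed().
+ */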
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h b/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h
new file mode 100644
index 00000000..22071d6d
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h
@@ -0,0 +1,111 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_PL_APPROXIMATION_H_
+#define FUNCTIONS_PL_APPROXIMATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class PL_approximation
+ * \brief Constructs a piecewise-linear approximation of a function induced by
+ * an ambient triangulation.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ * \tparam Triangulation_ The triangulation template parameter. Should be a model of
+ * the concept TriangulationForManifoldTracing.
+ */
+template <class Function_, class Triangulation_>
+struct PL_approximation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
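+    // Locate the simplex s of the ambient triangulation that contains p, express p in
+    // barycentric coordinates lambda over the vertices v_j of s (by solving
+    // [1 ... 1; v_0 ... v_k] * lambda = [1; p]) and return sum_j lambda_j * fun_(v_j).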
+ std::size_t cod_d = this->cod_d();
+ std::size_t amb_d = this->amb_d();
+ auto s = tr_.locate_point(p);
+ Eigen::MatrixXd matrix(cod_d, s.dimension() + 1);
+ Eigen::MatrixXd vertex_matrix(amb_d + 1, s.dimension() + 1);
+ for (std::size_t i = 0; i < s.dimension() + 1; ++i) vertex_matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : s.vertex_range()) {
+ Eigen::VectorXd pt_v = tr_.cartesian_coordinates(v);
+ Eigen::VectorXd fun_v = fun_(pt_v);
+ for (std::size_t i = 1; i < amb_d + 1; ++i) vertex_matrix(i, j) = pt_v(i - 1);
+ for (std::size_t i = 0; i < cod_d; ++i) matrix(i, j) = fun_v(i);
+ j++;
+ }
+ assert(j == s.dimension() + 1);
+ Eigen::VectorXd z(amb_d + 1);
+ z(0) = 1;
+ for (std::size_t i = 1; i < amb_d + 1; ++i) z(i) = p(i - 1);
+ Eigen::VectorXd lambda = vertex_matrix.colPivHouseholderQr().solve(z);
+ Eigen::VectorXd result = matrix * lambda;
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ // TODO: not finished. Should use an oracle.
+ return Eigen::VectorXd(amb_d());
+ }
+
+ /**
+ * \brief Constructor of the piecewise-linear approximation of a function
+ * induced by an ambient triangulation.
+ *
+ * @param[in] function The function.
+ * @param[in] triangulation The ambient triangulation.
+ */
+ PL_approximation(const Function_& function, const Triangulation_& triangulation)
+ : fun_(function), tr_(triangulation) {}
+
+ private:
+ Function_ fun_;
+ Triangulation_ tr_;
+};
+
+/**
+ * \brief Static constructor of the piecewise-linear approximation of a function
+ * induced by an ambient triangulation.
+ *
+ * @param[in] function The function.
+ * @param[in] triangulation The ambient triangulation.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ * \tparam Triangulation_ The triangulation template parameter. Should be a model of
+ * the concept TriangulationForManifoldTracing.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Triangulation_>
+PL_approximation<Function_, Triangulation_> make_pl_approximation(const Function_& function,
+ const Triangulation_& triangulation) {
+ return PL_approximation<Function_, Triangulation_>(function, triangulation);
+}
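+
+/* Usage sketch (illustrative only; 'fun' stands for a model of
+ * FunctionForImplicitManifold and 'tr' for a model of TriangulationForManifoldTracing
+ * of dimension fun.amb_d()):
+ *
+ *   auto pl_fun = make_pl_approximation(fun, tr);
+ *   Eigen::VectorXd v = pl_fun(p);  // PL interpolation of fun at a point p
+ */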
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h b/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h
new file mode 100644
index 00000000..cbe65abe
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h
@@ -0,0 +1,89 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_TRANSLATE_H_
+#define FUNCTIONS_TRANSLATE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Translate
+ * \brief Translates the zero-set of the function by a vector.
+ * The underlying function corresponds to f(x-off), where off is the offset vector.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Translate {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = fun_(p - off_);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result += off_;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the translated function.
+ *
+ * @param[in] function The function to be translated.
+ * @param[in] off The offset vector. The dimension should correspond to the
+ * domain (ambient) dimension of 'function'.
+ */
+ Translate(const Function_& function, const Eigen::VectorXd& off) : fun_(function), off_(off) {}
+
+ private:
+ Function_ fun_;
+ Eigen::VectorXd off_;
+};
+
+/**
+ * \brief Static constructor of a translated function.
+ *
+ * @param[in] function The function to be translated.
+ * @param[in] off The offset vector. The dimension should correspond to the
+ * domain (ambient) dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Translate<Function_> translate(const Function_& function, Eigen::VectorXd off) {
+ return Translate<Function_>(function, off);
+}
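+
+/* Usage sketch (illustrative only; 'fun' stands for any model of
+ * FunctionForImplicitManifold):
+ *
+ *   Eigen::VectorXd off = Eigen::VectorXd::Constant(fun.amb_d(), 2.0);
+ *   auto shifted = translate(fun, off);
+ *   // shifted(p) == fun(p - off), and shifted.seed() == fun.seed() + off.
+ */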
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h b/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h
new file mode 100644
index 00000000..6a896e94
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h
@@ -0,0 +1,72 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_RANDOM_ORTHOGONAL_MATRIX_H_
+#define FUNCTIONS_RANDOM_ORTHOGONAL_MATRIX_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::cos, std::sin
+#include <random> // for std::uniform_real_distribution, std::random_device
+
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+#include <Eigen/SVD>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/point_generators_d.h>
+
+#include <boost/math/constants/constants.hpp> // for PI value
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Generates a uniform random orthogonal matrix using the "subgroup algorithm" by
+ * Diaconis & Shahshahani.
+ * \details Taken from https://en.wikipedia.org/wiki/Rotation_matrix#Uniform_random_rotation_matrices.
+ * The idea: take a random rotation matrix of dimension d-1 and embed it as a d*d matrix M.
+ * Pick a random unit vector v on the sphere S^{d-1} and rotate M so that one of its
+ * columns becomes v (below, v is placed in the first column and a QR decomposition
+ * restores orthogonality).
+ * The determinant of the resulting matrix can be either 1 or -1.
+ */
+// Note: the householderQR operation at the end seems to take a lot of time at compilation.
+// The CGAL headers are another source of long compilation time.
+Eigen::MatrixXd random_orthogonal_matrix(std::size_t d) {
+ typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> Kernel;
+ typedef typename Kernel::Point_d Point_d;
+ if (d == 1) return Eigen::VectorXd::Constant(1, 1.0);
+ if (d == 2) {
+ // 0. < alpha < 2 Pi
+ std::uniform_real_distribution<double> unif(0., 2 * boost::math::constants::pi<double>());
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ double alpha = unif(rand_engine);
+
+ Eigen::Matrix2d rot;
+    rot << std::cos(alpha), -std::sin(alpha), std::sin(alpha), std::cos(alpha);
+ return rot;
+ }
+ Eigen::MatrixXd low_dim_rot = random_orthogonal_matrix(d - 1);
+ Eigen::MatrixXd rot(d, d);
+ Point_d v = *CGAL::Random_points_on_sphere_d<Point_d>(d, 1);
+ for (std::size_t i = 0; i < d; ++i) rot(i, 0) = v[i];
+ for (std::size_t i = 0; i < d - 1; ++i)
+    for (std::size_t j = 1; j < d; ++j) rot(i, j) = low_dim_rot(i, j - 1);  // fill columns 1..d-1 from the (d-1)-dimensional rotation
+ for (std::size_t j = 1; j < d; ++j) rot(d - 1, j) = 0;
+ rot = rot.householderQr()
+ .householderQ(); // a way to do Gram-Schmidt, see https://forum.kde.org/viewtopic.php?f=74&t=118568#p297246
+ return rot;
+}
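+
+/* Usage sketch (illustrative only): the result can be checked for orthogonality.
+ *
+ *   Eigen::MatrixXd m = random_orthogonal_matrix(5);
+ *   bool is_orthogonal = (m.transpose() * m).isIdentity(1e-10);  // true up to tolerance
+ */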
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h b/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h
new file mode 100644
index 00000000..ca08f629
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h
@@ -0,0 +1,60 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_MESH_MEDIT_H_
+#define IO_MESH_MEDIT_H_
+
+#include <Eigen/Dense>
+
+#include <vector>
+#include <utility> // for std::pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Mesh_medit
+ * \brief Structure to store a mesh that can be output in Medit .mesh file format
+ * using the output_meshes_to_medit method.
+ *
+ * \ingroup coxeter_triangulation
+ */
+struct Mesh_medit {
+ /** \brief Type of a range of vertices. */
+ typedef std::vector<Eigen::VectorXd> Vertex_points;
+ /** \brief Type of a mesh element.
+ * A pair consisting of a vector of vertex indices of type std::size_t
+ * and of an integer that represents the common reference number for
+ * the mesh elements of this type. */
+ typedef std::pair<std::vector<std::size_t>, std::size_t> Mesh_element;
+ /** \brief Type of a range of mesh elements. */
+ typedef std::vector<Mesh_element> Mesh_elements;
+  /** \brief Type of a range of scalar field values. */
+ typedef std::vector<double> Scalar_field_range;
+
+ /** \brief Range of vertices of type Eigen::VectorXd to output. */
+ Vertex_points vertex_points;
+ /** \brief Range of edges. */
+ Mesh_elements edges;
+ /** \brief Range of triangles. */
+ Mesh_elements triangles;
+ /** \brief Range of tetrahedra. */
+ Mesh_elements tetrahedra;
+ /** \brief Range of scalar values over triangles. */
+ Scalar_field_range triangles_scalar_range;
+ /** \brief Range of scalar values over tetrahedra. */
+ Scalar_field_range tetrahedra_scalar_range;
+};
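+
+/* A minimal filling sketch (illustrative only): one triangle on vertices 1, 2, 3 with
+ * reference number 1, using the 1-based vertex indexing of the Medit format.
+ *
+ *   Mesh_medit mesh;
+ *   mesh.vertex_points = {Eigen::Vector2d(0., 0.), Eigen::Vector2d(1., 0.), Eigen::Vector2d(0., 1.)};
+ *   mesh.triangles.emplace_back(std::vector<std::size_t>{1, 2, 3}, 1);
+ */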
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h b/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h
new file mode 100644
index 00000000..9750f366
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h
@@ -0,0 +1,171 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_BUILD_MESH_FROM_CELL_COMPLEX_H_
+#define IO_BUILD_MESH_FROM_CELL_COMPLEX_H_
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/IO/Mesh_medit.h>
+
+#include <Eigen/Dense>
+
+#include <cstdlib> // for std::size_t
+#include <map>
+#include <set>
+#include <string>
+#include <utility> // for std::make_pair
+#include <algorithm> // for std::min
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
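+/** \brief Gathers the output options used by populate_mesh: the toggle_* flags select
+ * which element types (edges, triangles, tetrahedra) are written to the mesh, and the
+ * ref_* values are the reference numbers attached to the corresponding mesh elements. */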
+struct Configuration {
+ Configuration(bool t_edges, bool t_triangles, bool t_tetrahedra, std::size_t r_edges, std::size_t r_triangles,
+ std::size_t r_tetrahedra)
+ : toggle_edges(t_edges),
+ toggle_triangles(t_triangles),
+ toggle_tetrahedra(t_tetrahedra),
+ ref_edges(r_edges),
+ ref_triangles(r_triangles),
+ ref_tetrahedra(r_tetrahedra) {}
+
+ Configuration() {}
+
+ bool toggle_edges = true, toggle_triangles = true, toggle_tetrahedra = true;
+ std::size_t ref_edges = 1, ref_triangles = 1, ref_tetrahedra = 1;
+};
+
+template <class Hasse_cell, class Simplex_cell_map>
+void populate_mesh(Mesh_medit& output, Simplex_cell_map& sc_map, Configuration configuration, std::size_t amb_d,
+ std::map<Hasse_cell*, std::size_t> vi_map) {
+ using Mesh_element_vertices = Mesh_medit::Mesh_elements::value_type::first_type;
+ std::map<Hasse_cell*, std::size_t> ci_map;
+  std::size_t index = vi_map.size() + 1; // index of the next vertex to be added (1-based)
+ if (sc_map.size() >= 3)
+ for (const auto& sc_pair : sc_map[2]) {
+ Eigen::VectorXd barycenter = Eigen::VectorXd::Zero(amb_d);
+ std::set<std::size_t> vertex_indices;
+ Hasse_cell* cell = sc_pair.second;
+ for (const auto& ei_pair : cell->get_boundary())
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) vertex_indices.emplace(vi_map[vi_pair.first]);
+ for (const std::size_t& v : vertex_indices) barycenter += output.vertex_points[v - 1];
+ ci_map.emplace(cell, index++);
+ output.vertex_points.emplace_back((1. / vertex_indices.size()) * barycenter);
+#ifdef DEBUG_TRACES
+ std::string vlist = " (" + std::to_string(index - 1) + ")";
+ for (const std::size_t& v : vertex_indices) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(cell), vlist);
+#endif
+ }
+
+ if (configuration.toggle_edges && sc_map.size() >= 2)
+ for (const auto& sc_pair : sc_map[1]) {
+ Hasse_cell* edge_cell = sc_pair.second;
+ Mesh_element_vertices edge;
+ for (const auto& vi_pair : edge_cell->get_boundary()) edge.push_back(vi_map[vi_pair.first]);
+ output.edges.emplace_back(edge, configuration.ref_edges);
+#ifdef DEBUG_TRACES
+ std::string vlist;
+ for (const std::size_t& v : edge) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(edge_cell), vlist);
+#endif
+ }
+
+ if (configuration.toggle_triangles && sc_map.size() >= 3)
+ for (const auto& sc_pair : sc_map[2]) {
+ for (const auto& ei_pair : sc_pair.second->get_boundary()) {
+ Mesh_element_vertices triangle(1, ci_map[sc_pair.second]);
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) triangle.push_back(vi_map[vi_pair.first]);
+ output.triangles.emplace_back(triangle, configuration.ref_triangles);
+ }
+ }
+
+ if (configuration.toggle_tetrahedra && sc_map.size() >= 4)
+ for (const auto& sc_pair : sc_map[3]) {
+ Eigen::VectorXd barycenter = Eigen::VectorXd::Zero(amb_d);
+ std::set<std::size_t> vertex_indices;
+ Hasse_cell* cell = sc_pair.second;
+ for (const auto& ci_pair : cell->get_boundary())
+ for (const auto& ei_pair : ci_pair.first->get_boundary())
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) vertex_indices.emplace(vi_map[vi_pair.first]);
+ for (const std::size_t& v : vertex_indices) barycenter += output.vertex_points[v - 1];
+ output.vertex_points.emplace_back((1. / vertex_indices.size()) * barycenter);
+#ifdef DEBUG_TRACES
+ std::string vlist = " (" + std::to_string(index) + ")";
+ for (const std::size_t& v : vertex_indices) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(cell), vlist);
+#endif
+
+ for (const auto& ci_pair : cell->get_boundary())
+ for (const auto& ei_pair : ci_pair.first->get_boundary()) {
+          Mesh_element_vertices tetrahedron = {index, ci_map[ci_pair.first]};
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) tetrahedron.push_back(vi_map[vi_pair.first]);
+ output.tetrahedra.emplace_back(tetrahedron, configuration.ref_tetrahedra);
+ }
+ index++;
+ }
+}
+
+/** @brief Builds a Gudhi::coxeter_triangulation::Mesh_medit from a Gudhi::coxeter_triangulation::Cell_complex
+ *
+ * @ingroup coxeter_triangulation
+ */
+template <class Cell_complex>
+Mesh_medit build_mesh_from_cell_complex(const Cell_complex& cell_complex,
+ Configuration i_configuration = Configuration(),
+ Configuration b_configuration = Configuration()) {
+ using Hasse_cell = typename Cell_complex::Hasse_cell;
+ Mesh_medit output;
+  std::map<Hasse_cell*, std::size_t> vi_map; // maps a vertex cell to its index; 2-cells get their own map in populate_mesh
+  std::size_t index = 1; // index of the next vertex to be added (1-based, as expected by Medit)
+
+ if (cell_complex.cell_point_map().empty()) return output;
+ std::size_t amb_d = std::min((int)cell_complex.cell_point_map().begin()->second.size(), 3);
+
+ for (const auto& cp_pair : cell_complex.cell_point_map()) {
+#ifdef DEBUG_TRACES
+ std::string vlist;
+ vlist += " " + std::to_string(index);
+ cell_vlist_map.emplace(to_string(cp_pair.first), vlist);
+#endif
+ vi_map.emplace(cp_pair.first, index++);
+ output.vertex_points.push_back(cp_pair.second);
+ output.vertex_points.back().conservativeResize(amb_d);
+ }
+
+ populate_mesh(output, cell_complex.interior_simplex_cell_maps(), i_configuration, amb_d, vi_map);
+#ifdef DEBUG_TRACES
+ for (const auto& sc_map : cell_complex.interior_simplex_cell_maps())
+ for (const auto& sc_pair : sc_map) {
+ std::string simplex = "I" + to_string(sc_pair.first);
+ std::string cell = to_string(sc_pair.second);
+ std::string vlist = cell_vlist_map.at(cell).substr(1);
+ simplex_vlist_map.emplace(simplex, vlist);
+ }
+#endif
+ populate_mesh(output, cell_complex.boundary_simplex_cell_maps(), b_configuration, amb_d, vi_map);
+#ifdef DEBUG_TRACES
+ for (const auto& sc_map : cell_complex.boundary_simplex_cell_maps())
+ for (const auto& sc_pair : sc_map) {
+ std::string simplex = "B" + to_string(sc_pair.first);
+ std::string cell = to_string(sc_pair.second);
+ std::string vlist = cell_vlist_map.at(cell).substr(1);
+ simplex_vlist_map.emplace(simplex, vlist);
+ }
+#endif
+ return output;
+}
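+
+/* Usage sketch (illustrative only; 'cell_complex' stands for a Cell_complex produced by
+ * the manifold tracing algorithm, and the reference numbers 1, 5, 3 and 2, 13, 14 are
+ * arbitrary choices):
+ *
+ *   Mesh_medit mesh = build_mesh_from_cell_complex(cell_complex,
+ *                                                  Configuration(true, true, true, 1, 5, 3),
+ *                                                  Configuration(true, true, true, 2, 13, 14));
+ */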
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h b/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h
new file mode 100644
index 00000000..a2995738
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h
@@ -0,0 +1,550 @@
+#ifndef IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
+#define IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
+
+#ifdef DEBUG_TRACES // All of this code is skipped if DEBUG_TRACES is not ON - cmake -DDEBUG_TRACES=ON .
+
+#include <sstream>
+#include <fstream>
+#include <vector>
+#include <list>
+#include <string>
+#include <regex>
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& vector) {
+ os << "(";
+ if (vector.empty()) {
+ os << ")";
+ return os;
+ }
+ auto v_it = vector.begin();
+ os << *v_it++;
+ for (; v_it != vector.end(); ++v_it) os << ", " << *v_it;
+ os << ")";
+ return os;
+}
+
+/* A class to make the vector horizontal instead of vertical */
+struct Straighten {
+ Straighten(const Eigen::VectorXd& vector) : vector_(vector) {}
+ const Eigen::VectorXd& vector_;
+};
+
+std::ostream& operator<<(std::ostream& os, const Straighten& str) {
+  std::size_t size = str.vector_.size();
+  if (size == 0) {
+    os << "()";
+    return os;
+  }
+  os << "(" << str.vector_(0);
+  for (std::size_t i = 1; i < size; ++i) os << ", " << str.vector_(i);
+  os << ")";
+  return os;
+}
+
+std::string id_from_simplex(const std::string& simplex) {
+ std::regex r("\\s+"), r2("\\(|\\)|\\{|\\}"), r3(","), r4("\\["), r5("\\]");
+ std::string output = std::regex_replace(simplex, r, "");
+ output = std::regex_replace(output, r2, ":");
+ output = std::regex_replace(output, r3, ".");
+ output = std::regex_replace(output, r4, "_");
+ output = std::regex_replace(output, r5, "");
+ return output;
+}
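+
+// For example, an input string "(1, 2)[0,1]" becomes ":1.2:_0.1", which is safe to use
+// as an HTML id in the anchors generated below.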
+
+template <typename T>
+std::string to_string(const T& t) {
+ std::ostringstream oss;
+ oss << t;
+ return oss.str();
+}
+
+struct MT_inserted_info {
+ std::string qr_face_, init_face_, qr_intersection_;
+ bool qr_success_, is_boundary_;
+ template <class Query_result, class Simplex_handle>
+ MT_inserted_info(const Query_result& qr, const Simplex_handle& face, bool is_boundary)
+ : qr_face_(to_string(face)),
+ init_face_(to_string(face)),
+ qr_intersection_(to_string(qr.intersection)),
+ qr_success_(qr.success),
+ is_boundary_(is_boundary) {}
+};
+std::list<MT_inserted_info> mt_seed_inserted_list, mt_inserted_list;
+
+struct CC_summary_info {
+ std::string face_, cell_;
+ template <class SC_pair>
+ CC_summary_info(const SC_pair& sc_pair) : face_(to_string(sc_pair.first)), cell_(to_string(sc_pair.second)) {}
+};
+using CC_summary_list = std::list<CC_summary_info>;
+std::vector<CC_summary_list> cc_interior_summary_lists, cc_boundary_summary_lists;
+
+struct CC_detail_info {
+ enum class Result_type { self, face, coface, inserted, join_single, join_is_face };
+ std::string simplex_, trigger_, init_simplex_;
+ Result_type status_;
+ bool join_trigger_ = false;
+ std::list<std::string> faces_, post_faces_, cofaces_;
+ template <class Simplex_handle>
+ CC_detail_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)) {}
+};
+using CC_detail_list = std::list<CC_detail_info>;
+std::vector<CC_detail_list> cc_interior_detail_lists, cc_boundary_detail_lists;
+std::vector<CC_detail_list> cc_interior_insert_detail_lists, cc_boundary_insert_detail_lists;
+
+struct CC_prejoin_info {
+ enum class Result_type { join_single, join_is_face, join_different, join_same };
+ std::string simplex_, join_;
+ std::vector<std::string> faces_;
+ std::size_t dimension_;
+ Result_type status_;
+ template <class Simplex_handle>
+ CC_prejoin_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)), dimension_(simplex.dimension()) {}
+};
+using CC_prejoin_list = std::list<CC_prejoin_info>;
+std::vector<CC_prejoin_list> cc_interior_prejoin_lists, cc_boundary_prejoin_lists;
+
+struct CC_join_info {
+ enum class Result_type { self, face, coface, inserted, join_single, join_is_face };
+ std::string simplex_, join_, trigger_;
+ Result_type status_;
+ std::list<std::string> boundary_faces_;
+ std::list<std::string> faces_, post_faces_, cofaces_;
+ template <class Simplex_handle>
+ CC_join_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)) {}
+};
+bool join_switch = false;
+std::vector<CC_detail_list> cc_interior_join_detail_lists, cc_boundary_join_detail_lists;
+
+std::map<std::string, std::string> cell_vlist_map;
+std::map<std::string, std::string> simplex_vlist_map;
+
+std::ostringstream mt_ostream, vis_ostream;
+std::vector<std::ostringstream> cc_summary_ostream, cc_traces_ostream;
+
+std::string simplex_format(const std::string& b_simplex) {
+  bool is_boundary = b_simplex[0] == 'B';
+  std::string tooltiptext;
+  auto it = simplex_vlist_map.find(b_simplex);
+  if (it == simplex_vlist_map.end())
+    tooltiptext = "deleted";
+  else
+    tooltiptext = it->second;
+  return (std::string) "<a class=\"" + (is_boundary ? "boundary" : "interior") + "\" href=\"#" +
+         id_from_simplex(b_simplex) + "\">" + b_simplex + "<span class=\"tooltiptext\">" + tooltiptext + "</span></a>";
+}
+
+std::string simplex_format(const std::string& simplex, bool is_boundary) {
+  // Convenience overload: prefix with the boundary/interior marker and delegate.
+  return simplex_format((is_boundary ? "B" : "I") + simplex);
+}
+
+void write_head(std::ofstream& ofs) {
+ ofs << " <head>\n"
+ << " <title>Cell complex debug trace</title>\n"
+ << " <style>\n"
+ << " a.boundary {\n"
+ << " position: relative;\n"
+ << " display: inline-block;\n"
+ << " color: darkred;\n"
+ << " background-color: lightgreen\n"
+ << " }\n"
+ << " a.interior {\n"
+ << " position: relative;\n"
+ << " display: inline-block;\n"
+ << " color: navy;\n"
+ << " background-color: yellow\n"
+ << " }\n"
+ << " .tooltiptext {\n"
+ << " visibility: hidden;\n"
+ << " width: 120px;\n"
+ << " background-color: #555;\n"
+ << " color: #fff;\n"
+ << " text-align: center;\n"
+ << " padding: 5px 0;\n"
+ << " border-radius: 6px;\n"
+ << " position: absolute;\n"
+ << " z-index: 1;\n"
+ << " bottom: 125%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -60px;\n"
+ << " opacity: 0;\n"
+ << " transition: opacity 0.3s;\n"
+ << " }\n"
+ << " .boundary .tooltiptext::after {\n"
+ << " content: \"\";\n"
+ << " position: absolute;\n"
+ << " top: 100%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -5px;\n"
+ << " border-width: 5px;\n"
+ << " border-style: solid;\n"
+ << " border-color: #555 transparent transparent transparent;\n"
+ << " }\n"
+ << " .interior .tooltiptext::after {\n"
+ << " content: \"\";\n"
+ << " position: absolute;\n"
+ << " top: 100%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -5px;\n"
+ << " border-width: 5px;\n"
+ << " border-style: solid;\n"
+ << " border-color: #555 transparent transparent transparent;\n"
+ << " }\n"
+ << " .boundary:hover .tooltiptext {\n"
+ << " visibility: visible;\n"
+ << " opacity: 1;\n"
+ << " }\n"
+ << " .interior:hover .tooltiptext {\n"
+ << " visibility: visible;\n"
+ << " opacity: 1;\n"
+ << " }\n"
+ << " ul.nav {\n"
+ << " list-style-type: none;\n"
+ << " margin: 0;\n"
+ << " padding: 0;\n"
+ << " overflow: auto;\n"
+ << " background-color: #333;\n"
+ << " position: fixed;\n"
+ << " height: 100%;\n"
+ << " width: 15%;\n"
+ << " }\n"
+ << " ul.nav li a {\n"
+ << " display: block;\n"
+ << " color: white;\n"
+ << " text-align: left;\n"
+ << " padding: 14px 16px;\n"
+ << " text-decoration: none;\n"
+ << " }\n"
+ << " .active {\n"
+ << " background-color: #4CAF50;\n"
+ << " }\n"
+ << " div {\n"
+ << " margin-left: 15%;\n"
+ << " padding: 1px 16px\n"
+ << " }\n"
+ << " div.navi {\n"
+ << " margin-left: 0%;\n"
+ << " padding: 0px 0px\n"
+ << " }\n"
+ << " h1 {\n"
+ << " margin-left: 15%;\n"
+ << " padding: 1px 16px\n"
+ << " }\n"
+ << " </style>\n"
+ << " </head>\n";
+}
+
+void write_nav(std::ofstream& ofs) {
+ ofs << " <div class=\"navi\" style=\"margin-top:30px;background-color:#1abc9c;\">\n"
+ << " <ul class=\"nav\">\n"
+ << " <li><a href=\"#mant\">Manifold tracing</a></li>\n"
+ << " <li><a href=\"#cell\">Cell complex</a>\n"
+ << " <ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <li><a href=\"#dim" << i << "\">Dimension " << i << "</a>\n";
+ ofs << " <ul>\n";
+ ofs << " <li><a href=\"#dim" << i << "i\">Interior</a></li>\n";
+ if (i < cc_boundary_summary_lists.size()) {
+ ofs << " <li><a href=\"#dim" << i << "b\">Boundary</a></li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ }
+ ofs << " </ul>\n"
+ << " </li>\n"
+ << " <li><a href=\"#visu\">Visualization details</a></li>\n"
+ << " </ul>\n"
+ << " </div>\n";
+}
+
+void write_mt(std::ofstream& ofs) {
+ ofs << " <div id=\"mant\">\n";
+ ofs << " <h2> Manifold debug trace </h2>\n";
+ ofs << " <h3> Simplices inserted during the seed phase </h3>\n";
+ ofs << " <ul>\n";
+ for (const MT_inserted_info& mt_info : mt_seed_inserted_list) {
+ if (mt_info.qr_success_) {
+ ofs << " <li>Inserted " << simplex_format(mt_info.qr_face_, mt_info.is_boundary_);
+ if (mt_info.qr_face_ != mt_info.init_face_)
+ ofs << " (initially " << simplex_format(mt_info.init_face_, mt_info.is_boundary_) << ")";
+ ofs << " intersection point is " << mt_info.qr_intersection_ << "</li>\n";
+ } else
+ ofs << " <li>Failed to insert " << mt_info.init_face_ << "</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " <h3> Simplices inserted during the while loop phase </h3>\n";
+ ofs << " <ul>\n";
+ for (const MT_inserted_info& mt_info : mt_inserted_list) {
+ if (mt_info.qr_success_) {
+ ofs << " <li>Inserted " << simplex_format(mt_info.qr_face_, mt_info.is_boundary_);
+ if (mt_info.qr_face_ != mt_info.init_face_)
+ ofs << " (initially " << simplex_format(mt_info.init_face_, mt_info.is_boundary_) << ")";
+ ofs << " intersection point is " << mt_info.qr_intersection_ << "</li>\n";
+ } else
+ ofs << " <li>Failed to insert " << mt_info.init_face_ << ")</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </div>\n";
+}
+
+void write_cc(std::ofstream& ofs) {
+ ofs << " <div id=\"cell\">\n"
+ << " <h2> Cell complex debug trace </h2>\n"
+ << " <p>Go to:</p>\n"
+ << " <ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <li><a href=\"#dim" << i << "\">Dimension " << i << "</a></li>\n";
+ }
+ ofs << " </ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <h3 id=\"dim" << i << "\"> Dimension " << i << "</h3>\n";
+ ofs << " <h4 id=\"dim" << i << "i\"> Summary for interior simplices</h4>\n";
+ if (i < cc_boundary_summary_lists.size()) ofs << " <p><a href=\"#dim" << i << "b\">Go to boundary</a></p>\n";
+ ofs << " <ul>\n";
+ for (const CC_summary_info& cc_info : cc_interior_summary_lists[i])
+ ofs << " <li id = \"" << id_from_simplex("I" + cc_info.face_) << "\">"
+ << simplex_format(cc_info.face_, false) << " cell =" << cc_info.cell_ << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " <h4> Prejoin state of the interior cells of dimension " << i << "</h4>\n";
+ auto prejoin_it = cc_interior_prejoin_lists[i].begin();
+ while (prejoin_it != cc_interior_prejoin_lists[i].end()) {
+ std::size_t j = prejoin_it->dimension_;
+ ofs << " <h5>" << j << "-dimensional ambient simplices</h5>\n";
+ ofs << " <ul>\n";
+ for (; prejoin_it->dimension_ == j; ++prejoin_it) {
+ ofs << " <li>" << simplex_format(prejoin_it->simplex_, false)
+ << " join = " << simplex_format(prejoin_it->join_, false) << " boundary:\n"
+ << " <ul>\n";
+ for (const auto& face : prejoin_it->faces_) ofs << " <li>" << simplex_format(face) << "</li>";
+ ofs << " </ul>\n";
+ switch (prejoin_it->status_) {
+ case (CC_prejoin_info::Result_type::join_single):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " as it has a single face.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_is_face):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " as its join " << simplex_format(prejoin_it->join_, false) << " is one of the faces.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_different):
+ ofs << " <p style=\"color: magenta\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " and replaced by its join " << simplex_format(prejoin_it->join_, false) << ".</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_same):
+ ofs << " <p style=\"color: green\">Kept " << simplex_format(prejoin_it->simplex_, false)
+ << ".</p>";
+ }
+ ofs << " </li>";
+ }
+ ofs << " </ul>\n";
+ }
+ ofs << " <h4> Details for interior simplices</h4>\n";
+ ofs << " <ul>\n";
+ for (const CC_detail_info& cc_info : cc_interior_detail_lists[i]) {
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:magenta\" id = \"" << id_from_simplex("I" + cc_info.simplex_)
+ << "\"> Simplex " << simplex_format(cc_info.simplex_, false) << " has only one face ("
+ << simplex_format(cc_info.trigger_, false) << ") and is deleted.";
+ continue;
+ }
+      if (cc_info.status_ == CC_detail_info::Result_type::join_is_face) {
+        ofs << "      <li style=\"color:darkmagenta\" id = \"" << id_from_simplex("I" + cc_info.simplex_)
+            << "\"> The join of the simplex " << simplex_format(cc_info.simplex_, false) << " is one of its faces ("
+            << simplex_format(cc_info.trigger_, false) << "), hence it is deleted.";
+ continue;
+ }
+ ofs << " <li> Insert_cell called for " << simplex_format(cc_info.simplex_, false) << "\n";
+ ofs << " <ul>\n";
+ // for (const std::string& cof: cc_info.faces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, false)
+ // << " is a face of " << simplex_format(cof, false) << "\n";
+ ofs << " </ul>\n";
+ ofs << " <ul>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::self) {
+ ofs << " <p><span style=\"color:blue\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " already exists in the cell complex!</span></p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::face) {
+ ofs << " <p><span style=\"color:red\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " is a face of the simplex " << simplex_format(cc_info.trigger_, false) << "!</span><br>\n";
+ ofs << " <ul>\n";
+          for (const std::string& post_face : cc_info.post_faces_)
+ ofs << " <li id = \"" << id_from_simplex("I" + post_face) << "\">"
+ << "Post deleting " << simplex_format(post_face, false) << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " </p>\n";
+ ofs << " <p id = \"" << id_from_simplex("I" + cc_info.trigger_) << "\">"
+ << "Deleting " << simplex_format(cc_info.trigger_, false) << "</p>\n";
+ }
+ // for (const std::string& fac: cc_info.cofaces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, false)
+ // << " is a coface of " << simplex_format(fac, false) << "\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::coface) {
+ ofs << " <p><span style=\"color:darkorange\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " is a coface of the simplex " << simplex_format(cc_info.trigger_, false) << "!</span><p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::inserted) {
+ ofs << " <p><span style=\"color:green\">Successfully inserted "
+ << simplex_format(cc_info.simplex_, false) << "!</span><p>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ }
+ ofs << " </ul>\n";
+
+ if (i < cc_boundary_summary_lists.size()) {
+ ofs << " <h4 id=\"dim" << i << "b\"> Summary for boundary simplices</h4>\n";
+ ofs << " <p><a href=\"#dim" << i << "i\">Go to interior</a></p>\n";
+ ofs << " <ul>\n";
+ for (const CC_summary_info& cc_info : cc_boundary_summary_lists[i])
+ ofs << " <li id = \"" << id_from_simplex("B" + cc_info.face_) << "\">"
+ << simplex_format(cc_info.face_, true) << " cell =" << cc_info.cell_ << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " <h4> Prejoin state of the boundary cells of dimension " << i << "</h4>\n";
+ auto prejoin_it = cc_boundary_prejoin_lists[i].begin();
+ while (prejoin_it != cc_boundary_prejoin_lists[i].end()) {
+ std::size_t j = prejoin_it->dimension_;
+ ofs << " <h5>" << j << "-dimensional ambient simplices</h5>\n";
+ ofs << " <ul>\n";
+ for (; prejoin_it->dimension_ == j; ++prejoin_it) {
+ ofs << " <li>" << simplex_format(prejoin_it->simplex_, true)
+ << " join = " << simplex_format(prejoin_it->join_, true) << " boundary:\n"
+ << " <ul>\n";
+ for (const auto& face : prejoin_it->faces_) ofs << " <li>" << simplex_format(face) << "</li>";
+ ofs << " </ul>\n";
+ switch (prejoin_it->status_) {
+ case (CC_prejoin_info::Result_type::join_single):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " as it has a single face.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_is_face):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " as its join " << simplex_format(prejoin_it->join_, true) << " is one of the faces.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_different):
+ ofs << " <p style=\"color: magenta\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " and replaced by its join " << simplex_format(prejoin_it->join_, true) << ".</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_same):
+ ofs << " <p style=\"color: green\">Kept " << simplex_format(prejoin_it->simplex_, true)
+ << ".</p>";
+ }
+ ofs << " </li>";
+ }
+ ofs << " </ul>\n";
+ }
+ }
+ if (i < cc_boundary_detail_lists.size()) {
+ ofs << " <h4> Details for boundary simplices</h4>\n"
+ << " <ul>\n";
+ for (const CC_detail_info& cc_info : cc_boundary_detail_lists[i]) {
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:magenta\" id = \"" << id_from_simplex("B" + cc_info.simplex_)
+ << "\"> Simplex " << simplex_format(cc_info.simplex_, true) << " has only one face ("
+ << simplex_format(cc_info.trigger_, true) << ") and is deleted.";
+ continue;
+ }
+        if (cc_info.status_ == CC_detail_info::Result_type::join_is_face) {
+          ofs << "      <li style=\"color:darkmagenta\" id = \"" << id_from_simplex("B" + cc_info.simplex_)
+            << "\"> The join of the simplex " << simplex_format(cc_info.simplex_, true) << " is one of its faces ("
+            << simplex_format(cc_info.trigger_, true) << "), hence it is deleted.";
+ continue;
+ }
+ ofs << " <li> Insert_simplex called on " << simplex_format(cc_info.simplex_, true);
+ ofs << " <ul>\n";
+ // for (const std::string& cof: cc_info.faces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, true)
+ // << " is a face of " << simplex_format(cof, true) << "\n";
+ ofs << " </ul>\n";
+ ofs << " <ul>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::self) {
+ ofs << " <p><span style=\"color:blue\">The simplex " << simplex_format(cc_info.simplex_, true)
+ << " already exists in the cell complex!</span></p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::face) {
+ ofs << " <p><span style=\"color:red\">The simplex " << simplex_format(cc_info.simplex_, true)
+ << " is a face of the simplex " << simplex_format(cc_info.trigger_, true) << "!</span><br>\n";
+ ofs << " <ul>\n";
+            for (const std::string& post_face : cc_info.post_faces_)
+ ofs << " <li id=\"" << id_from_simplex("B" + post_face) << "\">Post deleting "
+ << simplex_format(post_face, true) << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " </p>\n";
+ ofs << " <p id=\"" << id_from_simplex(cc_info.trigger_) << "\">Deleting "
+ << simplex_format(cc_info.trigger_, true) << "</p>\n";
+ }
+ // for (const std::string& fac: cc_info.cofaces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, true)
+ // << " is a coface of " << simplex_format(fac, true) << "\n";
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::coface) {
+ ofs << " <p><span style=\"color:darkorange\">The simplex "
+ << simplex_format(cc_info.simplex_, true) << " is a coface of the simplex "
+ << simplex_format(cc_info.trigger_, true) << "!</span><p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::inserted) {
+ ofs << " <p><span style=\"color:green\">Successfully inserted "
+ << simplex_format(cc_info.simplex_, true) << "!</span><p>\n";
+ }
+ }
+ ofs << " </ul>\n";
+ }
+ }
+ ofs << " </div>\n";
+}
+
+void write_visu(std::ofstream& ofs) {
+ ofs << " <div id=\"visu\">\n"
+ << " <h2> Visualization details debug trace </h2>\n";
+ // std::vector<std::map<std::string, std::string> > vs_maps(cc_interior_summary_lists.size());
+ std::map<std::string, std::string> vs_map;
+ for (const auto& sv_pair : simplex_vlist_map) vs_map.emplace(sv_pair.second, sv_pair.first);
+ ofs << " <ul>\n";
+ for (const auto& vs_pair : vs_map) {
+ std::string w_simplex = vs_pair.second.substr(1);
+ bool is_boundary = vs_pair.second[0] == 'B';
+ ofs << " <li><b>" << vs_pair.first << "</b>: " << simplex_format(w_simplex, is_boundary) << "</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </div>\n";
+}
+
+void write_to_html(std::string file_name) {
+ std::ofstream ofs(file_name + ".html", std::ofstream::out);
+ ofs << "<!DOCTYPE html>\n"
+ << "<html>\n";
+ write_head(ofs);
+ ofs << " <body>\n";
+ write_nav(ofs);
+ ofs << " <h1> Debug traces for " << file_name << " </h1>\n";
+ write_mt(ofs);
+ write_cc(ofs);
+ write_visu(ofs);
+ ofs << " </body>\n";
+ ofs << "</html>\n";
+
+ ofs.close();
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // DEBUG_TRACES
+#endif // IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h b/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h
new file mode 100644
index 00000000..f69d8b29
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h
@@ -0,0 +1,154 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_OUTPUT_MESHES_TO_MEDIT_H_
+#define IO_OUTPUT_MESHES_TO_MEDIT_H_
+
+#include <gudhi/IO/Mesh_medit.h>
+
+#include <Eigen/Dense>
+
+#include <cstdlib> // for std::size_t
+#include <fstream> // for std::ofstream
+#include <vector>
+#include <type_traits> // for std::enable_if
+#include <tuple> // for std::get
+#include <utility> // for std::make_pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+using Vertex_points = Mesh_medit::Vertex_points;
+using Mesh_elements = Mesh_medit::Mesh_elements;
+using Scalar_field_range = Mesh_medit::Scalar_field_range;
+
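+/* The two fill_meshes overloads below implement a compile-time recursion over the mesh
+ * parameter pack: the std::enable_if on I == sizeof...(Meshes) selects the empty base
+ * case, while the general overload appends the I-th mesh (with its vertex indices
+ * shifted by 'index') and recurses with I + 1. */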
+template <std::size_t I = 0, typename... Meshes>
+typename std::enable_if<I == sizeof...(Meshes), void>::type fill_meshes(Vertex_points& vertex_points,
+ Mesh_elements& edges, Mesh_elements& triangles,
+ Mesh_elements& tetrahedra,
+ Scalar_field_range& triangles_scalar_range,
+ Scalar_field_range& tetrahedra_scalar_range,
+ std::size_t index, const Meshes&... meshes) {}
+
+template <std::size_t I = 0, typename... Meshes>
+typename std::enable_if<I != sizeof...(Meshes), void>::type fill_meshes(Vertex_points& vertex_points,
+ Mesh_elements& edges, Mesh_elements& triangles,
+ Mesh_elements& tetrahedra,
+ Scalar_field_range& triangles_scalar_range,
+ Scalar_field_range& tetrahedra_scalar_range,
+ std::size_t index, const Meshes&... meshes) {
+ auto mesh = std::get<I>(std::forward_as_tuple(meshes...));
+ for (const auto& v : mesh.vertex_points) vertex_points.push_back(v);
+ for (const auto& e : mesh.edges) {
+ std::vector<std::size_t> edge;
+ for (const auto& v_i : e.first) edge.push_back(v_i + index);
+ edges.emplace_back(edge, e.second);
+ }
+ for (const auto& t : mesh.triangles) {
+ std::vector<std::size_t> triangle;
+ for (const auto& v_i : t.first) triangle.push_back(v_i + index);
+ triangles.emplace_back(triangle, t.second);
+ }
+ for (const auto& t : mesh.tetrahedra) {
+ std::vector<std::size_t> tetrahedron;
+ for (const auto& v_i : t.first) tetrahedron.push_back(v_i + index);
+ tetrahedra.emplace_back(tetrahedron, t.second);
+ }
+ for (const auto& b : mesh.triangles_scalar_range) triangles_scalar_range.push_back(b);
+ for (const auto& b : mesh.tetrahedra_scalar_range) tetrahedra_scalar_range.push_back(b);
+ fill_meshes<I + 1, Meshes...>(vertex_points, edges, triangles, tetrahedra, triangles_scalar_range,
+ tetrahedra_scalar_range, index + mesh.vertex_points.size(), meshes...);
+}
+
+/** \brief Outputs a text file with specified meshes that can be visualized in
+ * <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">Medit</a>.
+ *
+ * @param[in] amb_d Ambient dimension. Can be 2 or 3.
+ * @param[in] file_name The name of the output file.
+ * @param[in] meshes A pack of meshes to be specified separated by commas.
+ *
+ * @ingroup coxeter_triangulation
+ */
+template <typename... Meshes>
+void output_meshes_to_medit(std::size_t amb_d, std::string file_name, const Meshes&... meshes) {
+ Vertex_points vertex_points;
+ Mesh_elements edges, triangles, tetrahedra;
+ Scalar_field_range triangles_scalar_range, tetrahedra_scalar_range;
+ fill_meshes(vertex_points, edges, triangles, tetrahedra, triangles_scalar_range, tetrahedra_scalar_range, 0,
+ meshes...);
+
+ std::ofstream ofs(file_name + ".mesh", std::ofstream::out);
+ std::ofstream ofs_bb(file_name + ".bb", std::ofstream::out);
+
+ if (amb_d == 2) {
+ ofs << "MeshVersionFormatted 1\nDimension 2\n";
+ ofs_bb << "2 1 ";
+ ofs << "Vertices\n" << vertex_points.size() << "\n";
+ for (auto p : vertex_points) {
+ ofs << p[0] << " " << p[1] << " 2\n";
+ }
+ ofs << "Edges " << edges.size() << "\n";
+ for (auto e : edges) {
+ for (auto v : e.first) ofs << v << " ";
+ ofs << e.second << std::endl;
+ }
+ ofs << "Triangles " << triangles.size() << "\n";
+ for (auto s : triangles) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+
+ ofs_bb << triangles_scalar_range.size() << " 1\n";
+ for (auto& b : triangles_scalar_range) ofs_bb << b << "\n";
+
+ } else {
+ ofs << "MeshVersionFormatted 1\nDimension 3\n";
+ ofs_bb << "3 1 ";
+ ofs << "Vertices\n" << vertex_points.size() << "\n";
+ for (auto p : vertex_points) {
+ ofs << p[0] << " " << p[1] << " " << p[2] << " 2\n";
+ }
+ ofs << "Edges " << edges.size() << "\n";
+ for (auto e : edges) {
+ for (auto v : e.first) ofs << v << " ";
+ ofs << e.second << std::endl;
+ }
+ ofs << "Triangles " << triangles.size() << "\n";
+ for (auto s : triangles) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+ ofs << "Tetrahedra " << tetrahedra.size() << "\n";
+ for (auto s : tetrahedra) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+
+ ofs_bb << triangles_scalar_range.size() + tetrahedra_scalar_range.size() << " 1\n";
+ for (auto& b : triangles_scalar_range) ofs_bb << b << "\n";
+ for (auto& b : tetrahedra_scalar_range) ofs_bb << b << "\n";
+ }
+
+ ofs.close();
+ ofs_bb.close();
+}
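+
+/* Usage sketch (illustrative only; 'mesh' and 'mesh_boundary' stand for Mesh_medit
+ * instances, e.g. produced by build_mesh_from_cell_complex):
+ *
+ *   output_meshes_to_medit(3, "manifold", mesh, mesh_boundary);
+ *   // writes manifold.mesh and manifold.bb for visualization in Medit
+ */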
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h b/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h
new file mode 100644
index 00000000..277f8b6c
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h
@@ -0,0 +1,261 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IMPLICIT_MANIFOLD_INTERSECTION_ORACLE_H_
+#define IMPLICIT_MANIFOLD_INTERSECTION_ORACLE_H_
+
+#include <Eigen/Dense>
+
+#include <gudhi/Permutahedral_representation/face_from_indices.h>
+#include <gudhi/Functions/Constant_function.h>
+#include <gudhi/Functions/PL_approximation.h>
+#include <gudhi/Coxeter_triangulation/Query_result.h>
+#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
+
+#include <vector>
+#include <limits> // for std::numeric_limits<>
+#include <cmath> // for std::fabs
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Implicit_manifold_intersection_oracle
+ * \brief An oracle that supports the intersection query on an implicit manifold.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ * \tparam Domain_function_ The domain function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Domain_function_ = Constant_function>
+class Implicit_manifold_intersection_oracle {
+ /* Computes the affine coordinates of the intersection point of the implicit manifold
+ * and the affine hull of the simplex. */
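+  /* The linear system below encodes the constraints sum_i lambda_i = 1 and
+   * sum_i lambda_i * f(v_i) = 0 over the vertices v_i of the simplex, so lambda gives
+   * the barycentric coordinates of the point where the linear interpolation of f over
+   * the simplex vanishes. */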
+ template <class Simplex_handle, class Triangulation>
+ Eigen::VectorXd compute_lambda(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ std::size_t cod_d = this->cod_d();
+ Eigen::MatrixXd matrix(cod_d + 1, cod_d + 1);
+ for (std::size_t i = 0; i < cod_d + 1; ++i) matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : simplex.vertex_range()) {
+ Eigen::VectorXd v_coords = fun_(triangulation.cartesian_coordinates(v));
+ for (std::size_t i = 1; i < cod_d + 1; ++i) matrix(i, j) = v_coords(i - 1);
+ j++;
+ }
+ Eigen::VectorXd z(cod_d + 1);
+ z(0) = 1;
+ for (std::size_t i = 1; i < cod_d + 1; ++i) z(i) = 0;
+ Eigen::VectorXd lambda = matrix.colPivHouseholderQr().solve(z);
+ if (!z.isApprox(matrix*lambda)) {
+      // Mark invalid results with NaN
+      for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i)
+        lambda(i) = std::numeric_limits<double>::quiet_NaN();
+ }
+ return lambda;
+ }
+
+ /* Computes the affine coordinates of the intersection point of the boundary
+ * of the implicit manifold and the affine hull of the simplex. */
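+  /* Same system as in compute_lambda, with an extra row of values domain_fun(v_i) that
+   * also constrains the interpolated domain function to vanish, pinning the intersection
+   * point to the boundary {domain_fun = 0}. */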
+ template <class Simplex_handle, class Triangulation>
+ Eigen::VectorXd compute_boundary_lambda(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ std::size_t cod_d = this->cod_d();
+ Eigen::MatrixXd matrix(cod_d + 2, cod_d + 2);
+ for (std::size_t i = 0; i < cod_d + 2; ++i) matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : simplex.vertex_range()) {
+ Eigen::VectorXd v_coords = fun_(triangulation.cartesian_coordinates(v));
+ for (std::size_t i = 1; i < cod_d + 1; ++i) matrix(i, j) = v_coords(i - 1);
+ Eigen::VectorXd bv_coords = domain_fun_(triangulation.cartesian_coordinates(v));
+ matrix(cod_d + 1, j) = bv_coords(0);
+ j++;
+ }
+ Eigen::VectorXd z(cod_d + 2);
+ z(0) = 1;
+ for (std::size_t i = 1; i < cod_d + 2; ++i) z(i) = 0;
+ Eigen::VectorXd lambda = matrix.colPivHouseholderQr().solve(z);
+ if (!z.isApprox(matrix*lambda)) {
+      // Mark invalid results with NaN
+      for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i)
+        lambda(i) = std::numeric_limits<double>::quiet_NaN();
+ }
+ return lambda;
+ }
+
+ /* Computes the intersection result for a given simplex in a triangulation. */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersection_result(const Eigen::VectorXd& lambda, const Simplex_handle& simplex,
+ const Triangulation& triangulation) const {
+ using QR = Query_result<Simplex_handle>;
+ std::size_t amb_d = triangulation.dimension();
+ std::size_t cod_d = simplex.dimension();
+ for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i) {
+ if (std::isnan(lambda(i))) return QR({Eigen::VectorXd(), false});
+ GUDHI_CHECK((std::fabs(lambda(i) - 1.) > std::numeric_limits<double>::epsilon() &&
+ std::fabs(lambda(i) - 0.) > std::numeric_limits<double>::epsilon()),
+ std::invalid_argument("A vertex of the triangulation lies exactly on the manifold"));
+ if (lambda(i) < 0. || lambda(i) > 1.) return QR({Eigen::VectorXd(), false});
+ }
+ Eigen::MatrixXd vertex_matrix(cod_d + 1, amb_d);
+ auto v_range = simplex.vertex_range();
+ auto v_it = v_range.begin();
+ for (std::size_t i = 0; i < cod_d + 1 && v_it != v_range.end(); ++v_it, ++i) {
+ Eigen::VectorXd v_coords = triangulation.cartesian_coordinates(*v_it);
+ for (std::size_t j = 0; j < amb_d; ++j) vertex_matrix(i, j) = v_coords(j);
+ }
+ Eigen::VectorXd intersection = lambda.transpose() * vertex_matrix;
+ return QR({intersection, true});
+ }
+
+ public:
+ /** \brief Ambient dimension of the implicit manifold. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Codimension of the implicit manifold. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief The seed point of the implicit manifold. */
+ Eigen::VectorXd seed() const { return fun_.seed(); }
+
+ /** \brief Intersection query with the relative interior of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the relative interior of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the manifold
+ * (the codomain dimension of the function).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ Eigen::VectorXd lambda = compute_lambda(simplex, triangulation);
+ return intersection_result(lambda, simplex, triangulation);
+ }
+
+ /** \brief Intersection query with the boundary of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the boundary of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the boundary of the manifold
+ * (the codomain dimension of the function + 1).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects_boundary(const Simplex_handle& simplex,
+ const Triangulation& triangulation) const {
+ Eigen::VectorXd lambda = compute_boundary_lambda(simplex, triangulation);
+ return intersection_result(lambda, simplex, triangulation);
+ }
+
+ /** \brief Returns true if the input point lies inside the piecewise-linear
+ * domain induced by the given ambient triangulation that defines the relative
+ * interior of the piecewise-linear approximation of the manifold.
+ *
+ * @param p The input point. Needs to have the same dimension as the ambient
+ * dimension of the manifold (the domain dimension of the function).
+ * @param triangulation The ambient triangulation. Needs to have the same
+ * dimension as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Triangulation>
+ bool lies_in_domain(const Eigen::VectorXd& p, const Triangulation& triangulation) const {
+ Eigen::VectorXd pl_p = make_pl_approximation(domain_fun_, triangulation)(p);
+ return pl_p(0) < 0;
+ }
+
+ /** \brief Returns the function that defines the interior of the manifold */
+ const Function_& function() const { return fun_; }
+
+ /** \brief Constructs an intersection oracle for an implicit manifold potentially
+ * with boundary from given function and domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * before the restriction with the domain.
+ * @param domain_function The input domain function that can be used to define an implicit
+ * manifold with boundary.
+ */
+ Implicit_manifold_intersection_oracle(const Function_& function, const Domain_function_& domain_function)
+ : fun_(function), domain_fun_(domain_function) {}
+
+ /** \brief Constructs an intersection oracle for an implicit manifold
+ * without boundary from a given function.
+ *
+ * \details To use this constructor, the template Domain_function_ needs to be left
+ * at its default value (Gudhi::coxeter_triangulation::Constant_function).
+ *
+ * @param function The input function that represents the implicit manifold
+ * without boundary.
+ */
+ Implicit_manifold_intersection_oracle(const Function_& function)
+ : fun_(function), domain_fun_(function.amb_d(), 1, Eigen::VectorXd::Constant(1, -1)) {}
+
+ private:
+ Function_ fun_;
+ Domain_function_ domain_fun_;
+};
+
+/** \brief Static constructor of an intersection oracle from a function with a domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * before the restriction with the domain.
+ * @param domain_function The input domain function that can be used to define an implicit
+ * manifold with boundary.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Domain_function_>
+Implicit_manifold_intersection_oracle<Function_, Domain_function_> make_oracle(
+ const Function_& function, const Domain_function_& domain_function) {
+ return Implicit_manifold_intersection_oracle<Function_, Domain_function_>(function, domain_function);
+}
+
+/** \brief Static constructor of an intersection oracle from a function without a domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * without boundary.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Implicit_manifold_intersection_oracle<Function_> make_oracle(const Function_& function) {
+ return Implicit_manifold_intersection_oracle<Function_>(function);
+}
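+
+/* Usage sketch (illustrative: Function_Sm_in_Rd comes from another header of this
+ * module and its constructor arguments here are assumptions; any model of
+ * FunctionForImplicitManifold works):
+ *
+ * Function_Sm_in_Rd sphere_fun(1.0, 2); // hypothetical arguments: the sphere S^2 in R^3
+ * auto oracle = make_oracle(sphere_fun); // manifold without boundary
+ * // With a domain function, the oracle describes a manifold with boundary:
+ * // auto oracle_b = make_oracle(sphere_fun, domain_fun);
+ * // Queries take a simplex of dimension oracle.cod_d() in a triangulation of
+ * // dimension oracle.amb_d(): oracle.intersects(simplex, triangulation).
+ */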
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h b/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h
new file mode 100644
index 00000000..d61bbed7
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h
@@ -0,0 +1,270 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef MANIFOLD_TRACING_H_
+#define MANIFOLD_TRACING_H_
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/Coxeter_triangulation/Query_result.h>
+
+#include <boost/functional/hash.hpp>
+
+#include <Eigen/Dense>
+
+#include <queue>
+#include <unordered_map>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Manifold_tracing
+ * \brief A class that assembles methods for the manifold tracing algorithm.
+ *
+ * \tparam Triangulation_ The type of the ambient triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Triangulation_>
+class Manifold_tracing {
+ public:
+ using Simplex_handle = typename Triangulation_::Simplex_handle;
+
+ struct Simplex_hash {
+ typedef Simplex_handle argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(const argument_type& s) const noexcept {
+ return boost::hash<typename Simplex_handle::Vertex>()(s.vertex());
+ }
+ };
+
+ public:
+ /** \brief Type of the output simplex map with keys of type Triangulation_::Simplex_handle
+ * and values of type Eigen::VectorXd.
+ * This type should be used for the output in the method manifold_tracing_algorithm.
+ */
+ typedef std::unordered_map<Simplex_handle, Eigen::VectorXd, Simplex_hash> Out_simplex_map;
+
+ /**
+ * \brief Computes the set of k-simplices that intersect
+ * a boundaryless implicit manifold given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on seed propagation: it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] out_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the input manifold and the mapped values
+ * are the intersection points.
+ */
+ template <class Point_range, class Intersection_oracle>
+ void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation_& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& out_simplex_map) {
+ std::size_t cod_d = oracle.cod_d();
+ std::queue<Simplex_handle> queue;
+
+ for (const auto& p : seed_points) {
+ Simplex_handle full_simplex = triangulation.locate_point(p);
+ for (Simplex_handle face : full_simplex.face_range(cod_d)) {
+ Query_result<Simplex_handle> qr = oracle.intersects(face, triangulation);
+ if (qr.success && out_simplex_map.emplace(face, qr.intersection).second) {
+#ifdef DEBUG_TRACES
+ mt_seed_inserted_list.push_back(MT_inserted_info(qr, face, false));
+#endif
+ queue.emplace(face);
+ break;
+ }
+ }
+ }
+
+ while (!queue.empty()) {
+ Simplex_handle s = queue.front();
+ queue.pop();
+ for (auto cof : s.coface_range(cod_d + 1)) {
+ for (auto face : cof.face_range(cod_d)) {
+ Query_result<Simplex_handle> qr = oracle.intersects(face, triangulation);
+ if (qr.success && out_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
+ }
+ }
+ }
+ }
+
+ /**
+ * \brief Computes the set of k-simplices that intersect
+ * an implicit manifold with boundary given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on seed propagation: it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] interior_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the relative interior of the input manifold
+ * and the mapped values are the intersection points.
+ * \param[out] boundary_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the boundary of the input manifold
+ * and the mapped values are the intersection points.
+ */
+ template <class Point_range, class Intersection_oracle>
+ void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation_& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& interior_simplex_map,
+ Out_simplex_map& boundary_simplex_map) {
+ std::size_t cod_d = oracle.cod_d();
+ std::queue<Simplex_handle> queue;
+
+ for (const auto& p : seed_points) {
+ Simplex_handle full_simplex = triangulation.locate_point(p);
+ for (Simplex_handle face : full_simplex.face_range(cod_d)) {
+ auto qr = oracle.intersects(face, triangulation);
+#ifdef DEBUG_TRACES
+ mt_seed_inserted_list.push_back(MT_inserted_info(qr, face, false));
+#endif
+ if (qr.success) {
+ if (oracle.lies_in_domain(qr.intersection, triangulation)) {
+ if (interior_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
+ } else {
+ for (Simplex_handle cof : face.coface_range(cod_d + 1)) {
+ auto qrb = oracle.intersects_boundary(cof, triangulation);
+#ifdef DEBUG_TRACES
+ mt_seed_inserted_list.push_back(MT_inserted_info(qrb, cof, true));
+#endif
+ if (qrb.success) boundary_simplex_map.emplace(cof, qrb.intersection);
+ }
+ }
+ }
+ }
+ }
+
+ while (!queue.empty()) {
+ Simplex_handle s = queue.front();
+ queue.pop();
+ for (auto cof : s.coface_range(cod_d + 1)) {
+ for (auto face : cof.face_range(cod_d)) {
+ auto qr = oracle.intersects(face, triangulation);
+#ifdef DEBUG_TRACES
+ mt_inserted_list.push_back(MT_inserted_info(qr, face, false));
+#endif
+ if (qr.success) {
+ if (oracle.lies_in_domain(qr.intersection, triangulation)) {
+ if (interior_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
+ } else {
+ auto qrb = oracle.intersects_boundary(cof, triangulation);
+#ifdef DEBUG_TRACES
+ mt_inserted_list.push_back(MT_inserted_info(qrb, cof, true));
+#endif
+ if (qrb.success) boundary_simplex_map.emplace(cof, qrb.intersection);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /** \brief Empty constructor */
+ Manifold_tracing() {}
+};
+
+/**
+ * \brief Free function counterpart of Manifold_tracing<Triangulation_>::manifold_tracing_algorithm
+ * that computes the set of k-simplices that intersect
+ * a boundaryless implicit manifold given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on seed propagation: it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Triangulation_ The type of the ambient triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ * \tparam Out_simplex_map Needs to be Manifold_tracing<Triangulation_>::Out_simplex_map.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] out_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the input manifold and the mapped values
+ * are the intersection points.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Point_range, class Triangulation, class Intersection_oracle, class Out_simplex_map>
+void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& out_simplex_map) {
+ Manifold_tracing<Triangulation> mt;
+ mt.manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map);
+}
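+
+/* Usage sketch (illustrative: the oracle is assumed to be built with make_oracle from a
+ * model of FunctionForImplicitManifold, and Coxeter_triangulation comes from another
+ * header of this module):
+ *
+ * auto oracle = make_oracle(fun);
+ * Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ * Manifold_tracing<Coxeter_triangulation<> >::Out_simplex_map out_simplex_map;
+ * std::vector<Eigen::VectorXd> seeds = {oracle.seed()};
+ * manifold_tracing_algorithm(seeds, cox_tr, oracle, out_simplex_map);
+ * // out_simplex_map now maps every cod_d-simplex that intersects the manifold
+ * // to its intersection point.
+ */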
+
+/**
+ * \brief Free function counterpart of Manifold_tracing<Triangulation_>::manifold_tracing_algorithm
+ * that computes the set of k-simplices that intersect
+ * an implicit manifold with boundary given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on seed propagation: it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Triangulation_ The type of the ambient triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ * \tparam Out_simplex_map Needs to be Manifold_tracing<Triangulation_>::Out_simplex_map.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] interior_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the relative interior of the input manifold
+ * and the mapped values are the intersection points.
+ * \param[out] boundary_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the boundary of the input manifold
+ * and the mapped values are the intersection points.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Point_range, class Triangulation, class Intersection_oracle, class Out_simplex_map>
+void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& interior_simplex_map,
+ Out_simplex_map& boundary_simplex_map) {
+ Manifold_tracing<Triangulation> mt;
+ mt.manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map, boundary_simplex_map);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h
new file mode 100644
index 00000000..76438c91
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h
@@ -0,0 +1,216 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_H_
+#define PERMUTAHEDRAL_REPRESENTATION_H_
+
+#include <gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h>
+
+#include <utility> // for std::make_pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Permutahedral_representation
+ * \brief A class that stores the permutahedral representation of a simplex
+ * in a Coxeter triangulation or a Freudenthal-Kuhn triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \details The data structure is a record consisting of a range that
+ * represents the vertex and a range that represents the ordered set
+ * partition, both of which identify the simplex in the triangulation.
+ *
+ * \tparam Vertex_ needs to be a random-access range.
+ * \tparam Ordered_set_partition_ needs to be a random-access range that consists of
+ * random-access ranges.
+ */
+template <class Vertex_, class Ordered_set_partition_>
+class Permutahedral_representation {
+ typedef Permutahedral_representation<Vertex_, Ordered_set_partition_> Self;
+
+ public:
+ /** \brief Type of the vertex. */
+ typedef Vertex_ Vertex;
+
+ /** \brief Type of the ordered partition. */
+ typedef Ordered_set_partition_ OrderedSetPartition;
+
+ /** \brief Permutahedral_representation constructor from a vertex and an ordered set partition.
+ *
+ * @param[in] vertex Vertex.
+ * @param[in] partition Ordered set partition.
+ *
+ * \details If the size of vertex is d, the ranges in partition must consist
+ * of the integers 0,...,d without repetition or collision between the ranges.
+ */
+ Permutahedral_representation(const Vertex& vertex, const OrderedSetPartition& partition)
+ : vertex_(vertex), partition_(partition) {}
+
+ /** \brief Constructor for an empty permutahedral representation that does not correspond
+ * to any simplex.
+ */
+ Permutahedral_representation() {}
+
+ /** \brief Dimension of the simplex. */
+ std::size_t dimension() const { return partition_.size() - 1; }
+
+ /** \brief Lexicographically-minimal vertex. */
+ Vertex& vertex() { return vertex_; }
+
+ /** \brief Lexicographically-minimal vertex. */
+ const Vertex& vertex() const { return vertex_; }
+
+ /** \brief Ordered set partition. */
+ OrderedSetPartition& partition() { return partition_; }
+
+ /** \brief Ordered set partition. */
+ const OrderedSetPartition& partition() const { return partition_; }
+
+ /** \brief Equality operator.
+ * Returns true if and only if both the vertex and the ordered set partition coincide.
+ */
+ bool operator==(const Permutahedral_representation& other) const {
+ if (dimension() != other.dimension()) return false;
+ if (vertex_ != other.vertex_) return false;
+ for (std::size_t k = 0; k < partition_.size(); ++k)
+ if (partition_[k] != other.partition_[k]) return false;
+ return true;
+ }
+
+ /** \brief Inequality operator.
+ * Returns true if and only if the vertex or the ordered set partition differ.
+ */
+ bool operator!=(const Permutahedral_representation& other) const { return !(*this == other); }
+
+ typedef Gudhi::coxeter_triangulation::Vertex_iterator<Self> Vertex_iterator;
+ typedef boost::iterator_range<Vertex_iterator> Vertex_range;
+ /** \brief Returns a range of vertices of the simplex.
+ * The type of vertices is Vertex.
+ */
+ Vertex_range vertex_range() const { return Vertex_range(Vertex_iterator(*this), Vertex_iterator()); }
+
+ typedef Gudhi::coxeter_triangulation::Face_iterator<Self> Face_iterator;
+ typedef boost::iterator_range<Face_iterator> Face_range;
+ /** \brief Returns a range of permutahedral representations of faces of the simplex.
+ * @param[in] value_dim The dimension of the faces. Must be between 0 and the dimension of the simplex.
+ */
+ Face_range face_range(std::size_t value_dim) const {
+ return Face_range(Face_iterator(*this, value_dim), Face_iterator());
+ }
+
+ /** \brief Returns a range of permutahedral representations of facets of the simplex.
+ * The dimension of the simplex must be strictly positive.
+ */
+ Face_range facet_range() const { return Face_range(Face_iterator(*this, dimension() - 1), Face_iterator()); }
+
+ typedef Gudhi::coxeter_triangulation::Coface_iterator<Self> Coface_iterator;
+ typedef boost::iterator_range<Coface_iterator> Coface_range;
+ /** \brief Returns a range of permutahedral representations of cofaces of the simplex.
+ * @param[in] value_dim The dimension of the cofaces. Must be between the dimension of the simplex and the ambient
+ * dimension (the size of the vertex).
+ */
+ Coface_range coface_range(std::size_t value_dim) const {
+ return Coface_range(Coface_iterator(*this, value_dim), Coface_iterator());
+ }
+
+ /** \brief Returns a range of permutahedral representations of cofacets of the simplex.
+ * The dimension of the simplex must be strictly different from the ambient dimension (the size of the vertex).
+ */
+ Coface_range cofacet_range() const {
+ return Coface_range(Coface_iterator(*this, dimension() + 1), Coface_iterator());
+ }
+
+ /** \brief Returns true if and only if the simplex is a face of the other simplex.
+ *
+ * @param[in] other A simplex that is potentially a coface of the current simplex.
+ */
+ bool is_face_of(const Permutahedral_representation& other) const {
+ using Part = typename OrderedSetPartition::value_type;
+
+ if (other.dimension() < dimension()) return false;
+ if (other.vertex_.size() != vertex_.size())
+ std::cerr << "Error: Permutahedral_representation::is_face_of: incompatible ambient dimensions.\n";
+
+ Vertex v_self = vertex_, v_other = other.vertex_;
+ auto self_partition_it = partition_.begin();
+ auto other_partition_it = other.partition_.begin();
+ while (self_partition_it != partition_.end()) {
+ while (other_partition_it != other.partition_.end() && v_self != v_other) {
+ const Part& other_part = *other_partition_it++;
+ if (other_partition_it == other.partition_.end()) return false;
+ for (const auto& k : other_part) v_other[k]++;
+ }
+ if (other_partition_it == other.partition_.end()) return false;
+ const Part& self_part = *self_partition_it++;
+ if (self_partition_it == partition_.end()) return true;
+ for (const auto& k : self_part) v_self[k]++;
+ }
+ return true;
+ }
+
+ private:
+ Vertex vertex_;
+ OrderedSetPartition partition_;
+};
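+
+/* Construction sketch: the permutahedral representation of an edge of the
+ * Freudenthal-Kuhn triangulation of R^2. The vertex has size d = 2, so the parts of
+ * the partition consist of the integers {0, 1, 2} without repetition:
+ *
+ * using Vertex = std::vector<int>;
+ * using Part = std::vector<std::size_t>;
+ * using Partition = std::vector<Part>;
+ * using Simplex = Permutahedral_representation<Vertex, Partition>;
+ * Simplex edge(Vertex{0, 0}, Partition{Part{1}, Part{0, 2}});
+ * // edge.dimension() == 1; edge.vertex_range() enumerates the vertices (0,0) and (0,1).
+ */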
+
+/** \brief Print a permutahedral representation to a stream.
+ * \ingroup coxeter_triangulation
+ *
+ * @param[in] os The output stream.
+ * @param[in] simplex A simplex represented by its permutahedral representation.
+ */
+template <class Vertex, class OrderedSetPartition>
+std::ostream& operator<<(std::ostream& os, const Permutahedral_representation<Vertex, OrderedSetPartition>& simplex) {
+ // vertex part
+ os << "(";
+ if (simplex.vertex().empty()) {
+ os << ")";
+ return os;
+ }
+ auto v_it = simplex.vertex().begin();
+ os << *v_it++;
+ for (; v_it != simplex.vertex().end(); ++v_it) os << ", " << *v_it;
+ os << ")";
+
+ // ordered partition part
+ using Part = typename OrderedSetPartition::value_type;
+ auto print_part = [&os](const Part& p) {
+ os << "{";
+ if (p.empty()) {
+ os << "}";
+ return;
+ }
+ auto p_it = p.begin();
+ os << *p_it++;
+ for (; p_it != p.end(); ++p_it) os << ", " << *p_it;
+ os << "}";
+ };
+ os << " [";
+ if (simplex.partition().empty()) {
+ os << "]";
+ return os;
+ }
+ auto o_it = simplex.partition().begin();
+ print_part(*o_it++);
+ for (; o_it != simplex.partition().end(); ++o_it) {
+ os << ", ";
+ print_part(*o_it);
+ }
+ os << "]";
+ return os;
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // PERMUTAHEDRAL_REPRESENTATION_H_
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h
new file mode 100644
index 00000000..5f382e31
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h
@@ -0,0 +1,83 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_COMBINATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_COMBINATION_ITERATOR_H_
+
+#include <vector>
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate combinations of
+ * k elements in a set of n elements.
+ * Based on the algorithm by Mifsud.
+ */
+class Combination_iterator
+ : public boost::iterator_facade<Combination_iterator, std::vector<uint> const, boost::forward_traversal_tag> {
+ typedef std::vector<uint> value_t;
+
+ protected:
+ friend class boost::iterator_core_access;
+
+ bool equal(Combination_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (value_[0] == n_ - k_) {
+ is_end_ = true;
+ return;
+ }
+ uint j = k_ - 1;
+ if (value_[j] < n_ - 1) {
+ value_[j]++;
+ return;
+ }
+ for (; j > 0; --j)
+ if (value_[j - 1] < n_ - k_ + j - 1) {
+ value_[j - 1]++;
+ for (uint s = j; s < k_; s++) value_[s] = value_[j - 1] + s - (j - 1);
+ return;
+ }
+ }
+
+ public:
+ Combination_iterator(const uint& n, const uint& k) : value_(k), is_end_(n == 0), n_(n), k_(k) {
+ for (uint i = 0; i < k; ++i) value_[i] = i;
+ }
+
+ // Used for creating an end iterator
+ Combination_iterator() : is_end_(true), n_(0), k_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) {
+ is_end_ = false;
+ for (uint i = 0; i < k_; ++i) value_[i] = i; // value_ has size k_: reset to the first combination {0,...,k_-1}
+ }
+ }
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+
+ uint n_;
+ uint k_;
+};
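+
+/* Usage sketch: enumerate all 2-element subsets of {0, 1, 2, 3}; a default-constructed
+ * iterator serves as the end iterator:
+ *
+ * Combination_iterator c_it(4, 2), c_end;
+ * for (; c_it != c_end; ++c_it) {
+ * for (uint i : *c_it) std::cout << i << " ";
+ * std::cout << std::endl; // prints the 6 pairs 01, 02, 03, 12, 13, 23
+ * }
+ */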
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
new file mode 100644
index 00000000..594b6fbf
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
@@ -0,0 +1,113 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_INTEGER_COMBINATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_INTEGER_COMBINATION_ITERATOR_H_
+
+#include <vector>
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate decompositions of an integer n
+ * as an ordered sum of k non-negative integers, where the i-th summand is bounded
+ * by the i-th element of a given range of bounds.
+ */
+class Integer_combination_iterator
+ : public boost::iterator_facade<Integer_combination_iterator, std::vector<uint> const,
+ boost::forward_traversal_tag> {
+ using value_t = std::vector<uint>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Integer_combination_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ uint j1 = 0;
+ uint s = 0;
+ while (value_[j1] == 0 && j1 < k_) j1++;
+ uint j2 = j1 + 1;
+ while (value_[j2] == bounds_[j2]) {
+ if (bounds_[j2] != 0) {
+ s += value_[j1];
+ value_[j1] = 0;
+ j1 = j2;
+ }
+ j2++;
+ }
+ if (j2 >= k_) {
+ is_end_ = true;
+ return;
+ }
+ s += value_[j1] - 1;
+ value_[j1] = 0;
+ value_[j2]++;
+ uint i = 0;
+ while (s >= bounds_[i]) {
+ value_[i] = bounds_[i];
+ s -= bounds_[i];
+ i++;
+ }
+ value_[i++] = s;
+ }
+
+ public:
+ template <class Bound_range>
+ Integer_combination_iterator(const uint& n, const uint& k, const Bound_range& bounds)
+ : value_(k + 2), is_end_(n == 0 || k == 0), k_(k) {
+ bounds_.reserve(k + 2);
+ uint sum_radices = 0;
+ for (auto b : bounds) {
+ bounds_.push_back(b);
+ sum_radices += b;
+ }
+ bounds_.push_back(2);
+ bounds_.push_back(1);
+ if (n > sum_radices) {
+ is_end_ = true;
+ return;
+ }
+ uint i = 0;
+ uint s = n;
+ while (s >= bounds_[i]) {
+ value_[i] = bounds_[i];
+ s -= bounds_[i];
+ i++;
+ }
+ value_[i++] = s;
+
+ while (i < k_) value_[i++] = 0;
+ value_[k] = 1;
+ value_[k + 1] = 0;
+ }
+
+ // Used for creating an end iterator
+ Integer_combination_iterator() : is_end_(true), k_(0) {}
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current integer combination is the final one
+
+ uint k_;
+ std::vector<uint> bounds_;
+};
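+
+/* Usage sketch: enumerate the ways to write n = 3 as an ordered sum of k = 2
+ * non-negative integers, each bounded by 2. Only the first k entries of the
+ * dereferenced vector are meaningful; the trailing entries are internal sentinels:
+ *
+ * std::vector<uint> bounds{2, 2};
+ * Integer_combination_iterator i_it(3, 2, bounds), i_end;
+ * for (; i_it != i_end; ++i_it)
+ * std::cout << (*i_it)[0] << " + " << (*i_it)[1] << std::endl; // 2 + 1, then 1 + 2
+ */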
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h
new file mode 100644
index 00000000..866079fa
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h
@@ -0,0 +1,93 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_ORDERED_SET_PARTITION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_ORDERED_SET_PARTITION_ITERATOR_H_
+
+#include <vector>
+#include <limits>
+
+#include <gudhi/Permutahedral_representation/Permutation_iterator.h>
+#include <gudhi/Permutahedral_representation/Set_partition_iterator.h>
+
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that represents an ordered set partition of a set {0,...,n-1} in k parts as
+ * a pair of an unordered set partition given in lexicographic order and
+ * a permutation of the parts.
+ */
+struct Ordered_set_partition {
+ Set_partition_iterator s_it_;
+ Permutation_iterator p_it_;
+
+
+ const std::vector<uint> operator[](const uint& i) const { return (*s_it_)[(*p_it_)[i]]; }
+
+ std::size_t size() const { return s_it_->size(); }
+};
+
+/** \brief Class that allows the user to generate ordered set partitions of a set {0,...,n-1}
+ * in k parts.
+ */
+class Ordered_set_partition_iterator
+ : public boost::iterator_facade<Ordered_set_partition_iterator, Ordered_set_partition const,
+ boost::forward_traversal_tag> {
+ using value_t = Ordered_set_partition;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Ordered_set_partition_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++value_.p_it_ == p_end_) {
+ if (++value_.s_it_ == s_end_) {
+ is_end_ = true;
+ return;
+ } else
+ value_.p_it_.reinitialize();
+ }
+ }
+
+ public:
+ Ordered_set_partition_iterator(const uint& n, const uint& k)
+ : value_({Set_partition_iterator(n, k), Permutation_iterator(k)}), is_end_(n == 0) {}
+
+ // Used for creating an end iterator
+ Ordered_set_partition_iterator() : is_end_(true) {}
+
+ void reinitialize() {
+ is_end_ = false;
+ value_.p_it_.reinitialize();
+ value_.s_it_.reinitialize();
+ }
+
+ private:
+ Set_partition_iterator s_end_; // end iterator for the set partition iterator in value_
+ Permutation_iterator p_end_; // end iterator for the permutation iterator in value_
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+};
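+
+/* Usage sketch: enumerate the 2! * S(3,2) = 6 ordered partitions of {0, 1, 2} into two
+ * parts, accessing each part through operator[]:
+ *
+ * Ordered_set_partition_iterator o_it(3, 2), o_end;
+ * for (; o_it != o_end; ++o_it)
+ * for (std::size_t i = 0; i < o_it->size(); ++i) {
+ * // (*o_it)[i] is the i-th part of the current ordered partition, a std::vector<uint>
+ * }
+ */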
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
new file mode 100644
index 00000000..1a63d2f7
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
@@ -0,0 +1,256 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_PERMUTAHEDRAL_REPRESENTATION_ITERATORS_H_
+#define PERMUTAHEDRAL_REPRESENTATION_PERMUTAHEDRAL_REPRESENTATION_ITERATORS_H_
+
+#include <gudhi/Permutahedral_representation/Size_range.h>
+#include <gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h>
+#include <gudhi/Permutahedral_representation/Integer_combination_iterator.h>
+#include <gudhi/Permutahedral_representation/Combination_iterator.h>
+#include <gudhi/Permutahedral_representation/face_from_indices.h>
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <vector>
+#include <iostream>
+#include <algorithm> // for std::find
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \addtogroup coxeter_triangulation
+ * Iterator types for Permutahedral_representation
+ * @{
+ */
+
+/** \brief Iterator over the vertices of a simplex
+ * represented by its permutahedral representation.
+ *
+ * Forward iterator, 'value_type' is Permutahedral_representation::Vertex.*/
+template <class Permutahedral_representation>
+class Vertex_iterator
+ : public boost::iterator_facade<Vertex_iterator<Permutahedral_representation>,
+ typename Permutahedral_representation::Vertex const, boost::forward_traversal_tag> {
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ using value_t = Vertex;
+
+ bool equal(Vertex_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void update_value() {
+ std::size_t d = value_.size();
+ for (auto i : *o_it_)
+ if (i != d)
+ value_[i]++;
+ else
+ for (std::size_t j = 0; j < d; j++) value_[j]--;
+ }
+
+ void increment() {
+ if (is_end_) return;
+ update_value();
+ if (++o_it_ == o_end_) is_end_ = true;
+ }
+
+ public:
+ Vertex_iterator(const Permutahedral_representation& simplex)
+ : o_it_(simplex.partition().begin()),
+ o_end_(simplex.partition().end()),
+ value_(simplex.vertex()),
+ is_end_(o_it_ == o_end_) {}
+
+ Vertex_iterator() : is_end_(true) {}
+
+ private:
+ typename Ordered_partition::const_iterator o_it_, o_end_;
+ value_t value_;
+ bool is_end_;
+
+}; // Vertex_iterator
+
+/*---------------------------------------------------------------------------*/
+/** \brief Iterator over the k-faces of a simplex
+ * given by its permutahedral representation.
+ *
+ * Forward iterator, value_type is Permutahedral_representation. */
+template <class Permutahedral_representation>
+class Face_iterator : public boost::iterator_facade<Face_iterator<Permutahedral_representation>,
+ Permutahedral_representation const, boost::forward_traversal_tag> {
+ using value_t = Permutahedral_representation;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ bool equal(Face_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++c_it_ == c_end_) {
+ is_end_ = true;
+ return;
+ }
+ update_value();
+ }
+
+ void update_value() {
+ // Combination *c_it_ is supposed to be sorted in increasing order
+ value_ = face_from_indices<Permutahedral_representation>(simplex_, *c_it_);
+ }
+
+ public:
+ Face_iterator(const Permutahedral_representation& simplex, const uint& k)
+ : simplex_(simplex),
+ k_(k),
+ l_(simplex.dimension()),
+ c_it_(l_ + 1, k_ + 1),
+ is_end_(k_ > l_),
+ value_({Vertex(simplex.vertex().size()), Ordered_partition(k + 1)}) {
+ if (!is_end_) update_value(); // no k-face exists when k exceeds the simplex dimension
+ }
+
+ // Used for creating an end iterator
+ Face_iterator() : is_end_(true) {}
+
+ private:
+ Permutahedral_representation simplex_; // Input simplex
+ uint k_;
+ uint l_; // Dimension of the input simplex
+ Combination_iterator c_it_, c_end_; // indicates the vertices in the current face
+
+ bool is_end_; // is true when the current permutation is the final one
+ value_t value_; // the dereference value
+
+}; // Face_iterator
+
+/*---------------------------------------------------------------------------*/
+/** \brief Iterator over the k-cofaces of a simplex
+ * given by its permutahedral representation.
+ *
+ * Forward iterator, value_type is Permutahedral_representation. */
+template <class Permutahedral_representation>
+class Coface_iterator
+ : public boost::iterator_facade<Coface_iterator<Permutahedral_representation>, Permutahedral_representation const,
+ boost::forward_traversal_tag> {
+ using value_t = Permutahedral_representation;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ bool equal(Coface_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ uint i = 0;
+ for (; i < k_ + 1; i++) {
+ if (++(o_its_[i]) != o_end_) break;
+ }
+ if (i == k_ + 1) {
+ if (++i_it_ == i_end_) {
+ is_end_ = true;
+ return;
+ }
+ o_its_.clear();
+ for (uint j = 0; j < k_ + 1; j++)
+ o_its_.emplace_back(simplex_.partition()[j].size(), (*i_it_)[j] + 1);
+ } else
+ for (uint j = 0; j < i; j++) o_its_[j].reinitialize();
+ update_value();
+ }
+
+ void update_value() {
+ value_.vertex() = simplex_.vertex();
+ for (auto& p : value_.partition()) p.clear();
+ uint u_ = 0; // the part in o_its_[k_] that contains t_
+ for (; u_ <= (*i_it_)[k_]; u_++) {
+ auto range = (*o_its_[k_])[u_];
+ if (std::find(range.begin(), range.end(), t_) != range.end()) break;
+ }
+ uint i = 0;
+ for (uint j = u_ + 1; j <= (*i_it_)[k_]; j++, i++)
+ for (uint b : (*o_its_[k_])[j]) {
+ uint c = simplex_.partition()[k_][b];
+ value_.partition()[i].push_back(c);
+ value_.vertex()[c]--;
+ }
+ for (uint h = 0; h < k_; h++)
+ for (uint j = 0; j <= (*i_it_)[h]; j++, i++) {
+ for (uint b : (*o_its_[h])[j]) value_.partition()[i].push_back(simplex_.partition()[h][b]);
+ }
+ for (uint j = 0; j <= u_; j++, i++)
+ for (uint b : (*o_its_[k_])[j]) value_.partition()[i].push_back(simplex_.partition()[k_][b]);
+ // sort the values in each part (probably not needed)
+ for (auto& part : value_.partition()) std::sort(part.begin(), part.end());
+ }
+
+ public:
+ Coface_iterator(const Permutahedral_representation& simplex, const uint& l)
+ : simplex_(simplex),
+ d_(simplex.vertex().size()),
+ l_(l),
+ k_(simplex.dimension()),
+ i_it_(l_ - k_, k_ + 1, Size_range<Ordered_partition>(simplex.partition())),
+ is_end_(k_ > l_),
+ value_({Vertex(d_), Ordered_partition(l_ + 1)}) {
+ if (is_end_) return; // no coface of dimension l_ exists when l_ < the simplex dimension
+ uint j = 0;
+ for (; j < simplex_.partition()[k_].size(); j++)
+ if (simplex_.partition()[k_][j] == d_) {
+ t_ = j;
+ break;
+ }
+ if (j == simplex_.partition()[k_].size()) {
+ std::cerr << "Coface iterator: the argument simplex is not a permutahedral representation\n";
+ is_end_ = true;
+ return;
+ }
+ for (uint i = 0; i < k_ + 1; i++)
+ o_its_.emplace_back(simplex_.partition()[i].size(), (*i_it_)[i] + 1);
+ update_value();
+ }
+
+ // Used for creating an end iterator
+ Coface_iterator() : is_end_(true) {}
+
+ private:
+ Permutahedral_representation simplex_; // Input simplex
+ uint d_; // Ambient dimension
+ uint l_; // Dimension of the coface
+ uint k_; // Dimension of the input simplex
+ uint t_; // The position of d in simplex_.partition()[k_]
+ Integer_combination_iterator i_it_, i_end_; // indicates in how many parts each simplex_[i] is subdivided
+ std::vector<Ordered_set_partition_iterator> o_its_; // indicates subdivision for each simplex_[i]
+ Ordered_set_partition_iterator o_end_; // one end for all o_its_
+
+ bool is_end_; // is true when the current permutation is the final one
+ value_t value_; // the dereference value
+
+}; // Coface_iterator
+
+/** @} */
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h
new file mode 100644
index 00000000..0f91d41c
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h
@@ -0,0 +1,120 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_PERMUTATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_PERMUTATION_ITERATOR_H_
+
+#include <cstdlib> // for std::size_t
+#include <vector>
+
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate permutations.
+ * Based on the optimization of the Heap's algorithm by Sedgewick.
+ */
+class Permutation_iterator
+ : public boost::iterator_facade<Permutation_iterator, std::vector<uint> const, boost::forward_traversal_tag> {
+ using value_t = std::vector<uint>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Permutation_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void swap_two_indices(std::size_t i, std::size_t j) {
+ uint t = value_[i];
+ value_[i] = value_[j];
+ value_[j] = t;
+ }
+
+ void elementary_increment() {
+ uint j = 0;
+ while (d_[j] == j + 1) {
+ d_[j] = 0;
+ ++j;
+ }
+ if (j == n_ - 1) {
+ is_end_ = true;
+ return;
+ }
+ uint k = j + 1;
+ uint x = (k % 2 ? d_[j] : 0);
+ swap_two_indices(k, x);
+ ++d_[j];
+ }
+
+ void elementary_increment_optim_3() {
+ if (ct_ != 0) {
+ --ct_;
+ swap_two_indices(1 + (ct_ % 2), 0);
+ } else {
+ ct_ = 5;
+ uint j = 2;
+ while (d_[j] == j + 1) {
+ d_[j] = 0;
+ ++j;
+ }
+ if (j == n_ - 1) {
+ is_end_ = true;
+ return;
+ }
+ uint k = j + 1;
+ uint x = (k % 2 ? d_[j] : 0);
+ swap_two_indices(k, x);
+ ++d_[j];
+ }
+ }
+
+ void increment() {
+ if (optim_3_)
+ elementary_increment_optim_3();
+ else
+ elementary_increment();
+ }
+
+ public:
+ Permutation_iterator(const uint& n) : value_(n), is_end_(n == 0), optim_3_(n >= 3), n_(n), d_(n), ct_(5) {
+ for (uint i = 0; i < n; ++i) {
+ value_[i] = i;
+ d_[i] = 0;
+ }
+ if (n > 0) d_[n - 1] = -1;
+ }
+
+ // Used for creating an end iterator
+ Permutation_iterator() : is_end_(true), n_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) is_end_ = false;
+ }
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+ bool optim_3_; // true if n>=3. for n >= 3, the algorithm is optimized
+
+ uint n_;
+ std::vector<uint> d_; // mix radix digits with radix [2 3 4 ... n-1 (sentinel=-1)]
+ uint ct_; // counter with values in {0,...,5} used in the n>=3 optimization.
+};
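+
+/* Usage sketch: enumerate all 3! = 6 permutations of {0, 1, 2}, starting from the
+ * identity permutation:
+ *
+ * Permutation_iterator p_it(3), p_end;
+ * for (; p_it != p_end; ++p_it) {
+ * for (uint i : *p_it) std::cout << i << " ";
+ * std::cout << std::endl;
+ * }
+ */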
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h
new file mode 100644
index 00000000..94ac10c2
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h
@@ -0,0 +1,111 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SET_PARTITION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SET_PARTITION_ITERATOR_H_
+
+#include <vector>
+#include <limits>
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate set partitions of a set {0,...,n-1} in k parts.
+ *
+ */
+class Set_partition_iterator
+ : public boost::iterator_facade<Set_partition_iterator, std::vector<std::vector<uint>> const,
+ boost::forward_traversal_tag> {
+ using value_t = std::vector<std::vector<uint>>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Set_partition_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void update_value() {
+ for (uint i = 0; i < k_; i++) value_[i].clear();
+ for (uint i = 0; i < n_; i++) value_[rgs_[i]].push_back(i);
+ }
+
+ void increment() {
+ if (k_ <= 1) {
+ is_end_ = true;
+ return;
+ }
+ uint i = n_ - 1;
+ while (rgs_[i] + 1 > max_[i] || rgs_[i] + 1 >= k_) i--;
+ if (i == 0) {
+ is_end_ = true;
+ return;
+ }
+ rgs_[i]++;
+ uint mm = max_[i];
+ mm += (rgs_[i] >= mm);
+ max_[i + 1] = mm;
+ while (++i < n_) {
+ rgs_[i] = 0;
+ max_[i + 1] = mm;
+ }
+ uint p = k_;
+ if (mm < p) do {
+ max_[i] = p;
+ --i;
+ --p;
+ rgs_[i] = p;
+ } while (max_[i] < p);
+ update_value();
+ }
+
+ public:
+ Set_partition_iterator(const uint& n, const uint& k)
+ : value_(k), rgs_(n, 0), max_(n + 1), is_end_(n == 0), n_(n), k_(k) {
+ max_[0] = std::numeric_limits<uint>::max();
+ for (uint i = 0; i <= n - k; ++i) value_[0].push_back(i);
+ for (uint i = n - k + 1, j = 1; i < n; ++i, ++j) {
+ rgs_[i] = j;
+ value_[j].push_back(i);
+ }
+ for (uint i = 1; i <= n; i++) max_[i] = rgs_[i - 1] + 1;
+ update_value();
+ }
+
+ // Used for creating an end iterator
+ Set_partition_iterator() : is_end_(true), n_(0), k_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) is_end_ = false;
+ for (uint i = 0; i <= n_ - k_; ++i) rgs_[i] = 0;
+ for (uint i = n_ - k_ + 1, j = 1; i < n_; ++i, ++j) rgs_[i] = j;
+ for (uint i = 1; i <= n_; i++) max_[i] = rgs_[i - 1] + 1;
+ update_value();
+ }
+
+ private:
+ value_t value_; // the dereference value
+ std::vector<uint> rgs_; // restricted growth string
+ std::vector<uint> max_; // max_[i] = max(rgs_[0],...,rgs[i-1]) + 1
+ bool is_end_; // is true when the current permutation is the final one
+
+ uint n_;
+ uint k_;
+};
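+
+/* Usage sketch: enumerate the S(4,2) = 7 partitions of {0, 1, 2, 3} into two non-empty
+ * parts, starting from {{0,1,2},{3}}:
+ *
+ * Set_partition_iterator s_it(4, 2), s_end;
+ * for (; s_it != s_end; ++s_it)
+ * for (const std::vector<uint>& part : *s_it) {
+ * // each part is given in increasing order
+ * }
+ */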
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h
new file mode 100644
index 00000000..905d68d5
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h
@@ -0,0 +1,54 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SIMPLEX_COMPARATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SIMPLEX_COMPARATOR_H_
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Simplex_comparator
+ * \brief A comparator class for Permutahedral_representation.
+ * The comparison is in lexicographic order first on
+ * vertices and then on ordered partitions with sorted parts.
+ * This order guarantees that any face of a simplex is greater than
+ * any of its cofaces.
+ *
+ * \tparam Permutahedral_representation_ Needs to be
+ * Permutahedral_representation<Vertex_, Ordered_set_partition_>
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Permutahedral_representation_>
+struct Simplex_comparator {
+ /** \brief Comparison between two permutahedral representations.
+ * Both permutahedral representations need to be valid and
+ * the vertices of both permutahedral representations need to be of the same size.
+ */
+ bool operator()(const Permutahedral_representation_& lhs, const Permutahedral_representation_& rhs) const {
+ if (lhs.vertex() < rhs.vertex()) return true;
+ if (lhs.vertex() > rhs.vertex()) return false;
+
+ if (lhs.partition().size() > rhs.partition().size()) return true;
+ if (lhs.partition().size() < rhs.partition().size()) return false;
+
+ if (lhs.partition() < rhs.partition()) return true;
+ if (lhs.partition() > rhs.partition()) return false;
+
+ return false;
+ }
+};
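+
+/* Usage sketch: the comparator makes permutahedral representations usable as keys of
+ * ordered associative containers (Simplex_handle is assumed to be an alias of a
+ * Permutahedral_representation type):
+ *
+ * std::map<Simplex_handle, std::size_t, Simplex_comparator<Simplex_handle> > cell_index;
+ */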
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
new file mode 100644
index 00000000..6b137744
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
@@ -0,0 +1,73 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SIZE_RANGE_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SIZE_RANGE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <boost/range/iterator_range.hpp>
+#include <boost/iterator/iterator_facade.hpp> // for boost::iterator_facade
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Auxiliary iterator class for sizes of parts in an ordered set partition.
+ */
+template <class T_it>
+class Size_iterator
+ : public boost::iterator_facade<Size_iterator<T_it>, std::size_t const, boost::forward_traversal_tag> {
+ friend class boost::iterator_core_access;
+
+ private:
+ bool equal(Size_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ std::size_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++t_it_ == t_end_) {
+ is_end_ = true;
+ return;
+ }
+ value_ = t_it_->size() - 1;
+ }
+
+ public:
+ Size_iterator(const T_it& t_begin, const T_it& t_end) : t_it_(t_begin), t_end_(t_end), is_end_(t_begin == t_end) {
+ if (!is_end_) value_ = t_it_->size() - 1;
+ }
+
+ private:
+ T_it t_it_, t_end_;
+ bool is_end_;
+ std::size_t value_;
+};
+
+template <class T>
+class Size_range {
+ const T& t_;
+
+ public:
+ typedef Size_iterator<typename T::const_iterator> iterator;
+
+ Size_range(const T& t) : t_(t) {}
+
+ std::size_t operator[](std::size_t i) const { return t_[i].size() - 1; }
+
+ iterator begin() const { return iterator(t_.begin(), t_.end()); }
+
+ iterator end() const { return iterator(t_.end(), t_.end()); }
+};
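+
+/* Usage sketch: view every part of a partition through its size minus one:
+ *
+ * std::vector<std::vector<int> > partition = {{0, 1}, {2}, {3, 4, 5}};
+ * Size_range<std::vector<std::vector<int> > > sizes(partition);
+ * // sizes[0] == 1, sizes[1] == 0, sizes[2] == 2; begin()/end() iterate the same values
+ */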
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h
new file mode 100644
index 00000000..47120689
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h
@@ -0,0 +1,66 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_FACE_FROM_INDICES_H_
+#define PERMUTAHEDRAL_REPRESENTATION_FACE_FROM_INDICES_H_
+
+#include <cstdlib> // for std::size_t
+#include <algorithm>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Computes the permutahedral representation of a face of a given simplex
+ * from a range of the vertex indices that compose the face.
+ *
+ * \tparam Permutahedral_representation has to be Permutahedral_representation
+ * \tparam Index_range is a range of unsigned integers taking values in 0,...,k,
+ * where k is the dimension of the input simplex.
+ *
+ * @param[in] simplex Input simplex.
+ * @param[in] indices Input range of indices.
+ */
+template <class Permutahedral_representation, class Index_range>
+Permutahedral_representation face_from_indices(const Permutahedral_representation& simplex,
+ const Index_range& indices) {
+ using range_index = typename Index_range::value_type;
+ using Ordered_set_partition = typename Permutahedral_representation::OrderedSetPartition;
+ using Part = typename Ordered_set_partition::value_type;
+ using part_index = typename Part::value_type;
+ Permutahedral_representation value;
+ std::size_t d = simplex.vertex().size();
+ value.vertex() = simplex.vertex();
+ std::size_t k = indices.size() - 1;
+ value.partition().resize(k + 1);
+ std::size_t l = simplex.partition().size() - 1;
+ for (std::size_t h = 1; h < k + 1; h++)
+ for (range_index i = indices[h - 1]; i < indices[h]; i++)
+ for (part_index j : simplex.partition()[i]) value.partition()[h - 1].push_back(j);
+ for (range_index i = indices[k]; i < l + 1; i++)
+ for (part_index j : simplex.partition()[i]) value.partition()[k].push_back(j);
+ for (range_index i = 0; i < indices[0]; i++)
+ for (part_index j : simplex.partition()[i]) {
+ if (j != d)
+ value.vertex()[j]++;
+ else
+        for (std::size_t m = 0; m < d; m++) value.vertex()[m]--;
+ value.partition()[k].push_back(j);
+ }
+ // sort the values in each part (probably not needed)
+ for (auto& part : value.partition()) std::sort(part.begin(), part.end());
+ return value;
+}
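+
+// Illustrative sketch (it mirrors the typedefs of the unit tests; the concrete
+// types are assumptions, not mandated by the function):
+//
+//   using Vertex = std::vector<int>;
+//   using Part = std::vector<std::size_t>;
+//   using Partition = std::vector<Part>;
+//   using Simplex = Gudhi::coxeter_triangulation::Permutahedral_representation<Vertex, Partition>;
+//   // A full-dimensional simplex in dimension 3:
+//   Simplex s(Vertex(3, 0), Partition({Part({0}), Part({1}), Part({2}), Part({3})}));
+//   // The edge of s spanned by its vertices number 0 and 2:
+//   Simplex edge = face_from_indices(s, std::vector<std::size_t>{0, 2});
+//   // edge.partition() is {{0, 1}, {2, 3}} with the same base vertex.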
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/test/CMakeLists.txt b/src/Coxeter_triangulation/test/CMakeLists.txt
new file mode 100644
index 00000000..74ded91e
--- /dev/null
+++ b/src/Coxeter_triangulation/test/CMakeLists.txt
@@ -0,0 +1,30 @@
+project(Coxeter_triangulation_test)
+
+include(GUDHI_boost_test)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+ add_executable ( Coxeter_triangulation_permutahedral_representation_test perm_rep_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_permutahedral_representation_test)
+
+ add_executable ( Coxeter_triangulation_freudenthal_triangulation_test freud_triang_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_freudenthal_triangulation_test)
+
+ add_executable ( Coxeter_triangulation_functions_test function_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_functions_test)
+
+  # random_orthogonal_matrix.h requires CGAL >= 4.11, hence this extra guard
+ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_executable ( Coxeter_triangulation_random_orthogonal_matrix_function_test random_orthogonal_matrix_function_test.cpp )
+ target_link_libraries(Coxeter_triangulation_random_orthogonal_matrix_function_test ${CGAL_LIBRARY})
+ gudhi_add_boost_test(Coxeter_triangulation_random_orthogonal_matrix_function_test)
+ endif()
+
+ add_executable ( Coxeter_triangulation_oracle_test oracle_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_oracle_test)
+
+ add_executable ( Coxeter_triangulation_manifold_tracing_test manifold_tracing_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_manifold_tracing_test)
+
+ add_executable ( Coxeter_triangulation_cell_complex_test cell_complex_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_cell_complex_test)
+endif()
\ No newline at end of file
diff --git a/src/Coxeter_triangulation/test/cell_complex_test.cpp b/src/Coxeter_triangulation/test/cell_complex_test.cpp
new file mode 100644
index 00000000..4f7f3ec5
--- /dev/null
+++ b/src/Coxeter_triangulation/test/cell_complex_test.cpp
@@ -0,0 +1,59 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "cell_complex"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Debug_utils.h>
+#include <gudhi/IO/output_debug_traces_to_html.h>
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Function_torus_in_R3.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(cell_complex) {
+ double radius = 1.1111;
+ Function_torus_in_R3 fun_torus(radius, 3 * radius);
+ Eigen::VectorXd seed = fun_torus.seed();
+ Function_Sm_in_Rd fun_bound(2.5 * radius, 2, seed);
+
+ auto oracle = make_oracle(fun_torus, fun_bound);
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ std::size_t interior_sc_map_size0 = cell_complex.interior_simplex_cell_map(0).size();
+ std::size_t interior_sc_map_size1 = cell_complex.interior_simplex_cell_map(1).size();
+ std::size_t interior_sc_map_size2 = cell_complex.interior_simplex_cell_map(2).size();
+ std::size_t boundary_sc_map_size0 = cell_complex.boundary_simplex_cell_map(0).size();
+ std::size_t boundary_sc_map_size1 = cell_complex.boundary_simplex_cell_map(1).size();
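+  // Each simplex recorded by the tracing yields one interior 0-cell, and the
+  // alternating sums below are Euler characteristics: they vanish both for
+  // the torus patch and for its boundary circles.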
+ BOOST_CHECK(interior_simplex_map.size() == interior_sc_map_size0);
+ BOOST_CHECK(boundary_sc_map_size0 - boundary_sc_map_size1 == 0);
+ BOOST_CHECK(interior_sc_map_size0 - interior_sc_map_size1 + interior_sc_map_size2 == 0);
+}
diff --git a/src/Coxeter_triangulation/test/freud_triang_test.cpp b/src/Coxeter_triangulation/test/freud_triang_test.cpp
new file mode 100644
index 00000000..2cf8f00e
--- /dev/null
+++ b/src/Coxeter_triangulation/test/freud_triang_test.cpp
@@ -0,0 +1,114 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "freudenthal_triangulation"
+#include <boost/test/unit_test.hpp>
+
+#include <gudhi/Unitary_tests_utils.h>
+#include <gudhi/Freudenthal_triangulation.h>
+#include <gudhi/Coxeter_triangulation.h>
+
+BOOST_AUTO_TEST_CASE(freudenthal_triangulation) {
+  // Type definitions
+ typedef std::vector<double> Point;
+ typedef Gudhi::coxeter_triangulation::Freudenthal_triangulation<> FK_triangulation;
+ typedef typename FK_triangulation::Simplex_handle Simplex_handle;
+ typedef typename FK_triangulation::Vertex_handle Vertex_handle;
+ typedef typename Simplex_handle::OrderedSetPartition Ordered_set_partition;
+ typedef typename Ordered_set_partition::value_type Part;
+
+ FK_triangulation tr(3);
+
+ // Point location check
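+  // locate_point returns the simplex whose base vertex is the coordinate-wise
+  // floor of the query point and whose ordered partition lists the coordinate
+  // indices by decreasing fractional part (ties share a part); the special
+  // index d = 3 always lies in the last part, as the cases below illustrate.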
+ {
+ Point point({3, -1, 0});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -1, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 1, 2, 3})}));
+ }
+
+ {
+ Point point({3.5, -1.5, 0.5});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 1, 2}), Part({3})}));
+ }
+
+ {
+ Point point({3.5, -1.8, 0.5});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 2}), Part({1}), Part({3})}));
+ }
+
+ {
+ Point point({3.5, -1.8, 0.3});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0}), Part({2}), Part({1}), Part({3})}));
+ }
+
+ // Dimension check
+ BOOST_CHECK(tr.dimension() == 3);
+ // Matrix check
+ Eigen::MatrixXd default_matrix = Eigen::MatrixXd::Identity(3, 3);
+ BOOST_CHECK(tr.matrix() == default_matrix);
+ // Vector check
+  Eigen::VectorXd default_offset = Eigen::VectorXd::Zero(3);
+ BOOST_CHECK(tr.offset() == default_offset);
+
+ // Barycenter check
+ Point point({3.5, -1.8, 0.3});
+ Simplex_handle s = tr.locate_point(point);
+ Eigen::Vector3d barycenter_cart = Eigen::Vector3d::Zero();
+ for (auto v : s.vertex_range())
+ for (std::size_t i = 0; i < v.size(); i++) barycenter_cart(i) += v[i];
+ barycenter_cart /= 4.; // simplex is three-dimensional
+ Eigen::Vector3d barycenter = tr.barycenter(s);
+  for (long int i = 0; i < barycenter.size(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(barycenter(i), barycenter_cart(i), 1e-7);
+
+ // Barycenter check for twice the scale
+ s = tr.locate_point(point, 2);
+ barycenter_cart = Eigen::Vector3d::Zero();
+ for (auto v : s.vertex_range())
+ for (std::size_t i = 0; i < v.size(); i++) barycenter_cart(i) += v[i];
+ barycenter_cart /= 3.; // simplex is now a two-dimensional face
+ barycenter_cart /= 2.; // scale
+ barycenter = tr.barycenter(s, 2);
+  for (long int i = 0; i < barycenter.size(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(barycenter(i), barycenter_cart(i), 1e-7);
+
+ // Matrix and offset change check
+ Eigen::MatrixXd new_matrix(3, 3);
+ new_matrix << 1, 0, 0, -1, 1, 0, -1, 0, 1;
+ Eigen::Vector3d new_offset(1.5, 1, 0.5);
+ tr.change_matrix(new_matrix);
+ tr.change_offset(new_offset);
+
+ BOOST_CHECK(tr.matrix() == new_matrix);
+ BOOST_CHECK(tr.offset() == new_offset);
+}
+
+#ifdef GUDHI_DEBUG
+BOOST_AUTO_TEST_CASE(freudenthal_triangulation_exceptions_in_debug_mode) {
+ // Point location check
+ typedef Gudhi::coxeter_triangulation::Freudenthal_triangulation<> FK_triangulation;
+
+ BOOST_CHECK_THROW (FK_triangulation tr(3, Eigen::MatrixXd::Identity(3, 3), Eigen::VectorXd::Zero(4)),
+ std::invalid_argument);
+
+ FK_triangulation tr(3);
+ // Point of dimension 4
+ std::vector<double> point({3.5, -1.8, 0.3, 4.1});
+ BOOST_CHECK_THROW (tr.locate_point(point), std::invalid_argument);
+}
+#endif
diff --git a/src/Coxeter_triangulation/test/function_test.cpp b/src/Coxeter_triangulation/test/function_test.cpp
new file mode 100644
index 00000000..43dbcb75
--- /dev/null
+++ b/src/Coxeter_triangulation/test/function_test.cpp
@@ -0,0 +1,158 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+// workaround for the deprecation message emitted through boost/pending/integer_log2.hpp in Boost 1.69
+#define BOOST_PENDING_INTEGER_LOG2_HPP
+#include <boost/integer/integer_log2.hpp>
+// end workaround
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "function"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Function_affine_plane_in_Rd.h>
+#include <gudhi/Functions/Constant_function.h>
+#include <gudhi/Functions/Function_chair_in_R3.h>
+#include <gudhi/Functions/Function_torus_in_R3.h>
+#include <gudhi/Functions/Function_whitney_umbrella_in_R3.h>
+#include <gudhi/Functions/Function_lemniscate_revolution_in_R3.h>
+#include <gudhi/Functions/Function_iron_in_R3.h>
+#include <gudhi/Functions/Function_moment_curve_in_Rd.h>
+#include <gudhi/Functions/Embed_in_Rd.h>
+#include <gudhi/Functions/Translate.h>
+#include <gudhi/Functions/Linear_transformation.h>
+#include <gudhi/Functions/Negation.h>
+#include <gudhi/Functions/Cartesian_product.h>
+#include <gudhi/Functions/PL_approximation.h>
+
+#include <gudhi/Coxeter_triangulation.h>
+
+#include <string>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
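+// Generic sanity check for an implicit function fun : R^d -> R^k: the seed
+// must be a point of the ambient space R^d, its image must live in R^k, and
+// fun must vanish at the seed, i.e. the seed lies on the zero-set manifold.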
+template <class Function>
+void test_function(const Function& fun) {
+ Eigen::VectorXd seed = fun.seed();
+  Eigen::VectorXd res_seed = fun(seed);
+ BOOST_CHECK(seed.size() == (long int)fun.amb_d());
+ BOOST_CHECK(res_seed.size() == (long int)fun.cod_d());
+ for (std::size_t i = 0; i < fun.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(res_seed(i), 0., 1e-10);
+}
+
+BOOST_AUTO_TEST_CASE(function) {
+ {
+ // the sphere testing part
+ std::size_t m = 3, d = 5;
+ Eigen::VectorXd center(d);
+ center << 2, 1.5, -0.5, 4.5, -1;
+ double radius = 5;
+ typedef Function_Sm_in_Rd Function_sphere;
+ Function_sphere fun_sphere(radius, m, d, center);
+ test_function(fun_sphere);
+ }
+ {
+ // the affine plane testing part
+ std::size_t m = 0, d = 5;
+ Eigen::MatrixXd normal_matrix = Eigen::MatrixXd::Zero(d, d - m);
+ for (std::size_t i = 0; i < d - m; ++i) normal_matrix(i, i) = 1;
+ typedef Function_affine_plane_in_Rd Function_plane;
+ Function_plane fun_plane(normal_matrix);
+ test_function(fun_plane);
+ }
+ {
+ // the constant function testing part
+ std::size_t k = 2, d = 5;
+    Eigen::VectorXd x = Eigen::VectorXd::Constant(k, 1);
+ Constant_function fun_const(d, k, x);
+ Eigen::VectorXd res_zero = fun_const(Eigen::VectorXd::Zero(d));
+ for (std::size_t i = 0; i < k; ++i) GUDHI_TEST_FLOAT_EQUALITY_CHECK(res_zero(i), x(i), 1e-10);
+ }
+ {
+ // the chair function
+ Function_chair_in_R3 fun_chair;
+ test_function(fun_chair);
+ }
+ {
+ // the torus function
+ Function_torus_in_R3 fun_torus;
+ test_function(fun_torus);
+ }
+ {
+ // the whitney umbrella function
+ Function_whitney_umbrella_in_R3 fun_umbrella;
+ test_function(fun_umbrella);
+ }
+ {
+ // the lemniscate revolution function
+ Function_lemniscate_revolution_in_R3 fun_lemniscate;
+ test_function(fun_lemniscate);
+ }
+ {
+ // the iron function
+ Function_iron_in_R3 fun_iron;
+ test_function(fun_iron);
+ }
+ {
+ Function_moment_curve_in_Rd fun_moment_curve(3, 5);
+ test_function(fun_moment_curve);
+ }
+ {
+ // function embedding
+ Function_iron_in_R3 fun_iron;
+ auto fun_embed = make_embedding(fun_iron, 5);
+    test_function(fun_embed);
+
+ // function translation
+ Eigen::VectorXd off = Eigen::VectorXd::Random(5);
+ auto fun_trans = translate(fun_embed, off);
+ test_function(fun_trans);
+
+ // function linear transformation
+ Eigen::MatrixXd matrix = Eigen::MatrixXd::Random(5, 5);
+ BOOST_CHECK(matrix.determinant() != 0.);
+ auto fun_lin = make_linear_transformation(fun_trans, matrix);
+ test_function(fun_lin);
+
+ // function negative
+ auto fun_neg = negation(fun_lin);
+ test_function(fun_neg);
+
+ // function product
+ typedef Function_Sm_in_Rd Function_sphere;
+ Function_sphere fun_sphere(1, 1);
+ auto fun_prod = make_product_function(fun_sphere, fun_sphere, fun_sphere);
+ test_function(fun_prod);
+
+ // function PL approximation
+ Coxeter_triangulation<> cox_tr(6);
+ typedef Coxeter_triangulation<>::Vertex_handle Vertex_handle;
+ auto fun_pl = make_pl_approximation(fun_prod, cox_tr);
+ Vertex_handle v0 = Vertex_handle(cox_tr.dimension(), 0);
+ Eigen::VectorXd x0 = cox_tr.cartesian_coordinates(v0);
+ Eigen::VectorXd value0 = fun_prod(x0);
+ Eigen::VectorXd pl_value0 = fun_pl(x0);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(value0(i), pl_value0(i), 1e-10);
+ Vertex_handle v1 = v0;
+ v1[0] += 1;
+ Eigen::VectorXd x1 = cox_tr.cartesian_coordinates(v1);
+ Eigen::VectorXd value1 = fun_prod(x1);
+ Eigen::VectorXd pl_value1 = fun_pl(x1);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(value1(i), pl_value1(i), 1e-10);
+ Eigen::VectorXd pl_value_mid = fun_pl(0.5 * x0 + 0.5 * x1);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(0.5 * value0(i) + 0.5 * value1(i), pl_value_mid(i), 1e-10);
+ }
+}
diff --git a/src/Coxeter_triangulation/test/manifold_tracing_test.cpp b/src/Coxeter_triangulation/test/manifold_tracing_test.cpp
new file mode 100644
index 00000000..63497f5a
--- /dev/null
+++ b/src/Coxeter_triangulation/test/manifold_tracing_test.cpp
@@ -0,0 +1,62 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "manifold_tracing"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(manifold_tracing) {
+ // manifold without boundary
+ Function_Sm_in_Rd fun_sph(5.1111, 2);
+ auto oracle = make_oracle(fun_sph);
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ Eigen::VectorXd seed = fun_sph.seed();
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ typename MT::Out_simplex_map out_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, out_simplex_map);
+
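+  // The tracing records one intersection point per simplex of dimension
+  // cod_d (here: edges of the ambient triangulation) that the manifold
+  // crosses; each recorded point has ambient dimension amb_d.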
+ for (auto si_pair : out_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d());
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "out_simplex_map.size() = " << out_simplex_map.size() << "\n";
+ BOOST_CHECK(out_simplex_map.size() == 1118);
+
+ // manifold with boundary
+ Function_Sm_in_Rd fun_boundary(3.0, 2, fun_sph.seed());
+ auto oracle_with_boundary = make_oracle(fun_sph, fun_boundary);
+ typename MT::Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle_with_boundary, interior_simplex_map, boundary_simplex_map);
+ for (auto si_pair : interior_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d());
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "interior_simplex_map.size() = " << interior_simplex_map.size() << "\n";
+ BOOST_CHECK(interior_simplex_map.size() == 96);
+ for (auto si_pair : boundary_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d() + 1);
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "boundary_simplex_map.size() = " << boundary_simplex_map.size() << "\n";
+ BOOST_CHECK(boundary_simplex_map.size() == 54);
+}
diff --git a/src/Coxeter_triangulation/test/oracle_test.cpp b/src/Coxeter_triangulation/test/oracle_test.cpp
new file mode 100644
index 00000000..ed2042f5
--- /dev/null
+++ b/src/Coxeter_triangulation/test/oracle_test.cpp
@@ -0,0 +1,56 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "oracle"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <string>
+
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Cartesian_product.h>
+
+#include <gudhi/Coxeter_triangulation.h>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(oracle) {
+ Function_Sm_in_Rd fun_sph(5.1111, 2);
+ auto oracle = make_oracle(fun_sph);
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+
+ Eigen::VectorXd seed = fun_sph.seed();
+ auto s = cox_tr.locate_point(seed);
+
+ std::size_t num_intersected_edges = 0;
+ for (auto f : s.face_range(oracle.cod_d())) {
+ auto qr = oracle.intersects(f, cox_tr);
+ if (qr.success) num_intersected_edges++;
+ auto vertex_it = f.vertex_range().begin();
+ Eigen::Vector3d p1 = cox_tr.cartesian_coordinates(*vertex_it++);
+ Eigen::Vector3d p2 = cox_tr.cartesian_coordinates(*vertex_it++);
+ BOOST_CHECK(vertex_it == f.vertex_range().end());
+ Eigen::MatrixXd m(3, 3);
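+    // An intersection point of an edge is an affine combination of the edge
+    // endpoints, so the three column vectors below are linearly dependent and
+    // the determinant must vanish.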
+ if (qr.success) {
+ m.col(0) = qr.intersection;
+ m.col(1) = p1;
+ m.col(2) = p2;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(m.determinant(), 0.0, 1e-10);
+ }
+ }
+ BOOST_CHECK(num_intersected_edges == 3 || num_intersected_edges == 4);
+}
diff --git a/src/Coxeter_triangulation/test/perm_rep_test.cpp b/src/Coxeter_triangulation/test/perm_rep_test.cpp
new file mode 100644
index 00000000..a668fc66
--- /dev/null
+++ b/src/Coxeter_triangulation/test/perm_rep_test.cpp
@@ -0,0 +1,61 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "permutahedral_representation"
+#include <boost/test/unit_test.hpp>
+
+#include <gudhi/Permutahedral_representation.h>
+
+BOOST_AUTO_TEST_CASE(permutahedral_representation) {
+ typedef std::vector<int> Vertex;
+ typedef std::vector<std::size_t> Part;
+ typedef std::vector<Part> Partition;
+ typedef Gudhi::coxeter_triangulation::Permutahedral_representation<Vertex, Partition> Simplex_handle;
+ Vertex v0(10, 0);
+ Partition omega = {Part({5}), Part({2}), Part({3, 7}), Part({4, 9}), Part({0, 6, 8}), Part({1, 10})};
+ Simplex_handle s(v0, omega);
+
+ // Dimension check
+ BOOST_CHECK(s.dimension() == 5);
+
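+  // Expected counts below: a 5-simplex has 6 vertices; in the permutahedral
+  // representation it has 6 facets (one per merge of two cyclically
+  // consecutive parts), C(6, 4) = 15 faces of dimension 3, and
+  // sum over parts of (2^|part| - 2) = 12 cofacets (one per split of a part
+  // into two non-empty parts).
+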
+ // Vertex number check
+ std::vector<Vertex> vertices;
+ for (auto& v : s.vertex_range()) vertices.push_back(v);
+ BOOST_CHECK(vertices.size() == 6);
+
+ // Facet number check
+ std::vector<Simplex_handle> facets;
+ for (auto& f : s.facet_range()) facets.push_back(f);
+ BOOST_CHECK(facets.size() == 6);
+
+ // Face of dim 3 number check
+ std::vector<Simplex_handle> faces3;
+ for (auto& f : s.face_range(3)) faces3.push_back(f);
+ BOOST_CHECK(faces3.size() == 15);
+
+ // Cofacet number check
+ std::vector<Simplex_handle> cofacets;
+ for (auto& f : s.cofacet_range()) cofacets.push_back(f);
+ BOOST_CHECK(cofacets.size() == 12);
+
+ // Is face check
+ Vertex v1(10, 0);
+ Partition omega1 = {Part({5}), Part({0, 1, 2, 3, 4, 6, 7, 8, 9, 10})};
+ Simplex_handle s1(v1, omega1);
+ Vertex v2(10, 0);
+ v2[1] = -1;
+ Partition omega2 = {Part({1}), Part({5}), Part({2}), Part({3, 7}), Part({4, 9}), Part({0, 6, 8}), Part({10})};
+ Simplex_handle s2(v2, omega2);
+ BOOST_CHECK(s.is_face_of(s));
+ BOOST_CHECK(s1.is_face_of(s));
+ BOOST_CHECK(!s2.is_face_of(s));
+ BOOST_CHECK(s.is_face_of(s2));
+}
diff --git a/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp b/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp
new file mode 100644
index 00000000..84178741
--- /dev/null
+++ b/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp
@@ -0,0 +1,36 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "random_orthogonal_matrix_function"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Functions/random_orthogonal_matrix.h>
+
+#include <string>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
+// this test is separated as it requires CGAL
+BOOST_AUTO_TEST_CASE(random_orthogonal_matrix_function) {
+  // a matrix Q is orthogonal iff Q^T * Q is the identity; check this entrywise
+ Eigen::MatrixXd matrix = random_orthogonal_matrix(5);
+ Eigen::MatrixXd id_matrix = matrix.transpose() * matrix;
+ for (std::size_t i = 0; i < 5; ++i)
+ for (std::size_t j = 0; j < 5; ++j)
+ if (i == j)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(id_matrix(i, j), 1.0, 1e-10);
+ else
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(id_matrix(i, j), 0.0, 1e-10);
+}
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index ec551882..d5664a49 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -1,4 +1,4 @@
-# Doxyfile 1.8.6
+# Doxyfile 1.8.13
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
-PROJECT_NAME = "GUDHI"
+PROJECT_NAME = "@CMAKE_PROJECT_NAME@"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
@@ -46,10 +46,10 @@ PROJECT_NUMBER = "@GUDHI_VERSION@"
PROJECT_BRIEF = "C++ library for Topological Data Analysis (TDA) and Higher Dimensional Geometry Understanding."
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
PROJECT_LOGO =
@@ -58,9 +58,9 @@ PROJECT_LOGO =
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
-OUTPUT_DIRECTORY = "doc/"
+OUTPUT_DIRECTORY =
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
@@ -70,6 +70,14 @@ OUTPUT_DIRECTORY = "doc/"
CREATE_SUBDIRS = NO
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
@@ -85,14 +93,14 @@ CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
@@ -127,7 +135,7 @@ ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
@@ -144,7 +152,7 @@ FULL_PATH_NAMES = YES
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-STRIP_FROM_PATH =
+STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
@@ -153,7 +161,9 @@ STRIP_FROM_PATH =
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
-STRIP_FROM_INC_PATH = include concept
+STRIP_FROM_INC_PATH = include \
+ concept \
+ @CMAKE_SOURCE_DIR@
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
@@ -197,9 +207,9 @@ MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
@@ -220,13 +230,7 @@ TAB_SIZE = 2
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
+ALIASES = gudhi_example_link{2}="@ref \2 \"\1/\2\""
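+# Reading of the alias above: @gudhi_example_link{dir,file.cpp} expands to a
+# \ref to file.cpp displayed as "dir/file.cpp".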
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
@@ -261,11 +265,14 @@ OPTIMIZE_OUTPUT_VHDL = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C.
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
-# Note For files without extension you can use no_extension as a placeholder.
+# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
@@ -282,10 +289,19 @@ EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 0.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 0
+
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
@@ -325,13 +341,20 @@ SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
+# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
@@ -390,7 +413,7 @@ LOOKUP_CACHE_SIZE = 0
# Build related configuration options
#---------------------------------------------------------------------------
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
@@ -400,35 +423,35 @@ LOOKUP_CACHE_SIZE = 0
EXTRACT_ALL = NO
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = NO
-# This flag is only useful for Objective-C code. When set to YES local methods,
+# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
@@ -453,21 +476,21 @@ HIDE_UNDOC_MEMBERS = YES
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = YES
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
+# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
+# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
@@ -481,7 +504,7 @@ HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
+# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
@@ -490,12 +513,19 @@ INTERNAL_DOCS = NO
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
+# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
@@ -523,14 +553,14 @@ INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
+# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
+# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
@@ -575,27 +605,25 @@ SORT_BY_SCOPE_NAME = NO
STRICT_PROTO_MATCHING = NO
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = NO
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = NO
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = NO
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
@@ -620,8 +648,8 @@ ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
@@ -669,12 +697,10 @@ LAYOUT_FILE =
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. Do not use file names with spaces, bibtex cannot handle them. See
-# also \cite for info how to create references.
+# search path. See also \cite for info how to create references.
-CITE_BIB_FILES = biblio/bibliography.bib \
- biblio/how_to_cite_cgal.bib \
- biblio/how_to_cite_gudhi.bib
+CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \
+ @CMAKE_SOURCE_DIR@/biblio/how_to_cite_gudhi.bib
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
@@ -685,10 +711,10 @@ CITE_BIB_FILES = biblio/bibliography.bib \
# messages are off.
# The default value is: NO.
-QUIET = NO
+QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
@@ -696,7 +722,7 @@ QUIET = NO
WARNINGS = YES
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
@@ -713,12 +739,18 @@ WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
@@ -733,7 +765,7 @@ WARN_FORMAT = "$file:$line: $text"
# messages should be written. If left blank the output is written to standard
# error (stderr).
-WARN_LOGFILE =
+WARN_LOGFILE = doxygen.log
#---------------------------------------------------------------------------
# Configuration options related to the input files
@@ -742,10 +774,10 @@ WARN_LOGFILE =
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
-INPUT =
+INPUT = @CMAKE_SOURCE_DIR@
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -758,14 +790,30 @@ INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-#FILE_PATTERNS =
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
+
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.md \
+ *.mm
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
@@ -780,12 +828,14 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = data/ \
- example/ \
- GudhUI/ \
- cmake/ \
- python/ \
- README.md
+EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \
+ @CMAKE_SOURCE_DIR@/ext/ \
+ @CMAKE_SOURCE_DIR@/README.md \
+ @CMAKE_SOURCE_DIR@/.github \
+ @CMAKE_CURRENT_BINARY_DIR@ \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/GudhUI/ \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/python/
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@@ -801,7 +851,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
-EXCLUDE_PATTERNS = */utilities/*/*.md
+EXCLUDE_PATTERNS = @GUDHI_DOXYGEN_SOURCE_PREFIX@/@GUDHI_DOXYGEN_UTILS_PATH@/*.md
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -818,17 +868,16 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH = biblio/ \
- example/ \
- utilities/ \
- data/
+EXAMPLE_PATH = @CMAKE_SOURCE_DIR@ \
+ @CMAKE_SOURCE_DIR@/data/ \
+ @GUDHI_DOXYGEN_EXAMPLE_PATH@
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
-EXAMPLE_PATTERNS =
+EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
@@ -857,6 +906,10 @@ IMAGE_PATH = @GUDHI_DOXYGEN_IMAGE_PATH@
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
INPUT_FILTER =
@@ -866,11 +919,15 @@ INPUT_FILTER =
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
+# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
@@ -889,7 +946,7 @@ FILTER_SOURCE_PATTERNS =
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
-USE_MDFILE_AS_MAINPAGE = doc/common/main_page.md
+USE_MDFILE_AS_MAINPAGE = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/main_page.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
@@ -930,7 +987,7 @@ REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
@@ -988,13 +1045,6 @@ VERBATIM_HEADERS = YES
ALPHABETICAL_INDEX = YES
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
@@ -1007,7 +1057,7 @@ IGNORE_PREFIX =
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
@@ -1045,7 +1095,7 @@ HTML_FILE_EXTENSION = .html
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_HEADER = doc/common/header.html
+HTML_HEADER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/header.html
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
@@ -1055,7 +1105,7 @@ HTML_HEADER = doc/common/header.html
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_FOOTER = doc/common/footer.html
+HTML_FOOTER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
@@ -1067,18 +1117,20 @@ HTML_FOOTER = doc/common/footer.html
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_STYLESHEET = doc/common/stylesheet.css
+HTML_STYLESHEET =
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
-# defined cascading style sheet that is included after the standard style sheets
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
-# Doxygen will copy the style sheet file to the output directory. For an example
-# see the documentation.
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_EXTRA_STYLESHEET =
+HTML_EXTRA_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
@@ -1090,8 +1142,13 @@ HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
+# Default here is AUTO_LIGHT which means "Automatically set the mode according
+# to the user preference, use light mode if no preference is set".
+# Force it to LIGHT (white), as the rest of the documentation is white.
+HTML_COLORSTYLE = LIGHT
+
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
+# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
@@ -1122,8 +1179,9 @@ HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
@@ -1219,28 +1277,29 @@ GENERATE_HTMLHELP = NO
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
@@ -1353,7 +1412,7 @@ DISABLE_INDEX = YES
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
@@ -1381,7 +1440,7 @@ ENUM_VALUES_PER_LINE = 4
TREEVIEW_WIDTH = 250
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1397,20 +1456,9 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
@@ -1419,6 +1467,17 @@ FORMULA_TRANSPARENT = YES
USE_MATHJAX = YES
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+@GUDHI_DOXYGEN_MATHJAX_VERSION@
+
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
@@ -1440,14 +1499,14 @@ MATHJAX_FORMAT = HTML-CSS
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
-MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2
+MATHJAX_RELPATH =
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
-MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+MATHJAX_EXTENSIONS = @GUDHI_DOXYGEN_MATHJAX_EXTENSIONS@
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
@@ -1480,11 +1539,11 @@ SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
-# are two flavours of web server based searching depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools. See
-# the section "External Indexing and Searching" for details.
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
@@ -1496,7 +1555,7 @@ SERVER_BASED_SEARCH = NO
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
@@ -1509,7 +1568,7 @@ EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
@@ -1547,7 +1606,7 @@ EXTRA_SEARCH_MAPPINGS =
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
@@ -1578,7 +1637,7 @@ LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
@@ -1596,13 +1655,18 @@ COMPACT_LATEX = NO
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the exact syntax to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
-EXTRA_PACKAGES = amsfonts amsmath amssymb
+EXTRA_PACKAGES = amsfonts \
+ amsmath \
+ amssymb
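Each entry of EXTRA_PACKAGES becomes a \usepackage line in the preamble of the generated LaTeX document; for the value above this amounts to (a sketch of the effect, not doxygen's verbatim output):

    \usepackage{amsfonts}
    \usepackage{amsmath}
    \usepackage{amssymb}

and the bracketed form from the comment, EXTRA_PACKAGES=[intlimits]{amsmath}, would map to \usepackage[intlimits]{amsmath}.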
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
@@ -1612,23 +1676,36 @@ EXTRA_PACKAGES = amsfonts amsmath amssymb
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
-# replace them by respectively the title of the page, the current date and time,
-# only the current date, the version number of doxygen, the project name (see
-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer.
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
@@ -1646,8 +1723,8 @@ LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -1670,16 +1747,6 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE = NO
-
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@@ -1688,11 +1755,19 @@ LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
@@ -1707,7 +1782,7 @@ GENERATE_RTF = NO
RTF_OUTPUT = rtf
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
@@ -1748,7 +1823,7 @@ RTF_EXTENSIONS_FILE =
# Configuration options related to the man page output
#---------------------------------------------------------------------------
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
@@ -1772,6 +1847,13 @@ MAN_OUTPUT = man
MAN_EXTENSION = .3
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
@@ -1785,7 +1867,7 @@ MAN_LINKS = NO
# Configuration options related to the XML output
#---------------------------------------------------------------------------
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
@@ -1799,7 +1881,7 @@ GENERATE_XML = NO
XML_OUTPUT = xml
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
@@ -1812,7 +1894,7 @@ XML_PROGRAMLISTING = YES
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
@@ -1830,10 +1912,10 @@ DOCBOOK_OUTPUT = docbook
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
@@ -1842,7 +1924,7 @@ GENERATE_AUTOGEN_DEF = NO
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
@@ -1850,7 +1932,7 @@ GENERATE_AUTOGEN_DEF = NO
GENERATE_PERLMOD = NO
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
@@ -1858,9 +1940,9 @@ GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
+# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
@@ -1880,14 +1962,14 @@ PERLMOD_MAKEVAR_PREFIX =
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
@@ -1903,7 +1985,7 @@ MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
@@ -1945,9 +2027,9 @@ PREDEFINED = protected=private
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
-# all uppercase name, and do not end with a semicolon. Such function macros are
-# typically used for boiler-plate code, and will confuse the parser if not
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
@@ -1967,7 +2049,7 @@ SKIP_FUNCTION_MACROS = YES
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
-# Note: Each tag file must have an unique name (where the name does NOT include
+# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
@@ -1979,53 +2061,32 @@ TAGFILES =
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
+# If the ALLEXTERNALS tag is set to YES, all external classes will be listed
+# in the class index. If set to NO, only the inherited external classes will
+# be listed.
# The default value is: NO.
ALLEXTERNALS = NO
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH = /usr/bin/perl
-
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
+@GUDHI_DOXYGEN_CLASS_DIAGRAMS@
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
@@ -2034,7 +2095,7 @@ MSCGEN_PATH =
DIA_PATH =
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
@@ -2045,7 +2106,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
-# The default value is: NO.
+# The default value is: YES.
HAVE_DOT = YES
@@ -2059,26 +2120,38 @@ HAVE_DOT = YES
DOT_NUM_THREADS = 0
-# When you want a differently looking font n the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
+# DOT_COMMON_ATTR specifies common attributes for nodes, edges and labels of
+# subgraphs. When you want a different-looking font in the dot files that
+# doxygen generates you can specify fontname, fontcolor and fontsize attributes.
+# For details please see <a href=https://graphviz.org/doc/info/attrs.html>Node,
+# Edge and Graph Attributes specification</a>. You need to make sure dot is able
+# to find the font, which can be done by putting it in a standard location or by
+# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font. Default graphviz fontsize is 14.
+# The default value is: fontname=Helvetica,fontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTNAME = Helvetica
+DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
+# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can
+# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. <a
+# href=https://graphviz.org/doc/info/arrows.html>Complete documentation about
+# arrow shapes.</a>
+# The default value is: labelfontname=Helvetica,labelfontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTSIZE = 10
+DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
+# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For a view without boxes
+# around nodes set 'shape=plain' or 'shape=plaintext'; see the <a
+# href=https://www.graphviz.org/doc/info/shapes.html>Shapes specification</a>.
+# The default value is: shape=box,height=0.2,width=0.4.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
+
+# You can set the path where dot can find the font specified with fontname in
+# DOT_COMMON_ATTR and other dot attributes.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
@@ -2107,7 +2180,7 @@ COLLABORATION_GRAPH = NO
GROUP_GRAPHS = YES
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
@@ -2159,7 +2232,8 @@ INCLUDED_BY_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2170,7 +2244,8 @@ CALL_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2193,11 +2268,17 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
-# Possible values are: png, jpg, gif and svg.
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2240,6 +2321,24 @@ MSCFILE_DIRS =
DIAFILE_DIRS =
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
@@ -2264,19 +2363,7 @@ DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
@@ -2293,7 +2380,7 @@ DOT_MULTI_TARGETS = YES
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
diff --git a/src/GudhUI/model/Model.h b/src/GudhUI/model/Model.h
index dd9bdaab..626ef59d 100644
--- a/src/GudhUI/model/Model.h
+++ b/src/GudhUI/model/Model.h
@@ -60,7 +60,7 @@ class CGAL_geometric_flag_complex_wrapper {
void maximal_face(std::vector<int> vertices) {
if (!load_only_points_) {
- // std::cout << "size:" << vertices.size() << std::endl;
+ // std::clog << "size:" << vertices.size() << std::endl;
for (std::size_t i = 0; i < vertices.size(); ++i)
for (std::size_t j = i + 1; j < vertices.size(); ++j)
complex_.add_edge_without_blockers(Vertex_handle(vertices[i]), Vertex_handle(vertices[j]));
@@ -178,7 +178,7 @@ class Model {
void contract_edges(unsigned num_contractions) {
Gudhi::Clock c;
Edge_contractor<Complex> contractor(complex_, num_contractions);
- std::cout << "Time to simplify: " << c.num_seconds() << "s" << std::endl;
+ std::clog << "Time to simplify: " << c.num_seconds() << "s" << std::endl;
}
void collapse_vertices(unsigned num_collapses) {
@@ -192,14 +192,14 @@ class Model {
}
void show_graph_stats() {
- std::cout << "++++++ Graph stats +++++++" << std::endl;
- std::cout << "Num vertices : " << complex_.num_vertices() << std::endl;
- std::cout << "Num edges : " << complex_.num_edges() << std::endl;
- std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl;
- std::cout << "Min/avg/max degree : " << min_degree() << "/" << avg_degree() << "/" << max_degree() << std::endl;
- std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl;
- std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl;
- std::cout << "+++++++++++++++++++++++++" << std::endl;
+ std::clog << "++++++ Graph stats +++++++" << std::endl;
+ std::clog << "Num vertices : " << complex_.num_vertices() << std::endl;
+ std::clog << "Num edges : " << complex_.num_edges() << std::endl;
+ std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl;
+ std::clog << "Min/avg/max degree : " << min_degree() << "/" << avg_degree() << "/" << max_degree() << std::endl;
+ std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl;
+ std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl;
+ std::clog << "+++++++++++++++++++++++++" << std::endl;
}
private:
@@ -226,11 +226,11 @@ class Model {
public:
void show_complex_stats() {
- std::cout << "++++++ Mesh stats +++++++" << std::endl;
- std::cout << "Num vertices : " << complex_.num_vertices() << std::endl;
- std::cout << "Num edges : " << complex_.num_edges() << std::endl;
- std::cout << "Num connected components : " << complex_.num_connected_components() << std::endl;
- std::cout << "+++++++++++++++++++++++++" << std::endl;
+ std::clog << "++++++ Mesh stats +++++++" << std::endl;
+ std::clog << "Num vertices : " << complex_.num_vertices() << std::endl;
+ std::clog << "Num edges : " << complex_.num_edges() << std::endl;
+ std::clog << "Num connected components : " << complex_.num_connected_components() << std::endl;
+ std::clog << "+++++++++++++++++++++++++" << std::endl;
}
void show_complex_dimension() {
@@ -247,18 +247,18 @@ class Model {
euler -= 1;
}
clock.end();
- std::cout << "++++++ Mesh dimension +++++++" << std::endl;
- std::cout << "Dimension : " << dimension << std::endl;
- std::cout << "Euler characteristic : " << euler << std::endl;
- std::cout << "Num simplices : " << num_simplices << std::endl;
- std::cout << "Total time: " << clock << std::endl;
- std::cout << "Time per simplex: " << clock.num_seconds() / num_simplices << " s" << std::endl;
- std::cout << "+++++++++++++++++++++++++" << std::endl;
+ std::clog << "++++++ Mesh dimension +++++++" << std::endl;
+ std::clog << "Dimension : " << dimension << std::endl;
+ std::clog << "Euler characteristic : " << euler << std::endl;
+ std::clog << "Num simplices : " << num_simplices << std::endl;
+ std::clog << "Total time: " << clock << std::endl;
+ std::clog << "Time per simplex: " << clock.num_seconds() / num_simplices << " s" << std::endl;
+ std::clog << "+++++++++++++++++++++++++" << std::endl;
}
void show_homology_group() {
#ifdef _WIN32
- std::cout << "Works only on linux x64 for the moment\n";
+ std::clog << "Works only on linux x64 for the moment\n";
#else
Gudhi::Clock clock;
run_chomp();
@@ -278,16 +278,16 @@ class Model {
else
euler -= 1;
}
- std::cout << "Saw " << num_simplices << " simplices with maximum dimension " << dimension << std::endl;
- std::cout << "The euler characteristic is : " << euler << std::endl;
+ std::clog << "Saw " << num_simplices << " simplices with maximum dimension " << dimension << std::endl;
+ std::clog << "The euler characteristic is : " << euler << std::endl;
}
void show_persistence(int p, double threshold, int max_dim, double min_pers) {
- Persistence_compute<Complex> persistence(complex_, std::cout, Persistence_params(p, threshold, max_dim, min_pers));
+ Persistence_compute<Complex> persistence(complex_, std::clog, Persistence_params(p, threshold, max_dim, min_pers));
}
void show_critical_points(double max_distance) {
- Critical_points<Complex> critical_points(complex_, std::cout, max_distance);
+ Critical_points<Complex> critical_points(complex_, std::clog, max_distance);
}
void show_is_manifold() {
@@ -296,12 +296,12 @@ class Model {
Is_manifold<Complex> test_manifold(complex_, dim, is_manifold);
if (is_manifold) {
- std::cout << "The complex is a " << dim << "-manifold\n";
+ std::clog << "The complex is a " << dim << "-manifold\n";
} else {
if (dim < 4) {
- std::cout << "The complex has dimension greater than " << dim << " and is not a manifold\n";
+ std::clog << "The complex has dimension greater than " << dim << " and is not a manifold\n";
} else {
- std::cout << "The complex has dimension>=4 and may or may not be a manifold\n";
+ std::clog << "The complex has dimension>=4 and may or may not be a manifold\n";
}
}
}
@@ -309,10 +309,10 @@ class Model {
private:
void run_chomp() {
save_complex_in_file_for_chomp();
- std::cout << "Call CHOMP library\n";
+ std::clog << "Call CHOMP library\n";
int returnValue = system("homsimpl chomp.sim");
if (returnValue != 0) {
- std::cout << "homsimpl (from CHOMP) failed. Please check it is installed or available in the PATH."
+ std::cerr << "homsimpl (from CHOMP) failed. Please check it is installed or available in the PATH."
<< std::endl;
}
}
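The Model.h hunks above consistently route progress and statistics to std::clog, keeping std::cerr for real failures (the homsimpl error) and leaving std::cout free for program results. A minimal sketch of that stream convention; the report() helper is hypothetical, not part of GudhUI:

    #include <iostream>

    // Hypothetical helper illustrating the convention applied above:
    // std::cout carries results, std::clog buffered diagnostics,
    // std::cerr unbuffered errors.
    void report(bool ok, int result) {
      std::clog << "computation finished" << std::endl;  // diagnostic trace
      if (!ok) {
        std::cerr << "computation failed" << std::endl;  // error path
        return;
      }
      std::cout << result << std::endl;  // the actual result on stdout
    }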
diff --git a/src/GudhUI/todo.txt b/src/GudhUI/todo.txt
index 19d99a77..e59d06d4 100644
--- a/src/GudhUI/todo.txt
+++ b/src/GudhUI/todo.txt
@@ -18,5 +18,5 @@ x faire le lien MainWindow - Model
-- bug
-x bug ordre contraction -> just that first vertex placement dont work great
+x bug contraction order -> just that first vertex placement doesn't work great
x rips construction problem
diff --git a/src/GudhUI/utils/Bar_code_persistence.h b/src/GudhUI/utils/Bar_code_persistence.h
index cd9b009f..b526017a 100644
--- a/src/GudhUI/utils/Bar_code_persistence.h
+++ b/src/GudhUI/utils/Bar_code_persistence.h
@@ -58,13 +58,13 @@ class Bar_code_persistence {
QGraphicsScene * scene = new QGraphicsScene();
view->setScene(scene);
double ratio = 600.0 / (max_death - min_birth);
- // std::cout << "min_birth=" << min_birth << " - max_death=" << max_death << " - ratio=" << ratio << std::endl;
+ // std::clog << "min_birth=" << min_birth << " - max_death=" << max_death << " - ratio=" << ratio << std::endl;
double height = 0.0, birth = 0.0, death = 0.0;
int pers_num = 1;
for (auto& persistence : persistence_vector) {
height = 5.0 * pers_num;
- // std::cout << "[" << pers_num << "] birth=" << persistence.first << " - death=" << persistence.second << std::endl;
+ // std::clog << "[" << pers_num << "] birth=" << persistence.first << " - death=" << persistence.second << std::endl;
if (std::isfinite(persistence.first))
birth = ((persistence.first - min_birth) * ratio) + 50.0;
else
diff --git a/src/GudhUI/utils/Critical_points.h b/src/GudhUI/utils/Critical_points.h
index 32fcf32e..65695434 100644
--- a/src/GudhUI/utils/Critical_points.h
+++ b/src/GudhUI/utils/Critical_points.h
@@ -65,7 +65,7 @@ template<typename SkBlComplex> class Critical_points {
void anti_collapse_edges(const std::deque<Edge>& edges) {
unsigned pos = 0;
for (Edge e : edges) {
- std::cout << "edge " << pos++ << "/" << edges.size() << "\n";
+ std::clog << "edge " << pos++ << "/" << edges.size() << "\n";
auto eh = filled_complex_.add_edge_without_blockers(e.first, e.second);
int is_contractible(is_link_reducible(eh));
@@ -103,7 +103,7 @@ template<typename SkBlComplex> class Critical_points {
// reduced to one point -> contractible
return 1;
else
- // we dont know
+ // we don't know
return 2;
}
diff --git a/src/GudhUI/utils/Edge_contractor.h b/src/GudhUI/utils/Edge_contractor.h
index 0707b186..a71d0742 100644
--- a/src/GudhUI/utils/Edge_contractor.h
+++ b/src/GudhUI/utils/Edge_contractor.h
@@ -65,7 +65,7 @@ template<typename SkBlComplex> class Edge_contractor {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Edge_contractor(SkBlComplex& complex, unsigned num_contractions) :
complex_(complex), num_contractions_(num_contractions) {
diff --git a/src/GudhUI/utils/Furthest_point_epsilon_net.h b/src/GudhUI/utils/Furthest_point_epsilon_net.h
index 6eb71071..195d0014 100644
--- a/src/GudhUI/utils/Furthest_point_epsilon_net.h
+++ b/src/GudhUI/utils/Furthest_point_epsilon_net.h
@@ -27,7 +27,7 @@ template<typename SkBlComplex> class Furthest_point_epsilon_net {
/**
* Let V be the set of vertices.
- * Initially v0 is one arbitrarly vertex and the set V0 is {v0}.
+ * Initially v0 is an arbitrary vertex and the set V0 is {v0}.
* Then Vk is computed as follows.
* First we compute the vertex pk that is the furthest from Vk
* then Vk = Vk \cup pk.
@@ -54,7 +54,7 @@ template<typename SkBlComplex> class Furthest_point_epsilon_net {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Furthest_point_epsilon_net(SkBlComplex& complex) :
complex_(complex) {
diff --git a/src/GudhUI/utils/K_nearest_builder.h b/src/GudhUI/utils/K_nearest_builder.h
index 34483e58..454b2587 100644
--- a/src/GudhUI/utils/K_nearest_builder.h
+++ b/src/GudhUI/utils/K_nearest_builder.h
@@ -41,7 +41,7 @@ template<typename SkBlComplex> class K_nearest_builder {
public:
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
K_nearest_builder(SkBlComplex& complex, unsigned k) : complex_(complex) {
complex.keep_only_vertices();
diff --git a/src/GudhUI/utils/Lloyd_builder.h b/src/GudhUI/utils/Lloyd_builder.h
index c042564f..57e3dc0f 100644
--- a/src/GudhUI/utils/Lloyd_builder.h
+++ b/src/GudhUI/utils/Lloyd_builder.h
@@ -27,7 +27,7 @@ template<typename SkBlComplex> class Lloyd_builder {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Lloyd_builder(SkBlComplex& complex, unsigned num_iterations) : complex_(complex), dim(-1) {
if (!complex_.empty()) {
diff --git a/src/GudhUI/utils/Rips_builder.h b/src/GudhUI/utils/Rips_builder.h
index aba1a8e4..0300190c 100644
--- a/src/GudhUI/utils/Rips_builder.h
+++ b/src/GudhUI/utils/Rips_builder.h
@@ -43,13 +43,13 @@ template<typename SkBlComplex> class Rips_builder {
void compute_edges(double alpha) {
auto vertices = complex_.vertex_range();
for (auto p = vertices.begin(); p != vertices.end(); ++p) {
- std::cout << *p << " ";
- std::cout.flush();
+ std::clog << *p << " ";
+ std::clog.flush();
for (auto q = p; ++q != vertices.end(); /**/)
if (squared_eucl_distance(complex_.point(*p), complex_.point(*q)) < 4 * alpha * alpha)
complex_.add_edge_without_blockers(*p, *q);
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
};
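compute_edges() above inserts an edge exactly when the squared Euclidean distance is below 4 * alpha * alpha, i.e. d(p, q) < 2 * alpha, so the Rips test never computes a square root. The predicate in isolation, as a self-contained sketch:

    #include <cstddef>
    #include <vector>

    // Sketch: Rips edge test at scale alpha using squared distances only,
    // equivalent to d(p, q) < 2 * alpha without calling std::sqrt.
    bool is_rips_edge(const std::vector<double>& p,
                      const std::vector<double>& q, double alpha) {
      double sq_dist = 0.0;
      for (std::size_t i = 0; i < p.size(); ++i)
        sq_dist += (p[i] - q[i]) * (p[i] - q[i]);
      return sq_dist < 4.0 * alpha * alpha;
    }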
diff --git a/src/GudhUI/utils/Vertex_collapsor.h b/src/GudhUI/utils/Vertex_collapsor.h
index 030e4bb0..b1c48efd 100644
--- a/src/GudhUI/utils/Vertex_collapsor.h
+++ b/src/GudhUI/utils/Vertex_collapsor.h
@@ -31,7 +31,7 @@ template<typename SkBlComplex> class Vertex_collapsor {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Vertex_collapsor(SkBlComplex& complex, size_t num_collapses) :
complex_(complex), num_collapses_(num_collapses) {
diff --git a/src/GudhUI/view/View_parameter.h b/src/GudhUI/view/View_parameter.h
index dfd3aa41..3671f4fb 100644
--- a/src/GudhUI/view/View_parameter.h
+++ b/src/GudhUI/view/View_parameter.h
@@ -52,13 +52,13 @@ class View_parameter {
void change_vertex_mode() {
int current_value = vertex_mode;
vertex_mode = static_cast<VERTEX_MODE> (++current_value % V_COUNT);
- std::cout << "Vertex mode : ";
+ std::clog << "Vertex mode : ";
switch (vertex_mode) {
case V_NONE:
- std::cout << "empty\n";
+ std::clog << "empty\n";
break;
case V_SIMPLE:
- std::cout << "simple\n";
+ std::clog << "simple\n";
break;
default:
break;
diff --git a/src/GudhUI/view/Viewer.cpp b/src/GudhUI/view/Viewer.cpp
index 6b17c833..2c00f86f 100644
--- a/src/GudhUI/view/Viewer.cpp
+++ b/src/GudhUI/view/Viewer.cpp
@@ -31,7 +31,11 @@ void Viewer::set_bounding_box(const Point_3 & lower_left, const Point_3 & upper_
}
void Viewer::update_GL() {
+#if QGLVIEWER_VERSION >= 0x020700
+ this->update();
+#else
this->updateGL();
+#endif
}
void Viewer::init_scene() {
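update_GL() above gates on QGLVIEWER_VERSION because libQGLViewer 2.7.0 replaced updateGL() with the Qt-style update(). The same compile-time dispatch as a standalone sketch; the version macro and the two methods are libQGLViewer's, the free function is illustrative:

    #include <QGLViewer/qglviewer.h>

    // Sketch: pick the repaint entry point at compile time from the
    // libQGLViewer version macro, as the hunk above does.
    void refresh(QGLViewer* viewer) {
    #if QGLVIEWER_VERSION >= 0x020700
      viewer->update();    // name used since 2.7.0
    #else
      viewer->updateGL();  // legacy name in older releases
    #endif
    }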
diff --git a/src/GudhUI/view/Viewer_instructor.h b/src/GudhUI/view/Viewer_instructor.h
index 58cbcd31..09ed102f 100644
--- a/src/GudhUI/view/Viewer_instructor.h
+++ b/src/GudhUI/view/Viewer_instructor.h
@@ -11,7 +11,7 @@
#ifndef VIEW_VIEWER_INSTRUCTOR_H_
#define VIEW_VIEWER_INSTRUCTOR_H_
-// todo do a viewer instructor that have directely a pointer to a QGLviewer and buffer ot not triangles
+// todo do a viewer instructor that has directly a pointer to a QGLviewer and buffers triangles or not
#include <QFileDialog>
#include <QKeyEvent>
diff --git a/src/Hasse_complex/include/gudhi/Hasse_complex.h b/src/Hasse_complex/include/gudhi/Hasse_complex.h
index 209fd0b9..8ce8c36f 100644
--- a/src/Hasse_complex/include/gudhi/Hasse_complex.h
+++ b/src/Hasse_complex/include/gudhi/Hasse_complex.h
@@ -173,9 +173,9 @@ class Hasse_complex {
}
void display_simplex(Simplex_handle sh) {
- std::cout << dimension(sh) << " ";
- for (auto sh_b : boundary_simplex_range(sh)) std::cout << sh_b << " ";
- std::cout << " " << filtration(sh) << " key=" << key(sh);
+ std::clog << dimension(sh) << " ";
+ for (auto sh_b : boundary_simplex_range(sh)) std::clog << sh_b << " ";
+ std::clog << " " << filtration(sh) << " key=" << key(sh);
}
void initialize_filtration() {
diff --git a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
index f9441b24..e1ab7cb3 100644
--- a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
+++ b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
@@ -24,7 +24,7 @@ namespace cover_complex {
* Visualizations of the simplicial complexes can be done with either
* neato (from <a target="_blank" href="http://www.graphviz.org/">graphviz</a>),
* <a target="_blank" href="http://www.geomview.org/">geomview</a>,
- * <a target="_blank" href="https://github.com/MLWave/kepler-mapper">KeplerMapper</a>.
+ * <a target="_blank" href="https://github.com/scikit-tda/kepler-mapper">KeplerMapper</a>.
* Input point clouds are assumed to be \ref FileFormatsOFF "OFF files"
*
* \section covers Covers
@@ -53,7 +53,7 @@ namespace cover_complex {
* covering the height function (coordinate 2),
* which are then refined into their connected components using the triangulation of the .OFF file.
*
- * \include Nerve_GIC/Nerve.cpp
+ * \include Nerve.cpp
*
* When launching:
*
@@ -62,7 +62,7 @@ namespace cover_complex {
*
* the program output is:
*
- * \include Nerve_GIC/Nerve.txt
+ * \include Nerve.txt
*
* The program also writes a file ../../data/points/human_sc.txt. The first three lines in this file are the location
* of the input point cloud and the function used to compute the cover.
@@ -96,7 +96,7 @@ namespace cover_complex {
* comes from the triangulation of the human shape. Note that the resulting simplicial complex is in dimension 3
* in this example.
*
- * \include Nerve_GIC/VoronoiGIC.cpp
+ * \include VoronoiGIC.cpp
*
* When launching:
*
@@ -129,7 +129,7 @@ namespace cover_complex {
* with automatic resolution and gain. Note that automatic threshold, resolution and gain
* can be computed as well for the Nerve.
*
- * \include Nerve_GIC/CoordGIC.cpp
+ * \include CoordGIC.cpp
*
* When launching:
*
@@ -152,7 +152,7 @@ namespace cover_complex {
* The function is now the first eigenfunction given by PCA, whose values
* are written in a file (lucky_cat_PCA1). Threshold, resolution and gain are automatically selected as before.
*
- * \include Nerve_GIC/FuncGIC.cpp
+ * \include FuncGIC.cpp
*
* When launching:
*
diff --git a/src/Nerve_GIC/example/CMakeLists.txt b/src/Nerve_GIC/example/CMakeLists.txt
index 1667472f..9faf1f3b 100644
--- a/src/Nerve_GIC/example/CMakeLists.txt
+++ b/src/Nerve_GIC/example/CMakeLists.txt
@@ -1,28 +1,21 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( CoordGIC CoordGIC.cpp )
+add_executable ( FuncGIC FuncGIC.cpp )
- add_executable ( CoordGIC CoordGIC.cpp )
- add_executable ( FuncGIC FuncGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(CoordGIC ${TBB_LIBRARIES})
+ target_link_libraries(FuncGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(CoordGIC ${TBB_LIBRARIES})
- target_link_libraries(FuncGIC ${TBB_LIBRARIES})
- endif()
+# Copy files so as not to pollute sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
- add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
-
- add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
-
- install(TARGETS CoordGIC DESTINATION bin)
- install(TARGETS FuncGIC DESTINATION bin)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
diff --git a/src/Nerve_GIC/example/CoordGIC.cpp b/src/Nerve_GIC/example/CoordGIC.cpp
index fd9c224a..f0afdca5 100644
--- a/src/Nerve_GIC/example/CoordGIC.cpp
+++ b/src/Nerve_GIC/example/CoordGIC.cpp
@@ -40,7 +40,7 @@ int main(int argc, char **argv) {
bool check = GIC.read_point_cloud(off_file_name);
if (!check) {
- std::cout << "Incorrect OFF file." << std::endl;
+ std::clog << "Incorrect OFF file." << std::endl;
} else {
GIC.set_type("GIC");
@@ -67,15 +67,15 @@ int main(int argc, char **argv) {
// --------------------------------------------
if (verb) {
- std::cout << "Coordinate GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices()
+ std::clog << "Coordinate GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices()
<< " simplices - " << stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on coordinate GIC simplices" << std::endl;
+ std::clog << "Iterator on coordinate GIC simplices" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
}
diff --git a/src/Nerve_GIC/example/FuncGIC.cpp b/src/Nerve_GIC/example/FuncGIC.cpp
index 5a323795..518e1826 100644
--- a/src/Nerve_GIC/example/FuncGIC.cpp
+++ b/src/Nerve_GIC/example/FuncGIC.cpp
@@ -41,7 +41,7 @@ int main(int argc, char **argv) {
bool check = GIC.read_point_cloud(off_file_name);
if (!check) {
- std::cout << "Incorrect OFF file." << std::endl;
+ std::clog << "Incorrect OFF file." << std::endl;
} else {
GIC.set_type("GIC");
@@ -65,15 +65,15 @@ int main(int argc, char **argv) {
// --------------------------------------------
if (verb) {
- std::cout << "Functional GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices()
+ std::clog << "Functional GIC is of dimension " << stree.dimension() << " - " << stree.num_simplices()
<< " simplices - " << stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on functional GIC simplices" << std::endl;
+ std::clog << "Iterator on functional GIC simplices" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
}
diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index 2a6d4788..047fba61 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -17,6 +17,14 @@
#include <mutex>
#endif
+#if __has_include(<CGAL/version.h>)
+# define GUDHI_GIC_USE_CGAL 1
+# include <gudhi/Bottleneck.h>
+#elif __has_include(<hera/bottleneck.h>)
+# define GUDHI_GIC_USE_HERA 1
+# include <hera/bottleneck.h>
+#endif
+
#include <gudhi/Debug_utils.h>
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/reader_utils.h>
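The added include block picks a bottleneck-distance backend at preprocessing time with __has_include, standardized in C++17. A stripped-down sketch of the pattern against a standard header; the macro name is illustrative and unrelated to GUDHI's actual configuration:

    // Sketch: test header availability before including it.
    #if defined(__has_include)
    #  if __has_include(<optional>)
    #    include <optional>
    #    define DEMO_HAS_OPTIONAL 1
    #  endif
    #endif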
@@ -25,7 +33,6 @@
#include <gudhi/Points_off_io.h>
#include <gudhi/distance_functions.h>
#include <gudhi/Persistent_cohomology.h>
-#include <gudhi/Bottleneck.h>
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
@@ -35,8 +42,6 @@
#include <boost/graph/subgraph.hpp>
#include <boost/graph/graph_utility.hpp>
-#include <CGAL/version.h> // for CGAL_VERSION_NR
-
#include <iostream>
#include <vector>
#include <map>
@@ -139,19 +144,9 @@ class Cover_complex {
for (boost::tie(ei, ei_end) = boost::edges(G); ei != ei_end; ++ei) boost::remove_edge(*ei, G);
}
- // Thread local is not available on XCode version < V.8
- // If not available, random engine is a class member.
-#ifndef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::default_random_engine re;
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
-
// Find random number in [0,1].
double GetUniform() {
- // Thread local is not available on XCode version < V.8
- // If available, random engine is defined for each thread.
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
thread_local std::default_random_engine re;
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
std::uniform_real_distribution<double> Dist(0, 1);
return Dist(re);
}
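GetUniform() now relies unconditionally on a thread_local engine, dropping the old fallback for XCode < 8 where the engine had to be a class member. The pattern standalone, default-seeded like the original, as a sketch:

    #include <random>

    // Sketch: one default_random_engine per thread, so concurrent
    // callers never share (or race on) generator state.
    double uniform01() {
      thread_local std::default_random_engine re;
      std::uniform_real_distribution<double> dist(0.0, 1.0);
      return dist(re);
    }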
@@ -344,7 +339,7 @@ class Cover_complex {
if (num_edges(one_skeleton_OFF))
one_skeleton = one_skeleton_OFF;
else
- std::cout << "No triangulation read in OFF file!" << std::endl;
+ std::cerr << "No triangulation read in OFF file!" << std::endl;
}
public: // Set graph from Rips complex.
@@ -407,7 +402,7 @@ class Cover_complex {
std::ifstream input(distance, std::ios::out | std::ios::binary);
if (input.good()) {
- if (verbose) std::cout << "Reading distances..." << std::endl;
+ if (verbose) std::clog << "Reading distances..." << std::endl;
for (int i = 0; i < n; i++) {
for (int j = i; j < n; j++) {
input.read((char*)&d, 8);
@@ -417,12 +412,12 @@ class Cover_complex {
}
input.close();
} else {
- if (verbose) std::cout << "Computing distances..." << std::endl;
+ if (verbose) std::clog << "Computing distances..." << std::endl;
input.close();
std::ofstream output(distance, std::ios::out | std::ios::binary);
for (int i = 0; i < n; i++) {
int state = (int)floor(100 * (i * 1.0 + 1) / n) % 10;
- if (state == 0 && verbose) std::cout << "\r" << state << "%" << std::flush;
+ if (state == 0 && verbose) std::clog << "\r" << state << "%" << std::flush;
for (int j = i; j < n; j++) {
double dis = ref_distance(point_cloud[i], point_cloud[j]);
distances[i][j] = dis;
@@ -431,7 +426,7 @@ class Cover_complex {
}
}
output.close();
- if (verbose) std::cout << std::endl;
+ if (verbose) std::clog << std::endl;
}
}
@@ -451,14 +446,12 @@ class Cover_complex {
m = (std::min)(m, n - 1);
double delta = 0;
- if (verbose) std::cout << n << " points in R^" << data_dimension << std::endl;
- if (verbose) std::cout << "Subsampling " << m << " points" << std::endl;
+ if (verbose) std::clog << n << " points in R^" << data_dimension << std::endl;
+ if (verbose) std::clog << "Subsampling " << m << " points" << std::endl;
if (distances.size() == 0) compute_pairwise_distances(distance);
- // This cannot be parallelized if thread_local is not defined
- // thread_local is not defined for XCode < v.8
- #if defined(GUDHI_USE_TBB) && defined(GUDHI_CAN_USE_CXX11_THREAD_LOCAL)
+ #ifdef GUDHI_USE_TBB
std::mutex deltamutex;
tbb::parallel_for(0, N, [&](int i){
std::vector<int> samples(m);
@@ -487,7 +480,7 @@ class Cover_complex {
}
#endif
- if (verbose) std::cout << "delta = " << delta << std::endl;
+ if (verbose) std::clog << "delta = " << delta << std::endl;
set_graph_from_rips(delta, distance);
return delta;
}
@@ -530,7 +523,7 @@ class Cover_complex {
cover_name = "coordinate " + std::to_string(k);
}
else{
- std::cout << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl;
+ std::cerr << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl;
for (int i = 0; i < n; i++) func.push_back(0.0);
functional_cover = true;
cover_name = "null";
@@ -563,11 +556,11 @@ class Cover_complex {
*/
double set_automatic_resolution() {
if (!functional_cover) {
- std::cout << "Cover needs to come from the preimages of a function." << std::endl;
+ std::cerr << "Cover needs to come from the preimages of a function." << std::endl;
return 0;
}
if (type != "Nerve" && type != "GIC") {
- std::cout << "Type of complex needs to be specified." << std::endl;
+ std::cerr << "Type of complex needs to be specified." << std::endl;
return 0;
}
@@ -579,7 +572,7 @@ class Cover_complex {
for (boost::tie(ei, ei_end) = boost::edges(one_skeleton); ei != ei_end; ++ei)
reso = (std::max)(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] -
func[index[boost::target(*ei, one_skeleton)]]));
- if (verbose) std::cout << "resolution = " << reso << std::endl;
+ if (verbose) std::clog << "resolution = " << reso << std::endl;
resolution_double = reso;
}
@@ -589,7 +582,7 @@ class Cover_complex {
reso = (std::max)(reso, std::abs(func[index[boost::source(*ei, one_skeleton)]] -
func[index[boost::target(*ei, one_skeleton)]]) /
gain);
- if (verbose) std::cout << "resolution = " << reso << std::endl;
+ if (verbose) std::clog << "resolution = " << reso << std::endl;
resolution_double = reso;
}
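Read together, the two hunks above compute the automatic resolution as the largest variation of the cover function along an edge of the one-skeleton, divided in the second branch by the gain. As a formula, reading E for the edge set, f for the cover function and g for the gain:

    \mathrm{reso} = \max_{(u,v) \in E} |f(u) - f(v)|  \qquad\text{or}\qquad  \mathrm{reso} = \frac{1}{g} \max_{(u,v) \in E} |f(u) - f(v)|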
@@ -622,11 +615,11 @@ class Cover_complex {
*/
void set_cover_from_function() {
if (resolution_double == -1 && resolution_int == -1) {
- std::cout << "Number and/or length of intervals not specified" << std::endl;
+ std::cerr << "Number and/or length of intervals not specified" << std::endl;
return;
}
if (gain == -1) {
- std::cout << "Gain not specified" << std::endl;
+ std::cerr << "Gain not specified" << std::endl;
return;
}
@@ -637,7 +630,7 @@ class Cover_complex {
minf = (std::min)(minf, func[i]);
maxf = (std::max)(maxf, func[i]);
}
- if (verbose) std::cout << "Min function value = " << minf << " and Max function value = " << maxf << std::endl;
+ if (verbose) std::clog << "Min function value = " << minf << " and Max function value = " << maxf << std::endl;
// Compute cover of im(f)
std::vector<std::pair<double, double> > intervals;
@@ -663,7 +656,7 @@ class Cover_complex {
res = intervals.size();
if (verbose) {
for (int i = 0; i < res; i++)
- std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
<< std::endl;
}
} else {
@@ -681,7 +674,7 @@ class Cover_complex {
res = intervals.size();
if (verbose) {
for (int i = 0; i < res; i++)
- std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
<< std::endl;
}
} else { // Case we use an integer and a double for the length of the intervals.
@@ -698,7 +691,7 @@ class Cover_complex {
res = intervals.size();
if (verbose) {
for (int i = 0; i < res; i++)
- std::cout << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
+ std::clog << "Interval " << i << " = [" << intervals[i].first << ", " << intervals[i].second << "]"
<< std::endl;
}
}
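The three branches above build the interval cover of [minf, maxf] from a resolution (a number or a length of intervals) and a gain. A sketch under one common Mapper convention, uniformly spaced intervals whose overlap fraction equals the gain; the exact formulas used here may differ in detail:

```cpp
// Hypothetical sketch, not the verbatim GUDHI formula: res intervals covering
// [minf, maxf], consecutive intervals overlapping by a fraction `gain` of
// their length.
#include <iostream>
#include <utility>
#include <vector>

std::vector<std::pair<double, double>> uniform_cover(double minf, double maxf,
                                                     int res, double gain) {
  // Solve res * len - (res - 1) * gain * len = maxf - minf for len.
  double len = (maxf - minf) / (res - (res - 1) * gain);
  double step = len * (1.0 - gain);  // distance between interval left endpoints
  std::vector<std::pair<double, double>> intervals;
  for (int i = 0; i < res; i++)
    intervals.emplace_back(minf + i * step, minf + i * step + len);
  return intervals;
}

int main() {
  for (auto [a, b] : uniform_cover(0.0, 1.0, 4, 0.3))
    std::clog << "[" << a << ", " << b << "]" << std::endl;
}
```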
@@ -715,7 +708,7 @@ class Cover_complex {
std::map<int, std::vector<int> > preimages;
std::map<int, double> funcstd;
- if (verbose) std::cout << "Computing preimages..." << std::endl;
+ if (verbose) std::clog << "Computing preimages..." << std::endl;
for (int i = 0; i < res; i++) {
// Find points in the preimage
std::pair<double, double> inter1 = intervals[i];
@@ -764,7 +757,7 @@ class Cover_complex {
}
#ifdef GUDHI_USE_TBB
- if (verbose) std::cout << "Computing connected components (parallelized)..." << std::endl;
+ if (verbose) std::clog << "Computing connected components (parallelized)..." << std::endl;
std::mutex covermutex, idmutex;
tbb::parallel_for(0, res, [&](int i){
// Compute connected components
@@ -800,7 +793,7 @@ class Cover_complex {
idmutex.unlock();
});
#else
- if (verbose) std::cout << "Computing connected components..." << std::endl;
+ if (verbose) std::clog << "Computing connected components..." << std::endl;
for (int i = 0; i < res; i++) {
// Compute connected components
Graph G = one_skeleton.create_subgraph();
@@ -894,7 +887,7 @@ class Cover_complex {
// Compute the geodesic distances to subsamples with Dijkstra
#ifdef GUDHI_USE_TBB
- if (verbose) std::cout << "Computing geodesic distances (parallelized)..." << std::endl;
+ if (verbose) std::clog << "Computing geodesic distances (parallelized)..." << std::endl;
std::mutex coverMutex; std::mutex mindistMutex;
tbb::parallel_for(0, m, [&](int i){
int seed = voronoi_subsamples[i];
@@ -916,7 +909,7 @@ class Cover_complex {
});
#else
for (int i = 0; i < m; i++) {
- if (verbose) std::cout << "Computing geodesic distances to seed " << i << "..." << std::endl;
+ if (verbose) std::clog << "Computing geodesic distances to seed " << i << "..." << std::endl;
int seed = voronoi_subsamples[i];
std::vector<double> dmap(n);
boost::dijkstra_shortest_paths(
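Both branches above run a single-source Dijkstra from each Voronoi seed on the edge-weighted one-skeleton. A self-contained BGL sketch of that step on a toy graph (the graph construction here is illustrative, not GUDHI's):

```cpp
// Single-source Dijkstra on an edge-weighted Boost graph, as used per seed.
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <iostream>
#include <vector>

int main() {
  using Graph = boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS,
                                      boost::no_property,
                                      boost::property<boost::edge_weight_t, double>>;
  Graph g(4);
  boost::add_edge(0, 1, 1.0, g);
  boost::add_edge(1, 2, 2.0, g);
  boost::add_edge(2, 3, 0.5, g);

  int seed = 0;  // would be voronoi_subsamples[i] in the code above
  std::vector<double> dmap(boost::num_vertices(g));
  boost::dijkstra_shortest_paths(
      g, boost::vertex(seed, g),
      boost::distance_map(boost::make_iterator_property_map(
          dmap.begin(), boost::get(boost::vertex_index, g))));

  for (std::size_t v = 0; v < dmap.size(); ++v)
    std::clog << "d(" << seed << ", " << v << ") = " << dmap[v] << std::endl;
}
```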
@@ -991,7 +984,7 @@ class Cover_complex {
color_name.append(std::to_string(k));
}
else{
- std::cout << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl;
+ std::cerr << "Only pairwise distances provided---cannot access " << k << "th coordinate; returning null vector instead" << std::endl;
for (int i = 0; i < n; i++) func.push_back(0.0);
functional_cover = true;
cover_name = "null";
@@ -1054,7 +1047,7 @@ class Cover_complex {
}
graphic << "}";
graphic.close();
- std::cout << mapp << " file generated. It can be visualized with e.g. neato." << std::endl;
+ std::clog << mapp << " file generated. It can be visualized with e.g. neato." << std::endl;
}
public: // Create a .txt file that can be compiled with KeplerMapper.
@@ -1090,7 +1083,7 @@ class Cover_complex {
if (cover_color[simplices[i][0]].first > mask && cover_color[simplices[i][1]].first > mask)
graphic << name2id[simplices[i][0]] << " " << name2id[simplices[i][1]] << std::endl;
graphic.close();
- std::cout << mapp
+ std::clog << mapp
<< " generated. It can be visualized with e.g. python KeplerMapperVisuFromTxtFile.py and firefox."
<< std::endl;
}
@@ -1137,7 +1130,7 @@ class Cover_complex {
for (int i = 0; i < numfaces; i++)
graphic << 3 << " " << faces[i][0] << " " << faces[i][1] << " " << faces[i][2] << std::endl;
graphic.close();
- std::cout << mapp << " generated. It can be visualized with e.g. geomview." << std::endl;
+ std::clog << mapp << " generated. It can be visualized with e.g. geomview." << std::endl;
}
// *******************************************************************************************************************
@@ -1185,7 +1178,7 @@ class Cover_complex {
for (int i = 0; i < max_dim; i++) {
std::vector<std::pair<double, double> > bars = pcoh.intervals_in_dimension(i);
int num_bars = bars.size(); if(i == 0) num_bars -= 1;
- if(verbose) std::cout << num_bars << " interval(s) in dimension " << i << ":" << std::endl;
+ if(verbose) std::clog << num_bars << " interval(s) in dimension " << i << ":" << std::endl;
for (int j = 0; j < num_bars; j++) {
double birth = bars[j].first;
double death = bars[j].second;
@@ -1199,7 +1192,7 @@ class Cover_complex {
else
death = minf + (2 - death) * (maxf - minf);
PD.push_back(std::pair<double, double>(birth, death));
- if (verbose) std::cout << " [" << birth << ", " << death << "]" << std::endl;
+ if (verbose) std::clog << " [" << birth << ", " << death << "]" << std::endl;
}
}
return PD;
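The rescaling above assumes the filtration values were normalized so that [0, 1] encodes the upward sweep and [1, 2] the downward sweep of extended persistence. A sketch of the inverse map, under that assumed encoding:

```cpp
// Hypothetical sketch of the inverse rescaling applied above.
#include <iostream>

double unscale(double t, double minf, double maxf) {
  if (t <= 1.0) return minf + t * (maxf - minf);  // ordinary (upward) part
  return minf + (2.0 - t) * (maxf - minf);        // extended (downward) part
}

int main() {
  // 0.25 and 1.75 map to the same function value, from opposite sweeps:
  std::clog << unscale(0.25, 2.0, 10.0) << " " << unscale(1.75, 2.0, 10.0) << std::endl;
}
```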
@@ -1213,11 +1206,9 @@ class Cover_complex {
*/
void compute_distribution(unsigned int N = 100) {
unsigned int sz = distribution.size();
- if (sz >= N) {
- std::cout << "Already done!" << std::endl;
- } else {
+ if (sz < N) {
for (unsigned int i = 0; i < N - sz; i++) {
- if (verbose) std::cout << "Computing " << i << "th bootstrap, bottleneck distance = ";
+ if (verbose) std::clog << "Computing " << i << "th bootstrap, bottleneck distance = ";
Cover_complex Cboot; Cboot.n = this->n; Cboot.data_dimension = this->data_dimension; Cboot.type = this->type; Cboot.functional_cover = true;
@@ -1242,8 +1233,15 @@ class Cover_complex {
Cboot.set_cover_from_function();
Cboot.find_simplices();
Cboot.compute_PD();
+#ifdef GUDHI_GIC_USE_CGAL
double db = Gudhi::persistence_diagram::bottleneck_distance(this->PD, Cboot.PD);
- if (verbose) std::cout << db << std::endl;
+#elif defined GUDHI_GIC_USE_HERA
+ double db = hera::bottleneckDistExact(this->PD, Cboot.PD);
+#else
+ double db;
+ throw std::logic_error("This function requires CGAL or Hera for the bottleneck distance.");
+#endif
+ if (verbose) std::clog << db << std::endl;
distribution.push_back(db);
}
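The new conditional block selects the bottleneck-distance backend at compile time. A distilled sketch of the same dispatch pattern (macro names taken from the patch; the function body is illustrative, and when a branch is enabled the corresponding header must be included — without either macro the code still compiles and fails loudly at runtime):

```cpp
// Compile-time backend dispatch: prefer CGAL, fall back to Hera, else throw.
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

using Diagram = std::vector<std::pair<double, double>>;

double bottleneck(const Diagram& a, const Diagram& b) {
#ifdef GUDHI_GIC_USE_CGAL
  return Gudhi::persistence_diagram::bottleneck_distance(a, b);
#elif defined GUDHI_GIC_USE_HERA
  return hera::bottleneckDistExact(a, b);
#else
  (void)a; (void)b;
  throw std::logic_error("bottleneck distance requires CGAL or Hera");
#endif
}

int main() {
  try {
    bottleneck({{0.0, 1.0}}, {{0.0, 2.0}});
  } catch (const std::logic_error& e) {
    std::clog << e.what() << std::endl;  // reached when neither backend is built in
  }
}
```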
@@ -1260,7 +1258,7 @@ class Cover_complex {
double compute_distance_from_confidence_level(double alpha) {
unsigned int N = distribution.size();
double d = distribution[std::floor(alpha * N)];
- if (verbose) std::cout << "Distance corresponding to confidence " << alpha << " is " << d << std::endl;
+ if (verbose) std::clog << "Distance corresponding to confidence " << alpha << " is " << d << std::endl;
return d;
}
@@ -1275,7 +1273,7 @@ class Cover_complex {
double level = 1;
for (unsigned int i = 0; i < N; i++)
if (distribution[i] >= d){ level = i * 1.0 / N; break; }
- if (verbose) std::cout << "Confidence level of distance " << d << " is " << level << std::endl;
+ if (verbose) std::clog << "Confidence level of distance " << d << " is " << level << std::endl;
return level;
}
@@ -1288,7 +1286,7 @@ class Cover_complex {
double distancemin = (std::numeric_limits<double>::max)(); int N = PD.size();
for (int i = 0; i < N; i++) distancemin = (std::min)(distancemin, 0.5 * std::abs(PD[i].second - PD[i].first));
double p_value = 1 - compute_confidence_level_from_distance(distancemin);
- if (verbose) std::cout << "p value = " << p_value << std::endl;
+ if (verbose) std::clog << "p value = " << p_value << std::endl;
return p_value;
}
@@ -1319,7 +1317,7 @@ class Cover_complex {
*/
void find_simplices() {
if (type != "Nerve" && type != "GIC") {
- std::cout << "Type of complex needs to be specified." << std::endl;
+ std::cerr << "Type of complex needs to be specified." << std::endl;
return;
}
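The pattern enforced throughout this patch: std::cout stays reserved for the program's actual output, verbose progress goes to std::clog (buffered, writes to stderr), and misuse diagnostics go to std::cerr (unbuffered, writes to stderr). A minimal illustration:

```cpp
// Stream convention: cout = results, clog = progress logs, cerr = errors.
#include <iostream>

int main() {
  std::clog << "verbose: resolution = 0.3" << std::endl;               // progress log
  std::cerr << "Type of complex needs to be specified." << std::endl;  // error report
  std::cout << 42 << std::endl;                                        // the result
}
```

Because clog and cerr both target stderr, `./prog > result.txt` captures only the result; logs and diagnostics stay on the terminal.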
diff --git a/src/Nerve_GIC/test/CMakeLists.txt b/src/Nerve_GIC/test/CMakeLists.txt
index 567bf43f..e012a178 100644
--- a/src/Nerve_GIC/test/CMakeLists.txt
+++ b/src/Nerve_GIC/test/CMakeLists.txt
@@ -1,15 +1,12 @@
project(Graph_induced_complex_tests)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_boost_test)
+include(GUDHI_boost_test)
- add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
- if (TBB_FOUND)
- target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
- endif()
+add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
+endif()
- file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- gudhi_add_boost_test(Nerve_GIC_test_unit)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+gudhi_add_boost_test(Nerve_GIC_test_unit)
diff --git a/src/Nerve_GIC/utilities/CMakeLists.txt b/src/Nerve_GIC/utilities/CMakeLists.txt
index 65a08d9a..4521a992 100644
--- a/src/Nerve_GIC/utilities/CMakeLists.txt
+++ b/src/Nerve_GIC/utilities/CMakeLists.txt
@@ -1,27 +1,23 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( Nerve Nerve.cpp )
+add_executable ( VoronoiGIC VoronoiGIC.cpp )
- add_executable ( Nerve Nerve.cpp )
- add_executable ( VoronoiGIC VoronoiGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve ${TBB_LIBRARIES})
+ target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(Nerve ${TBB_LIBRARIES})
- target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
- endif()
+file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+# Copy files so as not to pollute the sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
+ "human.off" "2" "10" "0.3")
- add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
- "human.off" "2" "10" "0.3")
+add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
+ "human.off" "100")
- add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
- "human.off" "100")
-
- install(TARGETS Nerve DESTINATION bin)
- install(TARGETS VoronoiGIC DESTINATION bin)
- install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+install(TARGETS Nerve DESTINATION bin)
+install(TARGETS VoronoiGIC DESTINATION bin)
+install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
diff --git a/src/Nerve_GIC/utilities/Nerve.cpp b/src/Nerve_GIC/utilities/Nerve.cpp
index d34e922c..7b09f89d 100644
--- a/src/Nerve_GIC/utilities/Nerve.cpp
+++ b/src/Nerve_GIC/utilities/Nerve.cpp
@@ -42,7 +42,7 @@ int main(int argc, char **argv) {
bool check = SC.read_point_cloud(off_file_name);
if (!check) {
- std::cout << "Incorrect OFF file." << std::endl;
+ std::clog << "Incorrect OFF file." << std::endl;
} else {
SC.set_type("Nerve");
@@ -67,15 +67,15 @@ int main(int argc, char **argv) {
// ----------------------------------------------------------------------------
if (verb) {
- std::cout << "Nerve is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
+ std::clog << "Nerve is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
<< stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on Nerve simplices" << std::endl;
+ std::clog << "Iterator on Nerve simplices" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
}
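The verbose branch above uses the standard Simplex_tree iteration idiom. The same idiom on a toy complex; every call below is GUDHI Simplex_tree API exactly as used in Nerve.cpp:

```cpp
// Iterating the simplices of a Simplex_tree in filtration order.
#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> stree;
  stree.insert_simplex_and_subfaces({0, 1, 2});  // a triangle and all its faces
  std::clog << "Complex is of dimension " << stree.dimension() << " - "
            << stree.num_simplices() << " simplices - " << stree.num_vertices()
            << " vertices." << std::endl;
  for (auto f_simplex : stree.filtration_simplex_range()) {
    for (auto vertex : stree.simplex_vertex_range(f_simplex)) std::clog << vertex << " ";
    std::clog << std::endl;
  }
}
```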
diff --git a/src/Nerve_GIC/utilities/VoronoiGIC.cpp b/src/Nerve_GIC/utilities/VoronoiGIC.cpp
index 0182c948..117c89fb 100644
--- a/src/Nerve_GIC/utilities/VoronoiGIC.cpp
+++ b/src/Nerve_GIC/utilities/VoronoiGIC.cpp
@@ -40,7 +40,7 @@ int main(int argc, char **argv) {
bool check = GIC.read_point_cloud(off_file_name);
if (!check) {
- std::cout << "Incorrect OFF file." << std::endl;
+ std::clog << "Incorrect OFF file." << std::endl;
} else {
GIC.set_type("GIC");
@@ -61,15 +61,15 @@ int main(int argc, char **argv) {
// ----------------------------------------------------------------------------
if (verb) {
- std::cout << "Graph induced complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
+ std::clog << "Graph induced complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
<< " simplices - " << stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on graph induced complex simplices" << std::endl;
+ std::clog << "Iterator on graph induced complex simplices" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
}
diff --git a/src/Nerve_GIC/utilities/km.py.COPYRIGHT b/src/Nerve_GIC/utilities/km.py.COPYRIGHT
index bef7b121..5358d287 100644
--- a/src/Nerve_GIC/utilities/km.py.COPYRIGHT
+++ b/src/Nerve_GIC/utilities/km.py.COPYRIGHT
@@ -1,7 +1,7 @@
km.py is a fork of https://github.com/MLWave/kepler-mapper.
Only the visualization part has been kept (Mapper part has been removed).
-This file has te following Copyright :
+This file has the following Copyright :
The MIT License (MIT)
diff --git a/src/Persistence_representations/example/CMakeLists.txt b/src/Persistence_representations/example/CMakeLists.txt
index a7c6ef39..997f85dc 100644
--- a/src/Persistence_representations/example/CMakeLists.txt
+++ b/src/Persistence_representations/example/CMakeLists.txt
@@ -3,30 +3,24 @@ project(Persistence_representations_example)
add_executable ( Persistence_representations_example_landscape_on_grid persistence_landscape_on_grid.cpp )
add_test(NAME Persistence_representations_example_landscape_on_grid
COMMAND $<TARGET_FILE:Persistence_representations_example_landscape_on_grid>)
-install(TARGETS Persistence_representations_example_landscape_on_grid DESTINATION bin)
add_executable ( Persistence_representations_example_landscape persistence_landscape.cpp )
add_test(NAME Persistence_representations_example_landscape
COMMAND $<TARGET_FILE:Persistence_representations_example_landscape>)
-install(TARGETS Persistence_representations_example_landscape DESTINATION bin)
add_executable ( Persistence_representations_example_intervals persistence_intervals.cpp )
add_test(NAME Persistence_representations_example_intervals
COMMAND $<TARGET_FILE:Persistence_representations_example_intervals>
"${CMAKE_SOURCE_DIR}/data/persistence_diagram/first.pers")
-install(TARGETS Persistence_representations_example_intervals DESTINATION bin)
add_executable ( Persistence_representations_example_vectors persistence_vectors.cpp )
add_test(NAME Persistence_representations_example_vectors
COMMAND $<TARGET_FILE:Persistence_representations_example_vectors>)
-install(TARGETS Persistence_representations_example_vectors DESTINATION bin)
add_executable ( Persistence_representations_example_heat_maps persistence_heat_maps.cpp )
add_test(NAME Persistence_representations_example_heat_maps
COMMAND $<TARGET_FILE:Persistence_representations_example_heat_maps>)
-install(TARGETS Persistence_representations_example_heat_maps DESTINATION bin)
add_executable ( Sliced_Wasserstein sliced_wasserstein.cpp )
add_test(NAME Sliced_Wasserstein
COMMAND $<TARGET_FILE:Sliced_Wasserstein>)
-install(TARGETS Sliced_Wasserstein DESTINATION bin)
diff --git a/src/Persistence_representations/example/persistence_heat_maps.cpp b/src/Persistence_representations/example/persistence_heat_maps.cpp
index 1bf3a637..9fd6779a 100644
--- a/src/Persistence_representations/example/persistence_heat_maps.cpp
+++ b/src/Persistence_representations/example/persistence_heat_maps.cpp
@@ -65,7 +65,7 @@ int main(int argc, char** argv) {
median.compute_median(vector_of_maps);
// to compute L^1 distance between hm1 and hm2:
- std::cout << "The L^1 distance is : " << hm1.distance(hm2, 1) << std::endl;
+ std::clog << "The L^1 distance is : " << hm1.distance(hm2, 1) << std::endl;
// to average of hm1 and hm2:
std::vector<Persistence_heat_maps*> to_average;
@@ -75,15 +75,15 @@ int main(int argc, char** argv) {
av.compute_average(to_average);
// to compute scalar product of hm1 and hm2:
- std::cout << "Scalar product is : " << hm1.compute_scalar_product(hm2) << std::endl;
+ std::clog << "Scalar product is : " << hm1.compute_scalar_product(hm2) << std::endl;
Persistence_heat_maps hm1k(persistence1, Gaussian_function(1.0));
Persistence_heat_maps hm2k(persistence2, Gaussian_function(1.0));
Persistence_heat_maps hm1i(persistence1, Gaussian_function(1.0), 20, 20, 0, 11, 0, 11);
Persistence_heat_maps hm2i(persistence2, Gaussian_function(1.0), 20, 20, 0, 11, 0, 11);
- std::cout << "Scalar product computed with exact 2D kernel on grid is : " << hm1i.compute_scalar_product(hm2i)
+ std::clog << "Scalar product computed with exact 2D kernel on grid is : " << hm1i.compute_scalar_product(hm2i)
<< std::endl;
- std::cout << "Scalar product computed with exact 2D kernel is : " << hm1k.compute_scalar_product(hm2k) << std::endl;
+ std::clog << "Scalar product computed with exact 2D kernel is : " << hm1k.compute_scalar_product(hm2k) << std::endl;
return 0;
}
diff --git a/src/Persistence_representations/example/persistence_intervals.cpp b/src/Persistence_representations/example/persistence_intervals.cpp
index c908581c..748b9ae4 100644
--- a/src/Persistence_representations/example/persistence_intervals.cpp
+++ b/src/Persistence_representations/example/persistence_intervals.cpp
@@ -18,59 +18,59 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in
int main(int argc, char** argv) {
if (argc != 2) {
- std::cout << "To run this program, please provide the name of a file with persistence diagram \n";
+ std::clog << "To run this program, please provide the name of a file with persistence diagram \n";
return 1;
}
Persistence_intervals p(argv[1]);
std::pair<double, double> min_max_ = p.get_x_range();
- std::cout << "Birth-death range : " << min_max_.first << " " << min_max_.second << std::endl;
+ std::clog << "Birth-death range : " << min_max_.first << " " << min_max_.second << std::endl;
std::vector<double> dominant_ten_intervals_length = p.length_of_dominant_intervals(10);
- std::cout << "Length of ten dominant intervals : " << std::endl;
+ std::clog << "Length of ten dominant intervals : " << std::endl;
for (size_t i = 0; i != dominant_ten_intervals_length.size(); ++i) {
- std::cout << dominant_ten_intervals_length[i] << std::endl;
+ std::clog << dominant_ten_intervals_length[i] << std::endl;
}
std::vector<std::pair<double, double> > ten_dominant_intervals = p.dominant_intervals(10);
- std::cout << "Here are the dominant intervals : " << std::endl;
+ std::clog << "Here are the dominant intervals : " << std::endl;
for (size_t i = 0; i != ten_dominant_intervals.size(); ++i) {
- std::cout << "( " << ten_dominant_intervals[i].first << "," << ten_dominant_intervals[i].second << std::endl;
+ std::clog << "( " << ten_dominant_intervals[i].first << "," << ten_dominant_intervals[i].second << std::endl;
}
std::vector<size_t> histogram = p.histogram_of_lengths(10);
- std::cout << "Here is the histogram of barcode's length : " << std::endl;
+ std::clog << "Here is the histogram of barcode's length : " << std::endl;
for (size_t i = 0; i != histogram.size(); ++i) {
- std::cout << histogram[i] << " ";
+ std::clog << histogram[i] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
std::vector<size_t> cumulative_histogram = p.cumulative_histogram_of_lengths(10);
- std::cout << "Cumulative histogram : " << std::endl;
+ std::clog << "Cumulative histogram : " << std::endl;
for (size_t i = 0; i != cumulative_histogram.size(); ++i) {
- std::cout << cumulative_histogram[i] << " ";
+ std::clog << cumulative_histogram[i] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
std::vector<double> char_funct_diag = p.characteristic_function_of_diagram(min_max_.first, min_max_.second);
- std::cout << "Characteristic function of diagram : " << std::endl;
+ std::clog << "Characteristic function of diagram : " << std::endl;
for (size_t i = 0; i != char_funct_diag.size(); ++i) {
- std::cout << char_funct_diag[i] << " ";
+ std::clog << char_funct_diag[i] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
std::vector<double> cumul_char_funct_diag =
p.cumulative_characteristic_function_of_diagram(min_max_.first, min_max_.second);
- std::cout << "Cumulative characteristic function of diagram : " << std::endl;
+ std::clog << "Cumulative characteristic function of diagram : " << std::endl;
for (size_t i = 0; i != cumul_char_funct_diag.size(); ++i) {
- std::cout << cumul_char_funct_diag[i] << " ";
+ std::clog << cumul_char_funct_diag[i] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
- std::cout << "Persistence Betti numbers \n";
+ std::clog << "Persistence Betti numbers \n";
std::vector<std::pair<double, size_t> > pbns = p.compute_persistent_betti_numbers();
for (size_t i = 0; i != pbns.size(); ++i) {
- std::cout << pbns[i].first << " " << pbns[i].second << std::endl;
+ std::clog << pbns[i].first << " " << pbns[i].second << std::endl;
}
return 0;
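The same API can be driven from an in-memory diagram instead of a file, assuming the vector-of-pairs constructor of Persistence_intervals; a short sketch:

```cpp
// Persistence_intervals built from an in-memory diagram (assumed constructor).
#include <gudhi/Persistence_intervals.h>
#include <iostream>
#include <utility>
#include <vector>

using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals;

int main() {
  std::vector<std::pair<double, double>> diagram{{0.0, 4.0}, {1.0, 2.0}, {3.0, 8.0}};
  Persistence_intervals p(diagram);
  std::pair<double, double> min_max_ = p.get_x_range();
  std::clog << "Birth-death range : " << min_max_.first << " " << min_max_.second << std::endl;
  for (auto interval : p.dominant_intervals(2))
    std::clog << "( " << interval.first << ", " << interval.second << " )" << std::endl;
}
```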
diff --git a/src/Persistence_representations/example/persistence_landscape.cpp b/src/Persistence_representations/example/persistence_landscape.cpp
index ff18d105..d39ae0b8 100644
--- a/src/Persistence_representations/example/persistence_landscape.cpp
+++ b/src/Persistence_representations/example/persistence_landscape.cpp
@@ -37,35 +37,35 @@ int main(int argc, char** argv) {
Persistence_landscape l2(persistence2);
// This is how to compute integral of landscapes:
- std::cout << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl;
- std::cout << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl;
+ std::clog << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl;
+ std::clog << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl;
// And here how to write landscapes to stream:
- std::cout << "l1 : " << l1 << std::endl;
- std::cout << "l2 : " << l2 << std::endl;
+ std::clog << "l1 : " << l1 << std::endl;
+ std::clog << "l2 : " << l2 << std::endl;
// Arithmetic operations on landscapes:
Persistence_landscape sum = l1 + l2;
- std::cout << "sum : " << sum << std::endl;
+ std::clog << "sum : " << sum << std::endl;
// here are the maxima of the functions:
- std::cout << "Maximum of l1 : " << l1.compute_maximum() << std::endl;
- std::cout << "Maximum of l2 : " << l2.compute_maximum() << std::endl;
+ std::clog << "Maximum of l1 : " << l1.compute_maximum() << std::endl;
+ std::clog << "Maximum of l2 : " << l2.compute_maximum() << std::endl;
// here are the norms of landscapes:
- std::cout << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl;
- std::cout << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl;
+ std::clog << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl;
+ std::clog << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl;
// here is the average of landscapes:
Persistence_landscape average;
average.compute_average({&l1, &l2});
- std::cout << "average : " << average << std::endl;
+ std::clog << "average : " << average << std::endl;
// here is the distance of landscapes:
- std::cout << "Distance : " << l1.distance(l2) << std::endl;
+ std::clog << "Distance : " << l1.distance(l2) << std::endl;
// here is the scalar product of landscapes:
- std::cout << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl;
+ std::clog << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl;
// here is how to create a file which is suitable for visualization via gnuplot:
average.plot("average_landscape");
diff --git a/src/Persistence_representations/example/persistence_landscape_on_grid.cpp b/src/Persistence_representations/example/persistence_landscape_on_grid.cpp
index 16a58e1d..6d58e167 100644
--- a/src/Persistence_representations/example/persistence_landscape_on_grid.cpp
+++ b/src/Persistence_representations/example/persistence_landscape_on_grid.cpp
@@ -37,31 +37,31 @@ int main(int argc, char** argv) {
Persistence_landscape_on_grid l2(persistence2, 0, 11, 20);
// This is how to compute integral of landscapes:
- std::cout << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl;
- std::cout << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl;
+ std::clog << "Integral of the first landscape : " << l1.compute_integral_of_landscape() << std::endl;
+ std::clog << "Integral of the second landscape : " << l2.compute_integral_of_landscape() << std::endl;
// And here how to write landscapes to stream:
- std::cout << "l1 : " << l1 << std::endl;
- std::cout << "l2 : " << l2 << std::endl;
+ std::clog << "l1 : " << l1 << std::endl;
+ std::clog << "l2 : " << l2 << std::endl;
// here are the maxima of the functions:
- std::cout << "Maximum of l1 : " << l1.compute_maximum() << std::endl;
- std::cout << "Maximum of l2 : " << l2.compute_maximum() << std::endl;
+ std::clog << "Maximum of l1 : " << l1.compute_maximum() << std::endl;
+ std::clog << "Maximum of l2 : " << l2.compute_maximum() << std::endl;
// here are the norms of landscapes:
- std::cout << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl;
- std::cout << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl;
+ std::clog << "L^1 Norm of l1 : " << l1.compute_norm_of_landscape(1.) << std::endl;
+ std::clog << "L^1 Norm of l2 : " << l2.compute_norm_of_landscape(1.) << std::endl;
// here is the average of landscapes:
Persistence_landscape_on_grid average;
average.compute_average({&l1, &l2});
- std::cout << "average : " << average << std::endl;
+ std::clog << "average : " << average << std::endl;
// here is the distance of landscapes:
- std::cout << "Distance : " << l1.distance(l2) << std::endl;
+ std::clog << "Distance : " << l1.distance(l2) << std::endl;
// here is the scalar product of landscapes:
- std::cout << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl;
+ std::clog << "Scalar product : " << l1.compute_scalar_product(l2) << std::endl;
// here is how to create a file which is suitable for visualization via gnuplot:
average.plot("average_landscape");
diff --git a/src/Persistence_representations/example/persistence_vectors.cpp b/src/Persistence_representations/example/persistence_vectors.cpp
index b27e52d2..89e2fb83 100644
--- a/src/Persistence_representations/example/persistence_vectors.cpp
+++ b/src/Persistence_representations/example/persistence_vectors.cpp
@@ -41,19 +41,19 @@ int main(int argc, char** argv) {
Vector_distances_in_diagram v2(persistence2, std::numeric_limits<size_t>::max());
// writing to a stream:
- std::cout << "v1 : " << v1 << std::endl;
- std::cout << "v2 : " << v2 << std::endl;
+ std::clog << "v1 : " << v1 << std::endl;
+ std::clog << "v2 : " << v2 << std::endl;
// averages:
Vector_distances_in_diagram average;
average.compute_average({&v1, &v2});
- std::cout << "Average : " << average << std::endl;
+ std::clog << "Average : " << average << std::endl;
// computations of distances:
- std::cout << "l^1 distance : " << v1.distance(v2) << std::endl;
+ std::clog << "l^1 distance : " << v1.distance(v2) << std::endl;
// computations of scalar product:
- std::cout << "Scalar product of l1 and l2 : " << v1.compute_scalar_product(v2) << std::endl;
+ std::clog << "Scalar product of l1 and l2 : " << v1.compute_scalar_product(v2) << std::endl;
// create a file with a gnuplot script:
v1.plot("plot_of_vector_representation");
diff --git a/src/Persistence_representations/example/sliced_wasserstein.cpp b/src/Persistence_representations/example/sliced_wasserstein.cpp
index d5414d00..d4e31ebf 100644
--- a/src/Persistence_representations/example/sliced_wasserstein.cpp
+++ b/src/Persistence_representations/example/sliced_wasserstein.cpp
@@ -38,10 +38,10 @@ int main(int argc, char** argv) {
SW swex1(persistence1, 1, -1);
SW swex2(persistence2, 1, -1);
- std::cout << "Approx SW kernel: " << sw1.compute_scalar_product(sw2) << std::endl;
- std::cout << "Exact SW kernel: " << swex1.compute_scalar_product(swex2) << std::endl;
- std::cout << "Distance induced by approx SW kernel: " << sw1.distance(sw2) << std::endl;
- std::cout << "Distance induced by exact SW kernel: " << swex1.distance(swex2) << std::endl;
+ std::clog << "Approx SW kernel: " << sw1.compute_scalar_product(sw2) << std::endl;
+ std::clog << "Exact SW kernel: " << swex1.compute_scalar_product(swex2) << std::endl;
+ std::clog << "Distance induced by approx SW kernel: " << sw1.distance(sw2) << std::endl;
+ std::clog << "Distance induced by exact SW kernel: " << swex1.distance(swex2) << std::endl;
return 0;
}
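A compact variant of this example on toy diagrams; the constructor arguments follow the usage shown above: (diagram, sigma, number of directions), with -1 directions meaning the exact computation.

```cpp
// Approximate vs exact Sliced Wasserstein kernel on two small diagrams.
#include <gudhi/Sliced_Wasserstein.h>
#include <iostream>
#include <utility>
#include <vector>

using SW = Gudhi::Persistence_representations::Sliced_Wasserstein;

int main() {
  std::vector<std::pair<double, double>> d1{{0.0, 3.0}, {1.0, 4.0}};
  std::vector<std::pair<double, double>> d2{{0.0, 3.5}, {2.0, 4.0}};
  SW sw1(d1, 1, 100), sw2(d2, 1, 100);    // approximate: 100 directions
  SW swex1(d1, 1, -1), swex2(d2, 1, -1);  // exact
  std::clog << "Approx SW kernel: " << sw1.compute_scalar_product(sw2) << std::endl;
  std::clog << "Exact SW kernel: " << swex1.compute_scalar_product(swex2) << std::endl;
}
```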
diff --git a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h
index b1af3503..fab88489 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_heat_maps.h
@@ -55,9 +55,9 @@ std::vector<std::vector<double> > create_Gaussian_filter(size_t pixel_radius, do
}
if (dbg) {
- std::cerr << "Kernel initialize \n";
- std::cerr << "pixel_radius : " << pixel_radius << std::endl;
- std::cerr << "kernel.size() : " << kernel.size() << std::endl;
+ std::clog << "Kernel initialize \n";
+ std::clog << "pixel_radius : " << pixel_radius << std::endl;
+ std::clog << "kernel.size() : " << kernel.size() << std::endl;
getchar();
}
@@ -79,12 +79,12 @@ std::vector<std::vector<double> > create_Gaussian_filter(size_t pixel_radius, do
}
if (dbg) {
- std::cerr << "Here is the kernel : \n";
+ std::clog << "Here is the kernel : \n";
for (size_t i = 0; i != kernel.size(); ++i) {
for (size_t j = 0; j != kernel[i].size(); ++j) {
- std::cerr << kernel[i][j] << " ";
+ std::clog << kernel[i][j] << " ";
}
- std::cerr << std::endl;
+ std::clog << std::endl;
}
}
return kernel;
@@ -290,16 +290,16 @@ class Persistence_heat_maps {
bool dbg = false;
if (this->heat_map.size() != second.heat_map.size()) {
if (dbg)
- std::cerr << "this->heat_map.size() : " << this->heat_map.size()
+ std::clog << "this->heat_map.size() : " << this->heat_map.size()
<< " \n second.heat_map.size() : " << second.heat_map.size() << std::endl;
return false;
}
if (this->min_ != second.min_) {
- if (dbg) std::cerr << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl;
+ if (dbg) std::clog << "this->min_ : " << this->min_ << ", second.min_ : " << second.min_ << std::endl;
return false;
}
if (this->max_ != second.max_) {
- if (dbg) std::cerr << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl;
+ if (dbg) std::clog << "this->max_ : " << this->max_ << ", second.max_ : " << second.max_ << std::endl;
return false;
}
// in the other case we may assume that the persistence images are defined on the same domain.
@@ -322,15 +322,15 @@ class Persistence_heat_maps {
bool operator==(const Persistence_heat_maps& rhs) const {
bool dbg = false;
if (!this->check_if_the_same(rhs)) {
- if (dbg) std::cerr << "The domains are not the same \n";
+ if (dbg) std::clog << "The domains are not the same \n";
return false; // in this case, the domains are not the same, so the maps cannot be the same.
}
for (size_t i = 0; i != this->heat_map.size(); ++i) {
for (size_t j = 0; j != this->heat_map[i].size(); ++j) {
if (!almost_equal(this->heat_map[i][j], rhs.heat_map[i][j])) {
if (dbg) {
- std::cerr << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl;
- std::cerr << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl;
+ std::clog << "this->heat_map[" << i << "][" << j << "] = " << this->heat_map[i][j] << std::endl;
+ std::clog << "rhs.heat_map[" << i << "][" << j << "] = " << rhs.heat_map[i][j] << std::endl;
}
return false;
}
@@ -586,14 +586,14 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
bool erase_below_diagonal, size_t number_of_pixels,
double min_, double max_) {
bool dbg = false;
- if (dbg) std::cerr << "Entering construct procedure \n";
+ if (dbg) std::clog << "Entering construct procedure \n";
Scalling_of_kernels f;
this->f = f;
- if (dbg) std::cerr << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl;
+ if (dbg) std::clog << "min and max passed to construct() procedure: " << min_ << " " << max_ << std::endl;
if (min_ == max_) {
- if (dbg) std::cerr << "min and max parameters will be determined based on intervals \n";
+ if (dbg) std::clog << "min and max parameters will be determined based on intervals \n";
// in this case, we want the program to set up the min_ and max_ values by itself.
min_ = std::numeric_limits<int>::max();
max_ = -std::numeric_limits<int>::max();
@@ -611,9 +611,9 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
}
if (dbg) {
- std::cerr << "min_ : " << min_ << std::endl;
- std::cerr << "max_ : " << max_ << std::endl;
- std::cerr << "number_of_pixels : " << number_of_pixels << std::endl;
+ std::clog << "min_ : " << min_ << std::endl;
+ std::clog << "max_ : " << max_ << std::endl;
+ std::clog << "number_of_pixels : " << number_of_pixels << std::endl;
getchar();
}
@@ -628,7 +628,7 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
}
this->heat_map = heat_map_;
- if (dbg) std::cerr << "Done creating of the heat map, now we will fill in the structure \n";
+ if (dbg) std::clog << "Done creating of the heat map, now we will fill in the structure \n";
for (size_t pt_nr = 0; pt_nr != intervals_.size(); ++pt_nr) {
// compute the value of intervals_[pt_nr] in the grid:
@@ -638,9 +638,9 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
static_cast<int>((intervals_[pt_nr].second - this->min_) / (this->max_ - this->min_) * number_of_pixels);
if (dbg) {
- std::cerr << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl;
- std::cerr << "x_grid : " << x_grid << std::endl;
- std::cerr << "y_grid : " << y_grid << std::endl;
+ std::clog << "point : " << intervals_[pt_nr].first << " , " << intervals_[pt_nr].second << std::endl;
+ std::clog << "x_grid : " << x_grid << std::endl;
+ std::clog << "y_grid : " << y_grid << std::endl;
}
// x_grid and y_grid gives a center of the kernel. We want to have its lower left corner. To get this, we need to
@@ -650,9 +650,9 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
// note that the numbers x_grid and y_grid may be negative.
if (dbg) {
- std::cerr << "After shift : \n";
- std::cerr << "x_grid : " << x_grid << std::endl;
- std::cerr << "y_grid : " << y_grid << std::endl;
+ std::clog << "After shift : \n";
+ std::clog << "x_grid : " << x_grid << std::endl;
+ std::clog << "y_grid : " << y_grid << std::endl;
}
double scaling_value = this->f(intervals_[pt_nr]);
@@ -663,11 +663,11 @@ void Persistence_heat_maps<Scalling_of_kernels>::construct(const std::vector<std
if (((x_grid + i) >= 0) && (x_grid + i < this->heat_map.size()) && ((y_grid + j) >= 0) &&
(y_grid + j < this->heat_map.size())) {
if (dbg) {
- std::cerr << y_grid + j << " " << x_grid + i << std::endl;
+ std::clog << y_grid + j << " " << x_grid + i << std::endl;
}
this->heat_map[y_grid + j][x_grid + i] += scaling_value * filter[i][j];
if (dbg) {
- std::cerr << "Position : (" << x_grid + i << "," << y_grid + j
+ std::clog << "Position : (" << x_grid + i << "," << y_grid + j
<< ") got increased by the value : " << filter[i][j] << std::endl;
}
}
@@ -805,7 +805,7 @@ void Persistence_heat_maps<Scalling_of_kernels>::plot(const char* filename) cons
out << std::endl;
}
out.close();
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
}
@@ -842,7 +842,7 @@ void Persistence_heat_maps<Scalling_of_kernels>::load_from_file(const char* file
in >> this->min_ >> this->max_;
if (dbg) {
- std::cerr << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl;
+ std::clog << "Reading the following values of min and max : " << this->min_ << " , " << this->max_ << std::endl;
}
std::string temp;
@@ -859,18 +859,18 @@ void Persistence_heat_maps<Scalling_of_kernels>::load_from_file(const char* file
lineSS >> point;
line_of_heat_map.push_back(point);
if (dbg) {
- std::cout << point << " ";
+ std::clog << point << " ";
}
}
if (dbg) {
- std::cout << std::endl;
+ std::clog << std::endl;
getchar();
}
if (in.good()) this->heat_map.push_back(line_of_heat_map);
}
in.close();
- if (dbg) std::cout << "Done \n";
+ if (dbg) std::clog << "Done \n";
}
// Concretizations of virtual methods:
@@ -878,7 +878,7 @@ template <typename Scalling_of_kernels>
std::vector<double> Persistence_heat_maps<Scalling_of_kernels>::vectorize(int number_of_function) const {
std::vector<double> result;
if (!discrete) {
- std::cout << "No vectorize method in case of infinite dimensional vectorization" << std::endl;
+ std::cerr << "No vectorize method in case of infinite dimensional vectorization" << std::endl;
return result;
}
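The grid mapping in construct() sends a diagram point (birth, death) to pixel coordinates by rescaling into [0, number_of_pixels). The arithmetic in isolation, with toy values:

```cpp
// Pixel mapping used by the heat-map construction, on toy values.
#include <iostream>

int main() {
  double min_ = 0.0, max_ = 11.0;
  int number_of_pixels = 20;
  double birth = 2.0, death = 7.5;
  int x_grid = static_cast<int>((birth - min_) / (max_ - min_) * number_of_pixels);
  int y_grid = static_cast<int>((death - min_) / (max_ - min_) * number_of_pixels);
  std::clog << "x_grid : " << x_grid << std::endl;  // 3
  std::clog << "y_grid : " << y_grid << std::endl;  // 13
}
```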
diff --git a/src/Persistence_representations/include/gudhi/Persistence_intervals.h b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
index ea4220ea..f4324cb2 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_intervals.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
@@ -109,7 +109,7 @@ class Persistence_intervals {
std::vector<size_t> cumulative_histogram_of_lengths(size_t number_of_bins = 10) const;
/**
- * In this procedure we assume that each barcode is a characteristic function of a hight equal to its length. The
+ * In this procedure we assume that each barcode is a characteristic function of a height equal to its length. The
 *persistence diagram is a sum of such functions. The procedure below constructs a function that is a
* sum of the characteristic functions of persistence intervals. The first two parameters are the range in which the
*function is to be computed and the last parameter is the number of bins in
@@ -185,7 +185,7 @@ class Persistence_intervals {
out.close();
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
}
@@ -207,7 +207,7 @@ class Persistence_intervals {
/**
* This is a simple function projecting the persistence intervals to a real number. The function we use here is a sum
*of squared lengths of intervals. It can be naturally interpreted as
- * sum of step function, where the step hight it equal to the length of the interval.
+   * sum of step functions, where the step height is equal to the length of the interval.
* At the moment this function is not tested, since it is quite likely to be changed in the future. Given this, when
*using it, keep in mind that it
* will be most likely changed in the next versions.
@@ -293,7 +293,7 @@ std::vector<std::pair<double, double> > Persistence_intervals::dominant_interval
for (size_t i = 0; i != std::min(where_to_cut, position_length_vector.size()); ++i) {
result.push_back(this->intervals[position_length_vector[i].first]);
if (dbg)
- std::cerr << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second
+ std::clog << "Position : " << position_length_vector[i].first << " length : " << position_length_vector[i].second
<< std::endl;
}
@@ -303,7 +303,7 @@ std::vector<std::pair<double, double> > Persistence_intervals::dominant_interval
std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of_bins) const {
bool dbg = false;
- if (dbg) std::cerr << "this->intervals.size() : " << this->intervals.size() << std::endl;
+ if (dbg) std::clog << "this->intervals.size() : " << this->intervals.size() << std::endl;
// first find the length of the longest interval:
double lengthOfLongest = 0;
for (size_t i = 0; i != this->intervals.size(); ++i) {
@@ -313,7 +313,7 @@ std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of
}
if (dbg) {
- std::cerr << "lengthOfLongest : " << lengthOfLongest << std::endl;
+ std::clog << "lengthOfLongest : " << lengthOfLongest << std::endl;
}
// this is a container we will use to store the resulting histogram
@@ -330,10 +330,10 @@ std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of
++result[position];
if (dbg) {
- std::cerr << "i : " << i << std::endl;
- std::cerr << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n";
- std::cerr << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl;
- std::cerr << "position : " << position << std::endl;
+ std::clog << "i : " << i << std::endl;
+ std::clog << "Interval : [" << this->intervals[i].first << " , " << this->intervals[i].second << " ] \n";
+ std::clog << "relative_length_of_this_interval : " << relative_length_of_this_interval << std::endl;
+ std::clog << "position : " << position << std::endl;
getchar();
}
}
@@ -342,7 +342,7 @@ std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of
result.resize(number_of_bins);
if (dbg) {
- for (size_t i = 0; i != result.size(); ++i) std::cerr << result[i] << std::endl;
+ for (size_t i = 0; i != result.size(); ++i) std::clog << result[i] << std::endl;
}
return result;
}
@@ -368,7 +368,7 @@ std::vector<double> Persistence_intervals::characteristic_function_of_diagram(do
for (size_t i = 0; i != this->intervals.size(); ++i) {
if (dbg) {
- std::cerr << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
+ std::clog << "Interval : " << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
}
size_t beginIt = 0;
@@ -390,8 +390,8 @@ std::vector<double> Persistence_intervals::characteristic_function_of_diagram(do
}
if (dbg) {
- std::cerr << "beginIt : " << beginIt << std::endl;
- std::cerr << "endIt : " << endIt << std::endl;
+ std::clog << "beginIt : " << beginIt << std::endl;
+ std::clog << "endIt : " << endIt << std::endl;
}
for (size_t pos = beginIt; pos != endIt; ++pos) {
@@ -399,11 +399,11 @@ std::vector<double> Persistence_intervals::characteristic_function_of_diagram(do
(this->intervals[i].second - this->intervals[i].first);
}
if (dbg) {
- std::cerr << "Result at this stage \n";
+ std::clog << "Result at this stage \n";
for (size_t aa = 0; aa != result.size(); ++aa) {
- std::cerr << result[aa] << " ";
+ std::clog << result[aa] << " ";
}
- std::cerr << std::endl;
+ std::clog << std::endl;
}
}
return result;
@@ -455,9 +455,9 @@ inline double compute_euclidean_distance(const std::pair<double, double>& f, con
std::vector<double> Persistence_intervals::k_n_n(size_t k, size_t where_to_cut) const {
bool dbg = false;
if (dbg) {
- std::cerr << "Here are the intervals : \n";
+ std::clog << "Here are the intervals : \n";
for (size_t i = 0; i != this->intervals.size(); ++i) {
- std::cerr << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n";
+ std::clog << "[ " << this->intervals[i].first << " , " << this->intervals[i].second << "] \n";
}
getchar();
}
@@ -486,12 +486,12 @@ std::vector<double> Persistence_intervals::k_n_n(size_t k, size_t where_to_cut)
distances_from_diagonal[i] = distanceToDiagonal;
if (dbg) {
- std::cerr << "Here are the distances form the point : [" << this->intervals[i].first << " , "
+ std::clog << "Here are the distances form the point : [" << this->intervals[i].first << " , "
<< this->intervals[i].second << "] in the diagram \n";
for (size_t aa = 0; aa != distancesFromI.size(); ++aa) {
- std::cerr << "To : " << i + aa << " : " << distancesFromI[aa] << " ";
+ std::clog << "To : " << i + aa << " : " << distancesFromI[aa] << " ";
}
- std::cerr << std::endl;
+ std::clog << std::endl;
getchar();
}
@@ -502,18 +502,18 @@ std::vector<double> Persistence_intervals::k_n_n(size_t k, size_t where_to_cut)
}
}
if (dbg) {
- std::cerr << "Here is the distance matrix : \n";
+ std::clog << "Here is the distance matrix : \n";
for (size_t i = 0; i != distances.size(); ++i) {
for (size_t j = 0; j != distances.size(); ++j) {
- std::cerr << distances[i][j] << " ";
+ std::clog << distances[i][j] << " ";
}
- std::cerr << std::endl;
+ std::clog << std::endl;
}
- std::cerr << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl;
+ std::clog << std::endl << std::endl << "And here are the distances to the diagonal : " << std::endl;
for (size_t i = 0; i != distances_from_diagonal.size(); ++i) {
- std::cerr << distances_from_diagonal[i] << " ";
+ std::clog << distances_from_diagonal[i] << " ";
}
- std::cerr << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
getchar();
}
@@ -526,13 +526,13 @@ std::vector<double> Persistence_intervals::k_n_n(size_t k, size_t where_to_cut)
if (k > distancesFromI.size()) {
if (dbg) {
- std::cerr << "There are not enough neighbors in your set. We set the result to plus infty \n";
+ std::clog << "There are not enough neighbors in your set. We set the result to plus infty \n";
}
result.push_back(std::numeric_limits<double>::max());
} else {
if (distances_from_diagonal[i] > distancesFromI[k]) {
if (dbg) {
- std::cerr << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n";
+ std::clog << "The k-th n.n. is on a diagonal. Therefore we set up a distance to diagonal \n";
}
result.push_back(distances_from_diagonal[i]);
} else {
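Two "distance to the diagonal" conventions appear in this codebase, for a diagram point (b, d): the L-infinity form 0.5 * |d - b| (used earlier in this patch in compute_p_value) and the Euclidean form |d - b| / sqrt(2), which is what compute_euclidean_distance against the projection ((b+d)/2, (b+d)/2) yields. Side by side:

```cpp
// L-infinity vs Euclidean distance from a diagram point to the diagonal.
#include <cmath>
#include <iostream>

int main() {
  double b = 1.0, d = 4.0;
  std::clog << "L-infinity : " << 0.5 * std::abs(d - b) << std::endl;             // 1.5
  std::clog << "Euclidean  : " << std::abs(d - b) / std::sqrt(2.0) << std::endl;  // ~2.12
}
```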
diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape.h b/src/Persistence_representations/include/gudhi/Persistence_landscape.h
index b819ccb6..ce4065b8 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_landscape.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_landscape.h
@@ -343,7 +343,7 @@ class Persistence_landscape {
bool dbg = false;
if (dbg) {
- std::cerr << "to_average.size() : " << to_average.size() << std::endl;
+ std::clog << "to_average.size() : " << to_average.size() << std::endl;
}
std::vector<Persistence_landscape*> nextLevelMerge(to_average.size());
@@ -357,13 +357,13 @@ class Persistence_landscape {
while (nextLevelMerge.size() != 1) {
if (dbg) {
- std::cerr << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl;
+ std::clog << "nextLevelMerge.size() : " << nextLevelMerge.size() << std::endl;
}
std::vector<Persistence_landscape*> nextNextLevelMerge;
nextNextLevelMerge.reserve(to_average.size());
for (size_t i = 0; i < nextLevelMerge.size(); i = i + 2) {
if (dbg) {
- std::cerr << "i : " << i << std::endl;
+ std::clog << "i : " << i << std::endl;
}
Persistence_landscape* l = new Persistence_landscape;
if (i + 1 != nextLevelMerge.size()) {
@@ -374,7 +374,7 @@ class Persistence_landscape {
nextNextLevelMerge.push_back(l);
}
if (dbg) {
- std::cerr << "After this iteration \n";
+ std::clog << "After this iteration \n";
getchar();
}
@@ -471,25 +471,25 @@ Persistence_landscape::Persistence_landscape(const char* filename, size_t dimens
bool operatorEqualDbg = false;
bool Persistence_landscape::operator==(const Persistence_landscape& rhs) const {
if (this->land.size() != rhs.land.size()) {
- if (operatorEqualDbg) std::cerr << "1\n";
+ if (operatorEqualDbg) std::clog << "1\n";
return false;
}
for (size_t level = 0; level != this->land.size(); ++level) {
if (this->land[level].size() != rhs.land[level].size()) {
- if (operatorEqualDbg) std::cerr << "this->land[level].size() : " << this->land[level].size() << "\n";
- if (operatorEqualDbg) std::cerr << "rhs.land[level].size() : " << rhs.land[level].size() << "\n";
- if (operatorEqualDbg) std::cerr << "2\n";
+ if (operatorEqualDbg) std::clog << "this->land[level].size() : " << this->land[level].size() << "\n";
+ if (operatorEqualDbg) std::clog << "rhs.land[level].size() : " << rhs.land[level].size() << "\n";
+ if (operatorEqualDbg) std::clog << "2\n";
return false;
}
for (size_t i = 0; i != this->land[level].size(); ++i) {
if (!(almost_equal(this->land[level][i].first, rhs.land[level][i].first) &&
almost_equal(this->land[level][i].second, rhs.land[level][i].second))) {
if (operatorEqualDbg)
- std::cerr << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second
+ std::clog << "this->land[level][i] : " << this->land[level][i].first << " " << this->land[level][i].second
<< "\n";
if (operatorEqualDbg)
- std::cerr << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n";
- if (operatorEqualDbg) std::cerr << "3\n";
+ std::clog << "rhs.land[level][i] : " << rhs.land[level][i].first << " " << rhs.land[level][i].second << "\n";
+ if (operatorEqualDbg) std::clog << "3\n";
return false;
}
}
@@ -507,7 +507,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
const std::vector<std::pair<double, double> >& p, size_t number_of_levels) {
bool dbg = false;
if (dbg) {
- std::cerr << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )"
+ std::clog << "Persistence_landscape::Persistence_landscape( const std::vector< std::pair< double , double > >& p )"
<< std::endl;
}
@@ -517,9 +517,9 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
std::sort(bars.begin(), bars.end(), compare_points_sorting);
if (dbg) {
- std::cerr << "Bars : \n";
+ std::clog << "Bars : \n";
for (size_t i = 0; i != bars.size(); ++i) {
- std::cerr << bars[i].first << " " << bars[i].second << "\n";
+ std::clog << bars[i].first << " " << bars[i].second << "\n";
}
getchar();
}
@@ -534,7 +534,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
while (!characteristicPoints.empty()) {
if (dbg) {
for (size_t i = 0; i != characteristicPoints.size(); ++i) {
- std::cout << "(" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n";
+ std::clog << "(" << characteristicPoints[i].first << " " << characteristicPoints[i].second << ")\n";
}
std::cin.ignore();
}
@@ -545,7 +545,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
lambda_n.push_back(characteristicPoints[0]);
if (dbg) {
- std::cerr << "1 Adding to lambda_n : (" << -std::numeric_limits<int>::max() << " " << 0 << ") , ("
+ std::clog << "1 Adding to lambda_n : (" << -std::numeric_limits<int>::max() << " " << 0 << ") , ("
<< minus_length(characteristicPoints[0]) << " " << 0 << ") , (" << characteristicPoints[0].first << " "
<< characteristicPoints[0].second << ") \n";
}
@@ -562,13 +562,13 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
(birth_plus_deaths(lambda_n[lambda_n.size() - 1]) - minus_length(characteristicPoints[i])) / 2);
lambda_n.push_back(point);
if (dbg) {
- std::cerr << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n";
+ std::clog << "2 Adding to lambda_n : (" << point.first << " " << point.second << ")\n";
}
if (dbg) {
- std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
+ std::clog << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
<< characteristicPoints[i + p].second << "\n";
- std::cerr << "point : " << point.first << " " << point.second << "\n";
+ std::clog << "point : " << point.first << " " << point.second << "\n";
getchar();
}
@@ -577,7 +577,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
(birth_plus_deaths(point) <= birth_plus_deaths(characteristicPoints[i + p]))) {
newCharacteristicPoints.push_back(characteristicPoints[i + p]);
if (dbg) {
- std::cerr << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
+ std::clog << "3.5 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
<< characteristicPoints[i + p].second << ")\n";
getchar();
}
@@ -586,7 +586,7 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
newCharacteristicPoints.push_back(point);
if (dbg) {
- std::cerr << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n";
+ std::clog << "4 Adding to newCharacteristicPoints : (" << point.first << " " << point.second << ")\n";
}
while ((i + p < characteristicPoints.size()) &&
@@ -594,15 +594,15 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
(birth_plus_deaths(point) >= birth_plus_deaths(characteristicPoints[i + p]))) {
newCharacteristicPoints.push_back(characteristicPoints[i + p]);
if (dbg) {
- std::cerr << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
+ std::clog << "characteristicPoints[i+p] : " << characteristicPoints[i + p].first << " "
<< characteristicPoints[i + p].second << "\n";
- std::cerr << "point : " << point.first << " " << point.second << "\n";
- std::cerr << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p])
+ std::clog << "point : " << point.first << " " << point.second << "\n";
+ std::clog << "characteristicPoints[i+p] birth and death : " << minus_length(characteristicPoints[i + p])
<< " , " << birth_plus_deaths(characteristicPoints[i + p]) << "\n";
- std::cerr << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point)
+ std::clog << "point birth and death : " << minus_length(point) << " , " << birth_plus_deaths(point)
<< "\n";
- std::cerr << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
+ std::clog << "3 Adding to newCharacteristicPoints : (" << characteristicPoints[i + p].first << " "
<< characteristicPoints[i + p].second << ")\n";
getchar();
}
@@ -613,20 +613,20 @@ void Persistence_landscape::construct_persistence_landscape_from_barcode(
lambda_n.push_back(std::make_pair(birth_plus_deaths(lambda_n[lambda_n.size() - 1]), 0));
lambda_n.push_back(std::make_pair(minus_length(characteristicPoints[i]), 0));
if (dbg) {
- std::cerr << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0
+ std::clog << "5 Adding to lambda_n : (" << birth_plus_deaths(lambda_n[lambda_n.size() - 1]) << " " << 0
<< ")\n";
- std::cerr << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n";
+ std::clog << "5 Adding to lambda_n : (" << minus_length(characteristicPoints[i]) << " " << 0 << ")\n";
}
}
lambda_n.push_back(characteristicPoints[i]);
if (dbg) {
- std::cerr << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " "
+ std::clog << "6 Adding to lambda_n : (" << characteristicPoints[i].first << " "
<< characteristicPoints[i].second << ")\n";
}
} else {
newCharacteristicPoints.push_back(characteristicPoints[i]);
if (dbg) {
- std::cerr << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " "
+ std::clog << "7 Adding to newCharacteristicPoints : (" << characteristicPoints[i].first << " "
<< characteristicPoints[i].second << ")\n";
}
}
@@ -692,7 +692,7 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const {
double result = 0;
for (size_t i = 0; i != this->land.size(); ++i) {
for (size_t nr = 2; nr != this->land[i].size() - 1; ++nr) {
- if (dbg) std::cout << "nr : " << nr << "\n";
+ if (dbg) std::clog << "nr : " << nr << "\n";
// In this interval, the landscape has a form f(x) = ax+b. We want to compute integral of (ax+b)^p = 1/a *
// (ax+b)^{p+1}/(p+1)
std::pair<double, double> coef = compute_parameters_of_a_line(this->land[i][nr], this->land[i][nr - 1]);
@@ -700,7 +700,7 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const {
double b = coef.second;
if (dbg)
- std::cout << "(" << this->land[i][nr].first << "," << this->land[i][nr].second << ") , "
+ std::clog << "(" << this->land[i][nr].first << "," << this->land[i][nr].second << ") , "
                  << " (" << this->land[i][nr - 1].first << "," << this->land[i][nr - 1].second << ")" << std::endl;
if (this->land[i][nr].first == this->land[i][nr - 1].first) continue;
if (a != 0) {
@@ -710,8 +710,8 @@ double Persistence_landscape::compute_integral_of_landscape(double p) const {
result += (this->land[i][nr].first - this->land[i][nr - 1].first) * (pow(this->land[i][nr].second, p));
}
if (dbg) {
- std::cout << "a : " << a << " , b : " << b << std::endl;
- std::cout << "result : " << result << std::endl;
+ std::clog << "a : " << a << " , b : " << b << std::endl;
+ std::clog << "result : " << result << std::endl;
}
}
}
@@ -730,31 +730,31 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou
unsigned coordEnd = this->land[level].size() - 2;
if (compute_value_at_a_given_pointDbg) {
- std::cerr << "Here \n";
- std::cerr << "x : " << x << "\n";
- std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
- std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
+ std::clog << "Here \n";
+ std::clog << "x : " << x << "\n";
+ std::clog << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
+ std::clog << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
}
// in this case x is outside the support of the landscape, therefore the value of the landscape is 0.
if (x <= this->land[level][coordBegin].first) return 0;
if (x >= this->land[level][coordEnd].first) return 0;
- if (compute_value_at_a_given_pointDbg) std::cerr << "Entering to the while loop \n";
+ if (compute_value_at_a_given_pointDbg) std::clog << "Entering the while loop \n";
while (coordBegin + 1 != coordEnd) {
if (compute_value_at_a_given_pointDbg) {
- std::cerr << "coordBegin : " << coordBegin << "\n";
- std::cerr << "coordEnd : " << coordEnd << "\n";
- std::cerr << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
- std::cerr << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
+ std::clog << "coordBegin : " << coordBegin << "\n";
+ std::clog << "coordEnd : " << coordEnd << "\n";
+ std::clog << "this->land[level][coordBegin].first : " << this->land[level][coordBegin].first << "\n";
+ std::clog << "this->land[level][coordEnd].first : " << this->land[level][coordEnd].first << "\n";
}
unsigned newCord = (unsigned)floor((coordEnd + coordBegin) / 2.0);
if (compute_value_at_a_given_pointDbg) {
- std::cerr << "newCord : " << newCord << "\n";
- std::cerr << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n";
+ std::clog << "newCord : " << newCord << "\n";
+ std::clog << "this->land[level][newCord].first : " << this->land[level][newCord].first << "\n";
std::cin.ignore();
}
@@ -767,12 +767,12 @@ double Persistence_landscape::compute_value_at_a_given_point(unsigned level, dou
}
if (compute_value_at_a_given_pointDbg) {
- std::cout << "x : " << x << " is between : " << this->land[level][coordBegin].first << " a "
+ std::clog << "x : " << x << " is between : " << this->land[level][coordBegin].first << " a "
<< this->land[level][coordEnd].first << "\n";
- std::cout << "the y coords are : " << this->land[level][coordBegin].second << " a "
+ std::clog << "the y coords are : " << this->land[level][coordBegin].second << " a "
<< this->land[level][coordEnd].second << "\n";
- std::cerr << "coordBegin : " << coordBegin << "\n";
- std::cerr << "coordEnd : " << coordEnd << "\n";
+ std::clog << "coordBegin : " << coordBegin << "\n";
+ std::clog << "coordEnd : " << coordEnd << "\n";
std::cin.ignore();
}
return function_value(this->land[level][coordBegin], this->land[level][coordEnd], x);
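
compute_value_at_a_given_point bisects [coordBegin, coordEnd] until the two indices are adjacent, then delegates to function_value. A hedged sketch of what such a call plausibly evaluates, namely the line through two consecutive landscape points sampled at x (the helper name here is illustrative, not the library's):

    #include <utility>

    double line_through_points_at(std::pair<double, double> p1,
                                  std::pair<double, double> p2, double x) {
      // The bisection above guarantees p1.first != p2.first at this point.
      double a = (p2.second - p1.second) / (p2.first - p1.first);
      double b = p1.second - a * p1.first;
      return a * x + b;
    }
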
@@ -810,13 +810,13 @@ Persistence_landscape Persistence_landscape::abs() {
Persistence_landscape result;
for (size_t level = 0; level != this->land.size(); ++level) {
if (AbsDbg) {
- std::cout << "level: " << level << std::endl;
+ std::clog << "level: " << level << std::endl;
}
std::vector<std::pair<double, double> > lambda_n;
lambda_n.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
for (size_t i = 1; i != this->land[level].size(); ++i) {
if (AbsDbg) {
- std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
+ std::clog << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
<< this->land[level][i].second << std::endl;
}
// if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to
@@ -828,15 +828,15 @@ Persistence_landscape Persistence_landscape::abs() {
lambda_n.push_back(std::make_pair(zero, 0));
lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
if (AbsDbg) {
- std::cout << "Adding pair : (" << zero << ",0)" << std::endl;
- std::cout << "In the same step adding pair : (" << this->land[level][i].first << ","
+ std::clog << "Adding pair : (" << zero << ",0)" << std::endl;
+ std::clog << "In the same step adding pair : (" << this->land[level][i].first << ","
<< fabs(this->land[level][i].second) << ") " << std::endl;
std::cin.ignore();
}
} else {
lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
if (AbsDbg) {
- std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
+ std::clog << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
<< ") " << std::endl;
std::cin.ignore();
}
@@ -851,13 +851,13 @@ Persistence_landscape* Persistence_landscape::new_abs() {
Persistence_landscape* result = new Persistence_landscape(*this);
for (size_t level = 0; level != this->land.size(); ++level) {
if (AbsDbg) {
- std::cout << "level: " << level << std::endl;
+ std::clog << "level: " << level << std::endl;
}
std::vector<std::pair<double, double> > lambda_n;
lambda_n.push_back(std::make_pair(-std::numeric_limits<int>::max(), 0));
for (size_t i = 1; i != this->land[level].size(); ++i) {
if (AbsDbg) {
- std::cout << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
+ std::clog << "this->land[" << level << "][" << i << "] : " << this->land[level][i].first << " "
<< this->land[level][i].second << std::endl;
}
// if a line segment between this->land[level][i-1] and this->land[level][i] crosses the x-axis, then we have to
@@ -869,15 +869,15 @@ Persistence_landscape* Persistence_landscape::new_abs() {
lambda_n.push_back(std::make_pair(zero, 0));
lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
if (AbsDbg) {
- std::cout << "Adding pair : (" << zero << ",0)" << std::endl;
- std::cout << "In the same step adding pair : (" << this->land[level][i].first << ","
+ std::clog << "Adding pair : (" << zero << ",0)" << std::endl;
+ std::clog << "In the same step adding pair : (" << this->land[level][i].first << ","
<< fabs(this->land[level][i].second) << ") " << std::endl;
std::cin.ignore();
}
} else {
lambda_n.push_back(std::make_pair(this->land[level][i].first, fabs(this->land[level][i].second)));
if (AbsDbg) {
- std::cout << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
+ std::clog << "Adding pair : (" << this->land[level][i].first << "," << fabs(this->land[level][i].second)
<< ") " << std::endl;
std::cin.ignore();
}
@@ -943,11 +943,11 @@ void Persistence_landscape::load_landscape_from_file(const char* filename) {
lineSS >> endd;
landscapeAtThisLevel.push_back(std::make_pair(beginn, endd));
if (dbg) {
- std::cerr << "Reading a point : " << beginn << " , " << endd << std::endl;
+ std::clog << "Reading a point : " << beginn << " , " << endd << std::endl;
}
} else {
if (dbg) {
- std::cout << "IGNORE LINE\n";
+ std::clog << "IGNORE LINE\n";
getchar();
}
if (!isThisAFirsLine) {
@@ -975,7 +975,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
const Persistence_landscape& land2) {
bool operation_on_pair_of_landscapesDBG = false;
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "operation_on_pair_of_landscapes\n";
+ std::clog << "operation_on_pair_of_landscapes\n";
std::cin.ignore();
}
Persistence_landscape result;
@@ -985,8 +985,8 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
if (operation_on_pair_of_landscapesDBG) {
for (size_t i = 0; i != std::min(land1.land.size(), land2.land.size()); ++i) {
- std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
- std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
+ std::clog << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
+ std::clog << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
}
getchar();
}
@@ -997,20 +997,20 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
size_t q = 0;
while ((p + 1 < land1.land[i].size()) && (q + 1 < land2.land[i].size())) {
if (operation_on_pair_of_landscapesDBG) {
- std::cerr << "p : " << p << "\n";
- std::cerr << "q : " << q << "\n";
- std::cerr << "land1.land.size() : " << land1.land.size() << std::endl;
- std::cerr << "land2.land.size() : " << land2.land.size() << std::endl;
- std::cerr << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
- std::cerr << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
- std::cout << "land1.land[i][p].first : " << land1.land[i][p].first << "\n";
- std::cout << "land2.land[i][q].first : " << land2.land[i][q].first << "\n";
+ std::clog << "p : " << p << "\n";
+ std::clog << "q : " << q << "\n";
+ std::clog << "land1.land.size() : " << land1.land.size() << std::endl;
+ std::clog << "land2.land.size() : " << land2.land.size() << std::endl;
+ std::clog << "land1.land[" << i << "].size() : " << land1.land[i].size() << std::endl;
+ std::clog << "land2.land[" << i << "].size() : " << land2.land[i].size() << std::endl;
+ std::clog << "land1.land[i][p].first : " << land1.land[i][p].first << "\n";
+ std::clog << "land2.land[i][q].first : " << land2.land[i][q].first << "\n";
}
if (land1.land[i][p].first < land2.land[i][q].first) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "first \n";
- std::cout << " function_value(land2.land[i][q-1],land2.land[i][q],land1.land[i][p].first) : "
+ std::clog << "first \n";
+ std::clog << " function_value(land2.land[i][q-1],land2.land[i][q],land1.land[i][p].first) : "
<< function_value(land2.land[i][q - 1], land2.land[i][q], land1.land[i][p].first) << "\n";
}
lambda_n.push_back(
@@ -1022,12 +1022,12 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
}
if (land1.land[i][p].first > land2.land[i][q].first) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "Second \n";
- std::cout << "function_value(" << land1.land[i][p - 1].first << " " << land1.land[i][p - 1].second << " ,"
+ std::clog << "Second \n";
+ std::clog << "function_value(" << land1.land[i][p - 1].first << " " << land1.land[i][p - 1].second << " ,"
<< land1.land[i][p].first << " " << land1.land[i][p].second << ", " << land2.land[i][q].first
<< " ) : " << function_value(land1.land[i][p - 1], land1.land[i][p - 1], land2.land[i][q].first)
<< "\n";
- std::cout << "oper( " << function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first) << ","
+ std::clog << "oper( " << function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first) << ","
<< land2.land[i][q].second << " : "
<< oper(land2.land[i][q].second,
function_value(land1.land[i][p], land1.land[i][p - 1], land2.land[i][q].first))
@@ -1040,19 +1040,19 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
continue;
}
if (land1.land[i][p].first == land2.land[i][q].first) {
- if (operation_on_pair_of_landscapesDBG) std::cout << "Third \n";
+ if (operation_on_pair_of_landscapesDBG) std::clog << "Third \n";
lambda_n.push_back(
std::make_pair(land2.land[i][q].first, oper(land1.land[i][p].second, land2.land[i][q].second)));
++p;
++q;
}
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "Next iteration \n";
+ std::clog << "Next iteration \n";
}
}
while ((p + 1 < land1.land[i].size()) && (q + 1 >= land2.land[i].size())) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "New point : " << land1.land[i][p].first
+ std::clog << "New point : " << land1.land[i][p].first
<< " oper(land1.land[i][p].second,0) : " << oper(land1.land[i][p].second, 0) << std::endl;
}
lambda_n.push_back(std::make_pair(land1.land[i][p].first, oper(land1.land[i][p].second, 0)));
@@ -1060,7 +1060,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
}
while ((p + 1 >= land1.land[i].size()) && (q + 1 < land2.land[i].size())) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "New point : " << land2.land[i][q].first
+ std::clog << "New point : " << land2.land[i][q].first
<< " oper(0,land2.land[i][q].second) : " << oper(0, land2.land[i][q].second) << std::endl;
}
lambda_n.push_back(std::make_pair(land2.land[i][q].first, oper(0, land2.land[i][q].second)));
@@ -1073,7 +1073,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
}
if (land1.land.size() > std::min(land1.land.size(), land2.land.size())) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "land1.land.size() > std::min( land1.land.size() , land2.land.size() )" << std::endl;
+ std::clog << "land1.land.size() > std::min( land1.land.size() , land2.land.size() )" << std::endl;
}
for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size());
++i) {
@@ -1088,7 +1088,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
}
if (land2.land.size() > std::min(land1.land.size(), land2.land.size())) {
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "( land2.land.size() > std::min( land1.land.size() , land2.land.size() ) ) " << std::endl;
+ std::clog << "( land2.land.size() > std::min( land1.land.size() , land2.land.size() ) ) " << std::endl;
}
for (size_t i = std::min(land1.land.size(), land2.land.size()); i != std::max(land1.land.size(), land2.land.size());
++i) {
@@ -1102,7 +1102,7 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
}
}
if (operation_on_pair_of_landscapesDBG) {
- std::cout << "operation_on_pair_of_landscapes END\n";
+ std::clog << "operation_on_pair_of_landscapes END\n";
std::cin.ignore();
}
return result;
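
operation_on_pair_of_landscapes sweeps the breakpoints of both landscapes level by level: whichever abscissa comes first is emitted, with the other landscape interpolated there, and exhausted levels are extended by zero. A simplified, hedged sketch of that merge (boundary handling omitted; eval stands in for function_value; names are illustrative):

    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    using Level = std::vector<std::pair<double, double>>;

    Level combine(const Level& f, const Level& g,
                  const std::function<double(double, double)>& oper,
                  const std::function<double(const Level&, double)>& eval) {
      Level out;
      std::size_t p = 0, q = 0;
      while (p < f.size() && q < g.size()) {
        if (f[p].first < g[q].first) {
          // g has no breakpoint here: interpolate g at f's abscissa.
          out.emplace_back(f[p].first, oper(f[p].second, eval(g, f[p].first)));
          ++p;
        } else if (f[p].first > g[q].first) {
          out.emplace_back(g[q].first, oper(eval(f, g[q].first), g[q].second));
          ++q;
        } else {  // shared breakpoint: combine directly, advance both.
          out.emplace_back(f[p].first, oper(f[p].second, g[q].second));
          ++p;
          ++q;
        }
      }
      // Tails: the exhausted level is extended by zero, as in the trailing
      // while loops of the function above.
      for (; p < f.size(); ++p) out.emplace_back(f[p].first, oper(f[p].second, 0.));
      for (; q < g.size(); ++q) out.emplace_back(g[q].first, oper(0., g[q].second));
      return out;
    }
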
@@ -1110,20 +1110,20 @@ Persistence_landscape operation_on_pair_of_landscapes(const Persistence_landscap
double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1, const Persistence_landscape& pl2) {
bool dbg = false;
- if (dbg) std::cerr << " compute_maximal_distance_non_symmetric \n";
+ if (dbg) std::clog << " compute_maximal_distance_non_symmetric \n";
  // this distance is not symmetric. It computes ONLY the distance between the inflection points of pl1 and pl2.
double maxDist = 0;
size_t minimalNumberOfLevels = std::min(pl1.land.size(), pl2.land.size());
for (size_t level = 0; level != minimalNumberOfLevels; ++level) {
if (dbg) {
- std::cerr << "Level : " << level << std::endl;
- std::cerr << "PL1 : \n";
+ std::clog << "Level : " << level << std::endl;
+ std::clog << "PL1 : \n";
for (size_t i = 0; i != pl1.land[level].size(); ++i) {
- std::cerr << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n";
+ std::clog << "(" << pl1.land[level][i].first << "," << pl1.land[level][i].second << ") \n";
}
- std::cerr << "PL2 : \n";
+ std::clog << "PL2 : \n";
for (size_t i = 0; i != pl2.land[level].size(); ++i) {
- std::cerr << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n";
+ std::clog << "(" << pl2.land[level][i].first << "," << pl2.land[level][i].second << ") \n";
}
std::cin.ignore();
}
@@ -1143,24 +1143,24 @@ double compute_maximal_distance_non_symmetric(const Persistence_landscape& pl1,
if (maxDist <= val) maxDist = val;
if (dbg) {
- std::cerr << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << ","
+ std::clog << pl1.land[level][i].first << "in [" << pl2.land[level][p2Count].first << ","
<< pl2.land[level][p2Count + 1].first << "] \n";
- std::cerr << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
- std::cerr << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : "
+ std::clog << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
+ std::clog << "function_value( pl2[level][p2Count] , pl2[level][p2Count+1] , pl1[level][i].first ) : "
<< function_value(pl2.land[level][p2Count], pl2.land[level][p2Count + 1], pl1.land[level][i].first)
<< std::endl;
- std::cerr << "val : " << val << std::endl;
+ std::clog << "val : " << val << std::endl;
std::cin.ignore();
}
}
}
- if (dbg) std::cerr << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl;
+ if (dbg) std::clog << "minimalNumberOfLevels : " << minimalNumberOfLevels << std::endl;
if (minimalNumberOfLevels < pl1.land.size()) {
for (size_t level = minimalNumberOfLevels; level != pl1.land.size(); ++level) {
for (size_t i = 0; i != pl1.land[level].size(); ++i) {
- if (dbg) std::cerr << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
+ if (dbg) std::clog << "pl1[level][i].second : " << pl1.land[level][i].second << std::endl;
if (maxDist < pl1.land[level][i].second) maxDist = pl1.land[level][i].second;
}
}
@@ -1181,7 +1181,7 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const
lan = lan.abs();
if (dbg) {
- std::cerr << "Abs of difference ; " << lan << std::endl;
+ std::clog << "Abs of difference ; " << lan << std::endl;
getchar();
}
@@ -1189,17 +1189,17 @@ double compute_distance_of_landscapes(const Persistence_landscape& first, const
// \int_{- \infty}^{+\infty}| first-second |^p
double result;
if (p != 1) {
- if (dbg) std::cerr << "Power != 1, compute integral to the power p\n";
+ if (dbg) std::clog << "Power != 1, compute integral to the power p\n";
result = lan.compute_integral_of_landscape(p);
} else {
- if (dbg) std::cerr << "Power = 1, compute integral \n";
+ if (dbg) std::clog << "Power = 1, compute integral \n";
result = lan.compute_integral_of_landscape();
}
// (\int_{- \infty}^{+\infty}| first-second |^p)^(1/p)
return pow(result, 1.0 / p);
} else {
// p == infty
- if (dbg) std::cerr << "Power = infty, compute maximum \n";
+ if (dbg) std::clog << "Power = infty, compute maximum \n";
return lan.compute_maximum();
}
}
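
For reference, the quantity assembled by compute_distance_of_landscapes is the p-landscape distance, with the integral summed over all landscape levels; in LaTeX:

    d_p(\lambda_1, \lambda_2) = \left( \int_{-\infty}^{+\infty} |\lambda_1(x) - \lambda_2(x)|^p \, dx \right)^{1/p},
    \qquad
    d_\infty(\lambda_1, \lambda_2) = \sup_x |\lambda_1(x) - \lambda_2(x)|.
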
@@ -1220,7 +1220,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_
for (size_t level = 0; level != std::min(l1.size(), l2.size()); ++level) {
if (dbg) {
- std::cerr << "Computing inner product for a level : " << level << std::endl;
+ std::clog << "Computing inner product for a level : " << level << std::endl;
getchar();
}
auto&& l1_land_level = l1.land[level];
@@ -1267,14 +1267,14 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_
result += contributionFromThisPart;
if (dbg) {
- std::cerr << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first
+ std::clog << "[l1_land_level[l1It].first,l1_land_level[l1It+1].first] : " << l1_land_level[l1It].first
<< " , " << l1_land_level[l1It + 1].first << std::endl;
- std::cerr << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first
+ std::clog << "[l2_land_level[l2It].first,l2_land_level[l2It+1].first] : " << l2_land_level[l2It].first
<< " , " << l2_land_level[l2It + 1].first << std::endl;
- std::cerr << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl;
- std::cerr << "x1 : " << x1 << " , x2 : " << x2 << std::endl;
- std::cerr << "contributionFromThisPart : " << contributionFromThisPart << std::endl;
- std::cerr << "result : " << result << std::endl;
+ std::clog << "a : " << a << ", b : " << b << " , c: " << c << ", d : " << d << std::endl;
+ std::clog << "x1 : " << x1 << " , x2 : " << x2 << std::endl;
+ std::clog << "contributionFromThisPart : " << contributionFromThisPart << std::endl;
+ std::clog << "result : " << result << std::endl;
getchar();
}
@@ -1290,11 +1290,11 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_
// in this case, we increment both:
++l2It;
if (dbg) {
- std::cerr << "Incrementing both \n";
+ std::clog << "Incrementing both \n";
}
} else {
if (dbg) {
- std::cerr << "Incrementing first \n";
+ std::clog << "Incrementing first \n";
}
}
++l1It;
@@ -1302,7 +1302,7 @@ double compute_inner_product(const Persistence_landscape& l1, const Persistence_
// in this case we increment l2It
++l2It;
if (dbg) {
- std::cerr << "Incrementing second \n";
+ std::clog << "Incrementing second \n";
}
}
@@ -1361,7 +1361,7 @@ void Persistence_landscape::plot(const char* filename, double xRangeBegin, doubl
}
out << "EOF" << std::endl;
}
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
}
diff --git a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h
index 68bce336..537131da 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_landscape_on_grid.h
@@ -155,9 +155,9 @@ class Persistence_landscape_on_grid {
double dx = (this->grid_max - this->grid_min) / static_cast<double>(this->values_of_landscapes.size() - 1);
if (dbg) {
- std::cerr << "this->grid_max : " << this->grid_max << std::endl;
- std::cerr << "this->grid_min : " << this->grid_min << std::endl;
- std::cerr << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl;
+ std::clog << "this->grid_max : " << this->grid_max << std::endl;
+ std::clog << "this->grid_min : " << this->grid_min << std::endl;
+ std::clog << "this->values_of_landscapes.size() : " << this->values_of_landscapes.size() << std::endl;
getchar();
}
@@ -169,14 +169,14 @@ class Persistence_landscape_on_grid {
if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level];
if (dbg) {
- std::cerr << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size()
+ std::clog << "this->values_of_landscapes[i].size() : " << this->values_of_landscapes[i].size()
<< " , level : " << level << std::endl;
if (this->values_of_landscapes[i].size() > level)
- std::cerr << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl;
- std::cerr << "previous_y : " << previous_y << std::endl;
- std::cerr << "current_y : " << current_y << std::endl;
- std::cerr << "dx : " << dx << std::endl;
- std::cerr << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl;
+ std::clog << "this->values_of_landscapes[i][level] : " << this->values_of_landscapes[i][level] << std::endl;
+ std::clog << "previous_y : " << previous_y << std::endl;
+ std::clog << "current_y : " << current_y << std::endl;
+ std::clog << "dx : " << dx << std::endl;
+ std::clog << "0.5*dx*( previous_y + current_y ); " << 0.5 * dx * (previous_y + current_y) << std::endl;
}
result += 0.5 * dx * (previous_y + current_y);
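
On the grid representation the integral is the composite trapezoid rule over uniformly spaced samples, exactly the 0.5 * dx * (previous_y + current_y) accumulation above. A self-contained sketch (not the class API):

    #include <cstddef>
    #include <vector>

    // Composite trapezoid rule on a uniform grid with spacing dx.
    double trapezoid_integral(const std::vector<double>& y, double dx) {
      double result = 0.;
      for (std::size_t i = 1; i < y.size(); ++i) {
        result += 0.5 * dx * (y[i - 1] + y[i]);
      }
      return result;
    }
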
@@ -213,10 +213,10 @@ class Persistence_landscape_on_grid {
if (this->values_of_landscapes[0].size() > level) previous_y = this->values_of_landscapes[0][level];
if (dbg) {
- std::cerr << "dx : " << dx << std::endl;
- std::cerr << "previous_x : " << previous_x << std::endl;
- std::cerr << "previous_y : " << previous_y << std::endl;
- std::cerr << "power : " << p << std::endl;
+ std::clog << "dx : " << dx << std::endl;
+ std::clog << "previous_x : " << previous_x << std::endl;
+ std::clog << "previous_y : " << previous_y << std::endl;
+ std::clog << "power : " << p << std::endl;
getchar();
}
@@ -225,7 +225,7 @@ class Persistence_landscape_on_grid {
double current_y = 0;
if (this->values_of_landscapes[i].size() > level) current_y = this->values_of_landscapes[i][level];
- if (dbg) std::cerr << "current_y : " << current_y << std::endl;
+ if (dbg) std::clog << "current_y : " << current_y << std::endl;
if (current_y == previous_y) continue;
@@ -235,7 +235,7 @@ class Persistence_landscape_on_grid {
double b = coef.second;
if (dbg) {
- std::cerr << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x
+ std::clog << "A line passing through points : (" << previous_x << "," << previous_y << ") and (" << current_x
<< "," << current_y << ") is : " << a << "x+" << b << std::endl;
}
@@ -249,14 +249,14 @@ class Persistence_landscape_on_grid {
}
result += value_to_add;
if (dbg) {
- std::cerr << "Increasing result by : " << value_to_add << std::endl;
- std::cerr << "result : " << result << std::endl;
+ std::clog << "Increasing result by : " << value_to_add << std::endl;
+ std::clog << "result : " << result << std::endl;
getchar();
}
previous_x = current_x;
previous_y = current_y;
}
- if (dbg) std::cerr << "The total result is : " << result << std::endl;
+ if (dbg) std::clog << "The total result is : " << result << std::endl;
return result;
}
@@ -297,10 +297,10 @@ class Persistence_landscape_on_grid {
size_t position = size_t((x - this->grid_min) / dx);
if (dbg) {
- std::cerr << "This is a procedure compute_value_at_a_given_point \n";
- std::cerr << "level : " << level << std::endl;
- std::cerr << "x : " << x << std::endl;
- std::cerr << "position : " << position << std::endl;
+ std::clog << "This is a procedure compute_value_at_a_given_point \n";
+ std::clog << "level : " << level << std::endl;
+ std::clog << "x : " << x << std::endl;
+ std::clog << "position : " << position << std::endl;
}
// check if we are not exactly in the grid point:
if (almost_equal(position * dx + this->grid_min, x)) {
@@ -432,23 +432,23 @@ class Persistence_landscape_on_grid {
bool operator==(const Persistence_landscape_on_grid& rhs) const {
bool dbg = true;
if (this->values_of_landscapes.size() != rhs.values_of_landscapes.size()) {
- if (dbg) std::cerr << "values_of_landscapes of incompatible sizes\n";
+ if (dbg) std::clog << "values_of_landscapes of incompatible sizes\n";
return false;
}
if (!almost_equal(this->grid_min, rhs.grid_min)) {
- if (dbg) std::cerr << "grid_min not equal\n";
+ if (dbg) std::clog << "grid_min not equal\n";
return false;
}
if (!almost_equal(this->grid_max, rhs.grid_max)) {
- if (dbg) std::cerr << "grid_max not equal\n";
+ if (dbg) std::clog << "grid_max not equal\n";
return false;
}
for (size_t i = 0; i != this->values_of_landscapes.size(); ++i) {
for (size_t aa = 0; aa != this->values_of_landscapes[i].size(); ++aa) {
if (!almost_equal(this->values_of_landscapes[i][aa], rhs.values_of_landscapes[i][aa])) {
if (dbg) {
- std::cerr << "Problem in the position : " << i << " of values_of_landscapes. \n";
- std::cerr << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl;
+ std::clog << "Problem in the position : " << i << " of values_of_landscapes. \n";
+ std::clog << this->values_of_landscapes[i][aa] << " " << rhs.values_of_landscapes[i][aa] << std::endl;
}
return false;
}
@@ -615,7 +615,7 @@ class Persistence_landscape_on_grid {
double previous_y_l1 = 0;
double previous_y_l2 = 0;
for (size_t i = 0; i != l1.values_of_landscapes.size(); ++i) {
- if (dbg) std::cerr << "i : " << i << std::endl;
+ if (dbg) std::clog << "i : " << i << std::endl;
double current_x = previous_x + dx;
double current_y_l1 = 0;
@@ -625,11 +625,11 @@ class Persistence_landscape_on_grid {
if (l2.values_of_landscapes[i].size() > level) current_y_l2 = l2.values_of_landscapes[i][level];
if (dbg) {
- std::cerr << "previous_x : " << previous_x << std::endl;
- std::cerr << "previous_y_l1 : " << previous_y_l1 << std::endl;
- std::cerr << "current_y_l1 : " << current_y_l1 << std::endl;
- std::cerr << "previous_y_l2 : " << previous_y_l2 << std::endl;
- std::cerr << "current_y_l2 : " << current_y_l2 << std::endl;
+ std::clog << "previous_x : " << previous_x << std::endl;
+ std::clog << "previous_y_l1 : " << previous_y_l1 << std::endl;
+ std::clog << "current_y_l1 : " << current_y_l1 << std::endl;
+ std::clog << "previous_y_l2 : " << previous_y_l2 << std::endl;
+ std::clog << "current_y_l2 : " << current_y_l2 << std::endl;
}
std::pair<double, double> l1_coords = compute_parameters_of_a_line(std::make_pair(previous_x, previous_y_l1),
@@ -646,11 +646,11 @@ class Persistence_landscape_on_grid {
double d = l2_coords.second;
if (dbg) {
- std::cerr << "Here are the formulas for a line: \n";
- std::cerr << "a : " << a << std::endl;
- std::cerr << "b : " << b << std::endl;
- std::cerr << "c : " << c << std::endl;
- std::cerr << "d : " << d << std::endl;
+ std::clog << "Here are the formulas for a line: \n";
+ std::clog << "a : " << a << std::endl;
+ std::clog << "b : " << b << std::endl;
+ std::clog << "c : " << c << std::endl;
+ std::clog << "d : " << d << std::endl;
}
// now, to compute the inner product in this interval we need to compute the integral of (ax+b)(cx+d) = acx^2 +
@@ -663,11 +663,11 @@ class Persistence_landscape_on_grid {
(a * d + b * c) / 2 * previous_x * previous_x + b * d * previous_x);
if (dbg) {
- std::cerr << "Value of the integral on the left end i.e. : " << previous_x << " is : "
+ std::clog << "Value of the integral on the left end i.e. : " << previous_x << " is : "
<< a * c / 3 * previous_x * previous_x * previous_x + (a * d + b * c) / 2 * previous_x * previous_x +
b * d * previous_x
<< std::endl;
- std::cerr << "Value of the integral on the right end i.e. : " << current_x << " is "
+ std::clog << "Value of the integral on the right end i.e. : " << current_x << " is "
<< a * c / 3 * current_x * current_x * current_x + (a * d + b * c) / 2 * current_x * current_x +
b * d * current_x
<< std::endl;
@@ -676,8 +676,8 @@ class Persistence_landscape_on_grid {
result += added_value;
if (dbg) {
- std::cerr << "added_value : " << added_value << std::endl;
- std::cerr << "result : " << result << std::endl;
+ std::clog << "added_value : " << added_value << std::endl;
+ std::clog << "result : " << result << std::endl;
getchar();
}
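
The truncated comment above expands to the integrand (a*x + b) * (c*x + d) = a*c*x^2 + (a*d + b*c)*x + b*d, whose antiderivative is the cubic evaluated at both interval ends in the code. A hedged sketch of that per-interval contribution (illustrative helper, not the class API):

    // F(x) = a*c/3 * x^3 + (a*d + b*c)/2 * x^2 + b*d * x is the antiderivative
    // of (a*x + b) * (c*x + d); the interval [x0, x1] contributes F(x1) - F(x0).
    double segment_inner_product(double a, double b, double c, double d,
                                 double x0, double x1) {
      auto F = [&](double x) {
        return a * c / 3. * x * x * x + (a * d + b * c) / 2. * x * x + b * d * x;
      };
      return F(x1) - F(x0);
    }
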
@@ -703,8 +703,8 @@ class Persistence_landscape_on_grid {
// time:
if (dbg) {
- std::cerr << "first : " << first << std::endl;
- std::cerr << "second : " << second << std::endl;
+ std::clog << "first : " << first << std::endl;
+ std::clog << "second : " << second << std::endl;
getchar();
}
@@ -712,14 +712,14 @@ class Persistence_landscape_on_grid {
Persistence_landscape_on_grid lan = first - second;
if (dbg) {
- std::cerr << "Difference : " << lan << std::endl;
+ std::clog << "Difference : " << lan << std::endl;
}
//| first-second |:
lan.abs();
if (dbg) {
- std::cerr << "Abs : " << lan << std::endl;
+ std::clog << "Abs : " << lan << std::endl;
}
if (p < std::numeric_limits<double>::max()) {
@@ -727,18 +727,18 @@ class Persistence_landscape_on_grid {
double result;
if (p != 1) {
if (dbg) {
- std::cerr << "p : " << p << std::endl;
+ std::clog << "p : " << p << std::endl;
getchar();
}
result = lan.compute_integral_of_landscape(p);
if (dbg) {
- std::cerr << "integral : " << result << std::endl;
+ std::clog << "integral : " << result << std::endl;
getchar();
}
} else {
result = lan.compute_integral_of_landscape();
if (dbg) {
- std::cerr << "integral, without power : " << result << std::endl;
+ std::clog << "integral, without power : " << result << std::endl;
getchar();
}
}
@@ -820,7 +820,7 @@ class Persistence_landscape_on_grid {
this->grid_max = (to_average[0])->grid_max;
if (dbg) {
- std::cerr << "Computations of average. The data from the current landscape have been cleared. We are ready to do "
+ std::clog << "Computations of average. The data from the current landscape have been cleared. We are ready to do "
"the computations. \n";
}
@@ -835,7 +835,7 @@ class Persistence_landscape_on_grid {
this->values_of_landscapes[grid_point] = std::vector<double>(maximal_size_of_vector);
if (dbg) {
- std::cerr << "We are considering the point : " << grid_point
+ std::clog << "We are considering the point : " << grid_point
<< " of the grid. In this point, there are at most : " << maximal_size_of_vector
<< " nonzero landscape functions \n";
}
@@ -931,12 +931,12 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto
size_t number_of_points_, unsigned number_of_levels) {
bool dbg = false;
if (dbg) {
- std::cerr << "Here is the procedure : set_up_values_of_landscapes. The parameters are : grid_min_ : " << grid_min_
+ std::clog << "Here is the procedure : set_up_values_of_landscapes. The parameters are : grid_min_ : " << grid_min_
<< ", grid_max_ : " << grid_max_ << ", number_of_points_ : " << number_of_points_
<< ", number_of_levels: " << number_of_levels << std::endl;
- std::cerr << "Here are the intervals at our disposal : \n";
+ std::clog << "Here are the intervals at our disposal : \n";
for (size_t i = 0; i != p.size(); ++i) {
- std::cerr << p[i].first << " , " << p[i].second << std::endl;
+ std::clog << p[i].first << " , " << p[i].second << std::endl;
}
}
@@ -976,17 +976,17 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto
size_t grid_interval_midpoint = (size_t)(0.5 * (grid_interval_begin + grid_interval_end));
if (dbg) {
- std::cerr << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl;
+ std::clog << "Considering an interval : " << p[int_no].first << "," << p[int_no].second << std::endl;
- std::cerr << "grid_interval_begin : " << grid_interval_begin << std::endl;
- std::cerr << "grid_interval_end : " << grid_interval_end << std::endl;
- std::cerr << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl;
+ std::clog << "grid_interval_begin : " << grid_interval_begin << std::endl;
+ std::clog << "grid_interval_end : " << grid_interval_end << std::endl;
+ std::clog << "grid_interval_midpoint : " << grid_interval_midpoint << std::endl;
}
double landscape_value = dx;
for (size_t i = grid_interval_begin + 1; i < grid_interval_midpoint; ++i) {
if (dbg) {
- std::cerr << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value
+ std::clog << "Adding landscape value (going up) for a point : " << i << " equal : " << landscape_value
<< std::endl;
}
if (number_of_levels != std::numeric_limits<unsigned>::max()) {
@@ -1044,7 +1044,7 @@ void Persistence_landscape_on_grid::set_up_values_of_landscapes(const std::vecto
}
if (dbg) {
- std::cerr << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value
+ std::clog << "Adding landscape value (going down) for a point : " << i << " equal : " << landscape_value
<< std::endl;
}
}
@@ -1246,7 +1246,7 @@ void Persistence_landscape_on_grid::plot(const char* filename, double min_x, dou
}
out << "EOF" << std::endl;
}
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
}
diff --git a/src/Persistence_representations/include/gudhi/Persistence_vectors.h b/src/Persistence_representations/include/gudhi/Persistence_vectors.h
index 6776f4a3..fab96900 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_vectors.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_vectors.h
@@ -189,7 +189,7 @@ class Vector_distances_in_diagram {
}
out << std::endl;
out.close();
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
}
@@ -360,9 +360,9 @@ template <typename F>
void Vector_distances_in_diagram<F>::compute_sorted_vector_of_distances_via_heap(size_t where_to_cut) {
bool dbg = false;
if (dbg) {
- std::cerr << "Here are the intervals : \n";
+ std::clog << "Here are the intervals : \n";
for (size_t i = 0; i != this->intervals.size(); ++i) {
- std::cerr << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
+ std::clog << this->intervals[i].first << " , " << this->intervals[i].second << std::endl;
}
}
where_to_cut = std::min(
@@ -385,14 +385,14 @@ void Vector_distances_in_diagram<F>::compute_sorted_vector_of_distances_via_heap
0.5 * (this->intervals[j].first + this->intervals[j].second)))));
if (dbg) {
- std::cerr << "Value : " << value << std::endl;
- std::cerr << "heap.front() : " << heap.front() << std::endl;
+ std::clog << "Value : " << value << std::endl;
+ std::clog << "heap.front() : " << heap.front() << std::endl;
getchar();
}
if (-value < heap.front()) {
if (dbg) {
- std::cerr << "Replacing : " << heap.front() << " with : " << -value << std::endl;
+ std::clog << "Replacing : " << heap.front() << " with : " << -value << std::endl;
getchar();
}
// remove the first element from the heap
@@ -431,11 +431,11 @@ void Vector_distances_in_diagram<F>::compute_sorted_vector_of_distances_via_heap
}
if (dbg) {
- std::cerr << "This is the heap after all the operations :\n";
+ std::clog << "This is the heap after all the operations :\n";
for (size_t i = 0; i != heap.size(); ++i) {
- std::cout << heap[i] << " ";
+ std::clog << heap[i] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
this->sorted_vector_of_distances = heap;
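
The heap code above keeps only the where_to_cut largest distances by storing negated values, so the heap front is the smallest retained entry and any larger incoming value evicts it. An equivalent, hedged sketch of the same top-k idea with std::priority_queue (illustrative, not the class API):

    #include <cstddef>
    #include <functional>
    #include <queue>
    #include <vector>

    // Keep the k largest values with a min-heap: the root is the smallest
    // value retained, so any larger incoming value replaces it.
    std::vector<double> k_largest(const std::vector<double>& values, std::size_t k) {
      std::priority_queue<double, std::vector<double>, std::greater<double>> heap;
      for (double v : values) {
        if (heap.size() < k) {
          heap.push(v);
        } else if (!heap.empty() && v > heap.top()) {
          heap.pop();
          heap.push(v);
        }
      }
      std::vector<double> ascending;
      while (!heap.empty()) {
        ascending.push_back(heap.top());
        heap.pop();
      }
      return {ascending.rbegin(), ascending.rend()};  // descending order
    }
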
@@ -519,11 +519,11 @@ double Vector_distances_in_diagram<F>::distance(const Vector_distances_in_diagra
bool dbg = false;
if (dbg) {
- std::cerr << "Entering double Vector_distances_in_diagram<F>::distance( const Abs_Topological_data_with_distances* "
+ std::clog << "Entering double Vector_distances_in_diagram<F>::distance( const Abs_Topological_data_with_distances* "
"second , double power ) procedure \n";
- std::cerr << "Power : " << power << std::endl;
- std::cerr << "This : " << *this << std::endl;
- std::cerr << "second : " << second_ << std::endl;
+ std::clog << "Power : " << power << std::endl;
+ std::clog << "This : " << *this << std::endl;
+ std::clog << "second : " << second_ << std::endl;
}
double result = 0;
@@ -531,7 +531,7 @@ double Vector_distances_in_diagram<F>::distance(const Vector_distances_in_diagra
++i) {
if (power == 1) {
if (dbg) {
- std::cerr << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
+ std::clog << "|" << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
<< " | : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i])
<< std::endl;
}
@@ -545,7 +545,7 @@ double Vector_distances_in_diagram<F>::distance(const Vector_distances_in_diagra
result = fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i]);
}
if (dbg) {
- std::cerr << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
+ std::clog << "| " << this->sorted_vector_of_distances[i] << " - " << second_.sorted_vector_of_distances[i]
<< " : " << fabs(this->sorted_vector_of_distances[i] - second_.sorted_vector_of_distances[i])
<< std::endl;
}
diff --git a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h
index 5c2d2038..a5bc1bca 100644
--- a/src/Persistence_representations/include/gudhi/read_persistence_from_file.h
+++ b/src/Persistence_representations/include/gudhi/read_persistence_from_file.h
@@ -50,28 +50,28 @@ std::vector<std::pair<double, double> > read_persistence_intervals_in_one_dimens
final_barcode.reserve(barcode_initial.size());
if (dbg) {
- std::cerr << "Here are the intervals that we read from the file : \n";
+ std::clog << "Here are the intervals that we read from the file : \n";
for (size_t i = 0; i != barcode_initial.size(); ++i) {
- std::cout << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl;
+ std::clog << barcode_initial[i].first << " " << barcode_initial[i].second << std::endl;
}
getchar();
}
for (size_t i = 0; i != barcode_initial.size(); ++i) {
if (dbg) {
- std::cout << "COnsidering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second
+ std::clog << "Considering interval : " << barcode_initial[i].first << " " << barcode_initial[i].second
<< std::endl;
}
if (barcode_initial[i].first > barcode_initial[i].second) {
// note that in this case barcode_initial[i].second != std::numeric_limits<double>::infinity()
- if (dbg) std::cout << "Swap and enter \n";
+ if (dbg) std::clog << "Swap and enter \n";
// swap them to make sure that birth < death
final_barcode.push_back(std::pair<double, double>(barcode_initial[i].second, barcode_initial[i].first));
continue;
} else {
if (barcode_initial[i].second != std::numeric_limits<double>::infinity()) {
- if (dbg) std::cout << "Simply enters\n";
+ if (dbg) std::clog << "Simply enters\n";
// in this case, due to the previous conditions we know that barcode_initial[i].first <
// barcode_initial[i].second, so we put them as they are
final_barcode.push_back(std::pair<double, double>(barcode_initial[i].first, barcode_initial[i].second));
@@ -91,11 +91,11 @@ std::vector<std::pair<double, double> > read_persistence_intervals_in_one_dimens
}
if (dbg) {
- std::cerr << "Here are the final bars that we are sending further : \n";
+ std::clog << "Here are the final bars that we are sending further : \n";
for (size_t i = 0; i != final_barcode.size(); ++i) {
- std::cout << final_barcode[i].first << " " << final_barcode[i].second << std::endl;
+ std::clog << final_barcode[i].first << " " << final_barcode[i].second << std::endl;
}
- std::cerr << "final_barcode.size() : " << final_barcode.size() << std::endl;
+ std::clog << "final_barcode.size() : " << final_barcode.size() << std::endl;
getchar();
}
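
The loop above normalizes the raw intervals read from file: reversed pairs are swapped so that birth < death, and finite intervals are kept (the full reader also applies a policy for intervals dying at infinity, elided here). A condensed, hedged sketch:

    #include <limits>
    #include <utility>
    #include <vector>

    std::vector<std::pair<double, double>> normalize_barcode(
        std::vector<std::pair<double, double>> bars) {
      std::vector<std::pair<double, double>> out;
      out.reserve(bars.size());
      for (auto& b : bars) {
        if (b.first > b.second) std::swap(b.first, b.second);  // ensure birth < death
        if (b.second != std::numeric_limits<double>::infinity()) out.push_back(b);
      }
      return out;
    }
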
diff --git a/src/Persistence_representations/test/persistence_heat_maps_test.cpp b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
index b3240758..bf531773 100644
--- a/src/Persistence_representations/test/persistence_heat_maps_test.cpp
+++ b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
@@ -78,7 +78,7 @@ BOOST_AUTO_TEST_CASE(check_compute_percentage_of_active_of_heat_maps) {
to_compute_percentage_of_active.push_back(&q);
to_compute_percentage_of_active.push_back(&r);
Persistence_heat_maps<constant_scaling_function> percentage_of_active;
- percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0.1);
+ percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0);
Persistence_heat_maps<constant_scaling_function> template_percentage_of_active;
template_percentage_of_active.load_from_file("data/template_percentage_of_active_of_heat_maps");
diff --git a/src/Persistence_representations/test/persistence_lanscapes_test.cpp b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
index 21ef18a0..59924f16 100644
--- a/src/Persistence_representations/test/persistence_lanscapes_test.cpp
+++ b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
@@ -238,7 +238,7 @@ if ( argc != 2 )
double integral = p.compute_integral_of_landscape();
cout << "integral : " << integral <<endl;
- //compute integral for each level separatelly
+ //compute integral for each level separately
for ( size_t level = 0 ; level != p.size() ; ++level )
{
cout << p.compute_integral_of_landscape( level ) << endl;
diff --git a/src/Persistence_representations/utilities/CMakeLists.txt b/src/Persistence_representations/utilities/CMakeLists.txt
index fc51b1d6..85633b7b 100644
--- a/src/Persistence_representations/utilities/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/CMakeLists.txt
@@ -14,7 +14,7 @@ function(add_persistence_representation_creation_utility creation_utility)
install(TARGETS ${creation_utility} DESTINATION bin)
endfunction(add_persistence_representation_creation_utility)
-function(add_persistence_representation_plot_utility plot_utility tool_extension)
+function(add_persistence_representation_plot_utility creation_utility plot_utility tool_extension)
add_executable ( ${plot_utility} ${plot_utility}.cpp )
# as the function is called in a subdirectory level, need to '../' to find persistence heat maps files
@@ -22,17 +22,21 @@ function(add_persistence_representation_plot_utility plot_utility tool_extension
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}")
#add_test(NAME Persistence_representation_utilities_${plot_utility}_second COMMAND $<TARGET_FILE:${plot_utility}>
# "${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}")
+ set_tests_properties(Persistence_representation_utilities_${plot_utility}_first PROPERTIES DEPENDS
+ Persistence_representation_utilities_${creation_utility})
if(GNUPLOT_PATH)
add_test(NAME Persistence_representation_utilities_${plot_utility}_first_gnuplot COMMAND ${GNUPLOT_PATH}
"-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}_GnuplotScript'")
#add_test(NAME Persistence_representation_utilities_${plot_utility}_second_gnuplot COMMAND ${GNUPLOT_PATH}
# "-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}_GnuplotScript'")
+ set_tests_properties(Persistence_representation_utilities_${plot_utility}_first_gnuplot PROPERTIES DEPENDS
+ Persistence_representation_utilities_${plot_utility}_first)
endif()
install(TARGETS ${plot_utility} DESTINATION bin)
endfunction(add_persistence_representation_plot_utility)
-function(add_persistence_representation_function_utility function_utility tool_extension)
+function(add_persistence_representation_function_utility creation_utility function_utility tool_extension)
add_executable ( ${function_utility} ${function_utility}.cpp )
# ARGV2 is an optional argument
@@ -48,6 +52,8 @@ function(add_persistence_representation_function_utility function_utility tool_e
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}"
"${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}")
endif()
+ set_tests_properties(Persistence_representation_utilities_${function_utility} PROPERTIES DEPENDS
+ Persistence_representation_utilities_${creation_utility})
install(TARGETS ${function_utility} DESTINATION bin)
endfunction(add_persistence_representation_function_utility)
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
index 89ef232f..e4c471c2 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
@@ -2,13 +2,24 @@ project(Persistence_representations_heat_maps_utilities)
add_persistence_representation_creation_utility(create_pssk "10" "-1" "-1" "4" "-1")
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_arctan_of_their_persistence "10" "-1" "-1" "4" "-1")
+
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_distance_from_diagonal "10" "-1" "-1" "4" "-1")
+# Tests output the same file
+set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_arctan_of_their_persistence)
+
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_squared_diag_distance "10" "-1" "-1" "4" "-1")
+# Tests output the same file
+set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal)
+
# Need to set grid min and max for further average, distance and scalar_product
add_persistence_representation_creation_utility(create_persistence_heat_maps "10" "0" "35" "10" "-1")
+set_tests_properties(Persistence_representation_utilities_create_persistence_heat_maps PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance)
-add_persistence_representation_plot_utility(plot_persistence_heat_map ".mps")
+add_persistence_representation_plot_utility(create_persistence_heat_maps plot_persistence_heat_map ".mps")
-add_persistence_representation_function_utility(average_persistence_heat_maps ".mps")
-add_persistence_representation_function_utility(compute_distance_of_persistence_heat_maps ".mps" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_persistence_heat_maps ".mps")
+add_persistence_representation_function_utility(create_persistence_heat_maps average_persistence_heat_maps ".mps")
+add_persistence_representation_function_utility(create_persistence_heat_maps compute_distance_of_persistence_heat_maps ".mps" "1")
+add_persistence_representation_function_utility(create_persistence_heat_maps compute_scalar_product_of_persistence_heat_maps ".mps")
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp
index 3d088b58..54b1f77d 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/average_persistence_heat_maps.cpp
@@ -17,12 +17,12 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<constant_scaling_function>;
int main(int argc, char** argv) {
- std::cout << "This program computes average of persistence heat maps stored in files (the files needs to be "
+ std::clog << "This program computes average of persistence heat maps stored in files (the files needs to be "
<< "created beforehand).\n"
<< "The parameters of this programs are names of files with persistence heat maps.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -46,6 +46,6 @@ int main(int argc, char** argv) {
delete maps[i];
}
- std::cout << "Average can be found in 'average.mps' file\n";
+ std::clog << "Average can be found in 'average.mps' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
index 48000bb1..757a97fc 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
@@ -19,14 +19,14 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<constant_scaling_function>;
int main(int argc, char** argv) {
- std::cout << "This program computes distance of persistence heat maps stored in files (the files needs to be "
+ std::clog << "This program computes distance of persistence heat maps stored in files (the files needs to be "
<< "created beforehand).\n"
<< "The first parameter of a program is an integer p. The program compute L^p distance of the two heat "
<< "maps. For L^infty distance choose p = -1. \n"
<< "The remaining parameters of this program are names of files with persistence heat maps.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -69,14 +69,14 @@ int main(int argc, char** argv) {
out.open("distance.mps");
for (size_t i = 0; i != distance.size(); ++i) {
for (size_t j = 0; j != distance.size(); ++j) {
- std::cout << distance[i][j] << " ";
+ std::clog << distance[i][j] << " ";
out << distance[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'distance.mps' file\n";
+ std::clog << "Distance can be found in 'distance.mps' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
index 8a96f1b0..e7f18ce1 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
@@ -18,12 +18,12 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<constant_scaling_function>;
int main(int argc, char** argv) {
- std::cout << "This program computes scalar product of persistence heat maps stored in a file (the file needs to be "
+ std::clog << "This program computes scalar product of persistence heat maps stored in a file (the file needs to be "
<< "created beforehand). \n"
<< "The parameters of this programs are names of files with persistence heat maps.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -60,14 +60,14 @@ int main(int argc, char** argv) {
out.open("scalar_product.mps");
for (size_t i = 0; i != scalar_product.size(); ++i) {
for (size_t j = 0; j != scalar_product.size(); ++j) {
- std::cout << scalar_product[i][j] << " ";
+ std::clog << scalar_product[i][j] << " ";
out << scalar_product[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'scalar_product.mps' file\n";
+ std::clog << "Distance can be found in 'scalar_product.mps' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
index f82a39b0..6b38b930 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
@@ -20,7 +20,7 @@ using Persistence_heat_maps =
Gudhi::Persistence_representations::Persistence_heat_maps<arc_tan_of_persistence_of_point>;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
<< "provided as an input.The Gaussian kernels are weighted by the arc tangential of their persistence.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
<< "The second and third parameters are min and max of the grid. If you want those numbers to be computed "
@@ -36,7 +36,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 7) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -58,7 +58,7 @@ int main(int argc, char** argv) {
std::vector<std::vector<double> > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1);
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl;
+ std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl;
Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension);
std::stringstream ss;
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
index 5a657b13..fece2e36 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
@@ -19,7 +19,7 @@ using distance_from_diagonal_scaling = Gudhi::Persistence_representations::dista
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<distance_from_diagonal_scaling>;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
<< "provided as an input.The Gaussian kernels are weighted by the distance of a center from the "
<< "diagonal.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
@@ -36,7 +36,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 7) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -58,7 +58,7 @@ int main(int argc, char** argv) {
std::vector<std::vector<double> > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1);
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl;
+ std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl;
Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension);
std::stringstream ss;
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
index 8d67a54d..86e6fc19 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
@@ -21,7 +21,7 @@ using Persistence_heat_maps =
Gudhi::Persistence_representations::Persistence_heat_maps<squared_distance_from_diagonal_scaling>;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
<< "provided as an input.The Gaussian kernels are weighted by the square of distance of a center from the "
<< "diagonal.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
@@ -38,7 +38,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 7) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -60,7 +60,7 @@ int main(int argc, char** argv) {
std::vector<std::vector<double> > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1);
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a heat map based on a file : " << filenames[i] << std::endl;
+ std::clog << "Creating a heat map based on a file : " << filenames[i] << std::endl;
Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension);
std::stringstream ss;
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp
index 29170c32..ca27f8e3 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_persistence_heat_maps.cpp
@@ -19,7 +19,7 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<constant_scaling_function>;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence heat map files (*.mps) of persistence diagrams files (*.pers) "
<< "provided as an input.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
<< "The second and third parameters are min and max of the grid. If you want those numbers to be computed "
@@ -35,7 +35,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 7) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
size_t size_of_grid = (size_t)atoi(argv[1]);
@@ -55,7 +55,7 @@ int main(int argc, char** argv) {
std::vector<std::vector<double> > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1);
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a heat map based on file : " << filenames[i] << std::endl;
+ std::clog << "Creating a heat map based on file : " << filenames[i] << std::endl;
Persistence_heat_maps l(filenames[i], filter, false, size_of_grid, min_, max_, dimension);
std::stringstream ss;
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp
index 995771b9..d2ebcc7e 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/create_pssk.cpp
@@ -18,7 +18,7 @@
using PSSK = Gudhi::Persistence_representations::PSSK;
int main(int argc, char** argv) {
- std::cout << "This program creates PSSK files (*.pssk) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates PSSK files (*.pssk) of persistence diagrams files (*.pers) "
<< "provided as an input.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
<< "The second and third parameters are min and max of the grid. If you want those numbers to be computed "
@@ -34,7 +34,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 7) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -56,7 +56,7 @@ int main(int argc, char** argv) {
std::vector<std::vector<double> > filter = Gudhi::Persistence_representations::create_Gaussian_filter(stdiv, 1);
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a PSSK based on a file : " << filenames[i] << std::endl;
+ std::clog << "Creating a PSSK based on a file : " << filenames[i] << std::endl;
PSSK l(filenames[i], filter, size_of_grid, min_, max_, dimension);
std::stringstream ss;
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp b/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp
index cf6e07cb..87cc97d1 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/plot_persistence_heat_map.cpp
@@ -17,10 +17,10 @@ using constant_scaling_function = Gudhi::Persistence_representations::constant_s
using Persistence_heat_maps = Gudhi::Persistence_representations::Persistence_heat_maps<constant_scaling_function>;
int main(int argc, char** argv) {
- std::cout << "This program creates a gnuplot script from a persistence heat maps stored in a file (the file needs "
+ std::clog << "This program creates a gnuplot script from a persistence heat maps stored in a file (the file needs "
<< "to be created beforehand). Please call the code with the name of a single heat maps file \n";
if (argc != 2) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
Persistence_heat_maps l;
diff --git a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
index a025183e..118c1e9b 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
@@ -3,17 +3,16 @@ project(Persistence_representations_intervals_utilities)
add_executable ( plot_histogram_of_intervals_lengths plot_histogram_of_intervals_lengths.cpp )
-add_test(NAME plot_histogram_of_intervals_lengths COMMAND $<TARGET_FILE:plot_histogram_of_intervals_lengths>
+add_test(NAME Persistence_representation_utilities_plot_histogram_of_intervals_lengths COMMAND $<TARGET_FILE:plot_histogram_of_intervals_lengths>
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers" "-1")
install(TARGETS plot_histogram_of_intervals_lengths DESTINATION bin)
-add_persistence_representation_plot_utility(plot_persistence_intervals "")
-add_persistence_representation_plot_utility(plot_persistence_Betti_numbers "")
+add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_intervals "")
+add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_Betti_numbers "")
add_persistence_representation_creation_utility(compute_birth_death_range_in_persistence_diagram "-1")
-
add_executable ( compute_number_of_dominant_intervals compute_number_of_dominant_intervals.cpp )
add_test(NAME Persistence_representation_utilities_compute_number_of_dominant_intervals
COMMAND $<TARGET_FILE:compute_number_of_dominant_intervals>
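
The CMakeLists changes in this and the following utility directories follow one pattern: test names gain the Persistence_representation_utilities_ prefix, and the add_persistence_representation_plot_utility / add_persistence_representation_function_utility helpers now take the corresponding creation utility as an extra first argument. The helper definitions are not part of these hunks, but the likely intent is to let each plot and function test declare a dependency on the creation test that produces its input files, so CTest runs them in a valid order.
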
diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
index 519cc47d..72325cad 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
@@ -18,7 +18,7 @@
using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals;
int main(int argc, char** argv) {
- std::cout << "This program computes the range of birth and death times of persistence pairs in diagrams provided as "
+ std::clog << "This program computes the range of birth and death times of persistence pairs in diagrams provided as "
<< "an input.\n"
<< "The first parameter is the dimension of persistence to be used to create persistence intervals. "
<< "If your file contains the information about dimension of persistence pairs, please provide here the "
@@ -27,7 +27,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters of the program are the names of files with persistence diagrams.\n";
if (argc < 3) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -45,12 +45,12 @@ int main(int argc, char** argv) {
double max_ = -std::numeric_limits<double>::max();
for (size_t file_no = 0; file_no != filenames.size(); ++file_no) {
- std::cout << "Creating diagram based on a file : " << filenames[file_no] << std::endl;
+ std::clog << "Creating diagram based on a file : " << filenames[file_no] << std::endl;
Persistence_intervals p(filenames[file_no], dimension);
std::pair<double, double> min_max_ = p.get_x_range();
if (min_max_.first < min_) min_ = min_max_.first;
if (min_max_.second > max_) max_ = min_max_.second;
}
- std::cout << "Birth-death range : min: " << min_ << ", max: " << max_ << std::endl;
+ std::clog << "Birth-death range : min: " << min_ << ", max: " << max_ << std::endl;
return 0;
}
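
The hunk above initializes the running maximum with -std::numeric_limits<double>::max(). A self-contained sketch of the same accumulation, using std::numeric_limits<double>::lowest() (available since C++11 and equal to -max() for double) to make the intent explicit; the input data here is made up:

    #include <iostream>
    #include <limits>
    #include <utility>
    #include <vector>

    int main() {
      // Hypothetical (birth, death) ranges, one per diagram.
      std::vector<std::pair<double, double>> ranges{{0.1, 2.0}, {0.0, 1.5}};
      double min_ = std::numeric_limits<double>::max();
      double max_ = std::numeric_limits<double>::lowest();  // idiomatic "smallest" start value
      for (const auto& r : ranges) {
        if (r.first < min_) min_ = r.first;
        if (r.second > max_) max_ = r.second;
      }
      std::clog << "Birth-death range : min: " << min_ << ", max: " << max_ << std::endl;
      return 0;
    }
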
diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp
index 6155727a..465bf72e 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/compute_bottleneck_distance.cpp
@@ -18,7 +18,7 @@
using Persistence_intervals_with_distances = Gudhi::Persistence_representations::Persistence_intervals_with_distances;
int main(int argc, char** argv) {
- std::cout << "This program computes the bottleneck distance of persistence pairs in diagrams provided as "
+ std::clog << "This program computes the bottleneck distance of persistence pairs in diagrams provided as "
<< "an input.\n"
<< "The first parameter is the dimension of persistence to be used to create persistence intervals. "
<< "If your file contains the information about dimension of persistence pairs, please provide here the "
@@ -27,7 +27,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters of the program are the names of files with persistence diagrams.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -70,14 +70,14 @@ int main(int argc, char** argv) {
out.open("distance.itv");
for (size_t i = 0; i != distance.size(); ++i) {
for (size_t j = 0; j != distance.size(); ++j) {
- std::cout << distance[i][j] << " ";
+ std::clog << distance[i][j] << " ";
out << distance[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'distance.itv' file\n";
+ std::clog << "Distance can be found in 'distance.itv' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp b/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp
index dd6e1a5b..ea1fe717 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/compute_number_of_dominant_intervals.cpp
@@ -18,10 +18,10 @@
using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals;
int main(int argc, char** argv) {
- std::cout << "This program compute the dominant intervals. A number of intervals to be displayed is a parameter of "
+ std::clog << "This program compute the dominant intervals. A number of intervals to be displayed is a parameter of "
"this program. \n";
if (argc != 4) {
- std::cout << "To run this program, please provide the name of a file with persistence diagram, dimension of "
+ std::clog << "To run this program, please provide the name of a file with persistence diagram, dimension of "
"intervals that should be taken into account (if your file contains only persistence pairs in a "
"single dimension, set it up to -1) and number of dominant intervals you would like to get \n";
return 1;
@@ -33,9 +33,9 @@ int main(int argc, char** argv) {
}
Persistence_intervals p(argv[1], dimension);
std::vector<std::pair<double, double> > dominant_intervals = p.dominant_intervals(atoi(argv[3]));
- std::cout << "Here are the dominant intervals : " << std::endl;
+ std::clog << "Here are the dominant intervals : " << std::endl;
for (size_t i = 0; i != dominant_intervals.size(); ++i) {
- std::cout << " " << dominant_intervals[i].first << "," << dominant_intervals[i].second << " " << std::endl;
+ std::clog << " " << dominant_intervals[i].first << "," << dominant_intervals[i].second << " " << std::endl;
}
return 0;
diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp
index 13d2133f..e5eec3f5 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/plot_histogram_of_intervals_lengths.cpp
@@ -18,10 +18,10 @@
using Persistence_intervals = Gudhi::Persistence_representations::Persistence_intervals;
int main(int argc, char** argv) {
- std::cout << "This program computes a histogram of barcode's length. A number of bins in the histogram is a "
+ std::clog << "This program computes a histogram of barcode's length. A number of bins in the histogram is a "
<< "parameter of this program. \n";
if ((argc != 3) && (argc != 4)) {
- std::cout << "To run this program, please provide the name of a file with persistence diagram and number of "
+ std::clog << "To run this program, please provide the name of a file with persistence diagram and number of "
<< "dominant intervals you would like to get. Set a negative number dominant intervals value "
<< "If your file contains only birth-death pairs.\n"
<< "The third parameter is the dimension of the persistence that is to be used. If your "
@@ -59,7 +59,7 @@ int main(int argc, char** argv) {
out << std::endl;
out.close();
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
return 0;
}
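
The plot utilities in this series all end the same way: write a gnuplot script next to the input file and print the command that loads it. A stripped-down sketch of that pattern (the file name and plot payload below are placeholders, not the utilities' actual output format):

    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      std::string input = "first.pers";  // hypothetical input file
      std::stringstream gnuplot_script;
      gnuplot_script << input << "_GnuplotScript";
      std::ofstream out(gnuplot_script.str());
      out << "plot '-' using 1:2 with lines\n";  // placeholder plot command
      out.close();
      std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
                << gnuplot_script.str() << "\'\"" << std::endl;
      return 0;
    }
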
diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp
index 451be77f..27c69e07 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_Betti_numbers.cpp
@@ -19,7 +19,7 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in
int main(int argc, char** argv) {
if ((argc != 3) && (argc != 2)) {
- std::cout << "This program creates a gnuplot script of Betti numbers from a single persistence diagram file"
+ std::clog << "This program creates a gnuplot script of Betti numbers from a single persistence diagram file"
<< "(*.pers).\n"
<< "To run this program, please provide the name of a file with persistence diagram.\n"
<< "The second optional parameter of a program is the dimension of the persistence that is to be used. "
@@ -68,7 +68,7 @@ int main(int argc, char** argv) {
out << std::endl;
out.close();
- std::cout << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
+ std::clog << "To visualize, install gnuplot and type the command: gnuplot -persist -e \"load \'"
<< gnuplot_script.str().c_str() << "\'\"" << std::endl;
return 0;
diff --git a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp
index 09a56869..199a3266 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp
+++ b/src/Persistence_representations/utilities/persistence_intervals/plot_persistence_intervals.cpp
@@ -19,7 +19,7 @@ using Persistence_intervals = Gudhi::Persistence_representations::Persistence_in
int main(int argc, char** argv) {
if ((argc != 3) && (argc != 2)) {
- std::cout << "This program creates a gnuplot script from a single persistence diagram file (*.pers).\n"
+ std::clog << "This program creates a gnuplot script from a single persistence diagram file (*.pers).\n"
<< "To run this program, please provide the name of a file with persistence diagram.\n"
<< "The second optional parameter of a program is the dimension of the persistence that is to be used. "
<< "If your file contains only birth-death pairs, you can skip this parameter.\n";
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
index 6b24d032..4df84d36 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
@@ -2,8 +2,8 @@ project(Persistence_representations_landscapes_utilities)
add_persistence_representation_creation_utility(create_landscapes "-1")
-add_persistence_representation_plot_utility(plot_landscapes ".land")
+add_persistence_representation_plot_utility(create_landscapes plot_landscapes ".land")
-add_persistence_representation_function_utility(average_landscapes ".land")
-add_persistence_representation_function_utility(compute_distance_of_landscapes ".land" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_landscapes ".land")
+add_persistence_representation_function_utility(create_landscapes average_landscapes ".land")
+add_persistence_representation_function_utility(create_landscapes compute_distance_of_landscapes ".land" "1")
+add_persistence_representation_function_utility(create_landscapes compute_scalar_product_of_landscapes ".land")
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp
index 04a0ada4..612e9700 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes/average_landscapes.cpp
@@ -16,13 +16,13 @@
using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape;
int main(int argc, char** argv) {
- std::cout << "This program computes average of persistence landscapes stored in files (the files needs to be "
+ std::clog << "This program computes average of persistence landscapes stored in files (the files needs to be "
<< "created beforehand).\n"
<< "The parameters of this programs are names of files with persistence landscapes.\n";
std::vector<const char*> filenames;
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -46,6 +46,6 @@ int main(int argc, char** argv) {
delete lands[i];
}
- std::cout << "Average can be found in 'average.land' file\n";
+ std::clog << "Average can be found in 'average.land' file\n";
return 0;
}
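
The average_* utilities hold their inputs as raw owning pointers and delete them in a closing loop, visible in the hunk above. An illustrative rewrite of the same ownership with std::unique_ptr, which removes the manual delete; Landscape is a stand-in for the concrete representation type, not a GUDHI class:

    #include <memory>
    #include <vector>

    struct Landscape { /* stand-in for Persistence_landscape */ };

    int main() {
      std::vector<std::unique_ptr<Landscape>> lands;
      for (int i = 0; i != 3; ++i)
        lands.push_back(std::make_unique<Landscape>());
      // ... compute the average from *lands[i] ...
      return 0;  // every Landscape is released here, no delete loop needed
    }
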
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp
index 1093c1aa..2246a37d 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes/compute_distance_of_landscapes.cpp
@@ -18,14 +18,14 @@
using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape;
int main(int argc, char** argv) {
- std::cout << "This program computes distance of persistence landscapes stored in files (the files needs to be "
+ std::clog << "This program computes distance of persistence landscapes stored in files (the files needs to be "
<< "created beforehand).\n"
<< "The first parameter of a program is an integer p. The program compute L^p distance of the two heat "
<< "maps. For L^infty distance choose p = -1. \n"
<< "The remaining parameters of this program are names of files with persistence landscapes.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -68,14 +68,14 @@ int main(int argc, char** argv) {
out.open("distance.land");
for (size_t i = 0; i != distance.size(); ++i) {
for (size_t j = 0; j != distance.size(); ++j) {
- std::cout << distance[i][j] << " ";
+ std::clog << distance[i][j] << " ";
out << distance[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'distance.land' file\n";
+ std::clog << "Distance can be found in 'distance.land' file\n";
return 0;
}
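
Every distance and scalar-product utility in this series prints the resulting matrix twice, once to std::clog and once to a file, with duplicated loops. A small helper parameterized on std::ostream& expresses the pattern once; this is an illustrative refactoring sketch, not code from the library:

    #include <fstream>
    #include <iostream>
    #include <vector>

    void write_matrix(std::ostream& os, const std::vector<std::vector<double>>& m) {
      for (const auto& row : m) {
        for (double v : row) os << v << " ";
        os << std::endl;
      }
    }

    int main() {
      std::vector<std::vector<double>> distance{{0., 1.}, {1., 0.}};
      std::ofstream out("distance.land");
      write_matrix(std::clog, distance);  // console copy
      write_matrix(out, distance);        // file copy
      return 0;
    }
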
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp
index 16b76497..44f50543 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes/compute_scalar_product_of_landscapes.cpp
@@ -17,12 +17,12 @@
using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape;
int main(int argc, char** argv) {
- std::cout << "This program computes scalar product of persistence landscapes stored in a file (the file needs to be "
+ std::clog << "This program computes scalar product of persistence landscapes stored in a file (the file needs to be "
<< "created beforehand). \n"
<< "The parameters of this programs are names of files with persistence landscapes.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -59,14 +59,14 @@ int main(int argc, char** argv) {
out.open("scalar_product.land");
for (size_t i = 0; i != scalar_product.size(); ++i) {
for (size_t j = 0; j != scalar_product.size(); ++j) {
- std::cout << scalar_product[i][j] << " ";
+ std::clog << scalar_product[i][j] << " ";
out << scalar_product[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'scalar_product.land' file\n";
+ std::clog << "Distance can be found in 'scalar_product.land' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp
index 4d772086..fab5c75f 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes/create_landscapes.cpp
@@ -18,7 +18,7 @@
using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence landscapes files (*.land) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence landscapes files (*.land) of persistence diagrams files (*.pers) "
<< "provided as an input.\n"
<< "The first parameter of this program is a dimension of persistence that will be used in creation of "
<< "the persistence heat maps."
@@ -29,7 +29,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 3) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
std::vector<const char*> filenames;
@@ -43,7 +43,7 @@ int main(int argc, char** argv) {
}
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating a landscape based on file : " << filenames[i] << std::endl;
+ std::clog << "Creating a landscape based on file : " << filenames[i] << std::endl;
Persistence_landscape l(filenames[i], dimension);
std::stringstream ss;
ss << filenames[i] << ".land";
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp b/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp
index 1fe03640..da9b9bba 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes/plot_landscapes.cpp
@@ -16,10 +16,10 @@
using Persistence_landscape = Gudhi::Persistence_representations::Persistence_landscape;
int main(int argc, char** argv) {
- std::cout << "This program creates a gnuplot script from a persistence landscape stored in a file (the file needs "
+ std::clog << "This program creates a gnuplot script from a persistence landscape stored in a file (the file needs "
<< "to be created beforehand). Please call the code with the name of a single landscape file.\n";
if (argc != 2) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
index 36f3196b..8cd965f1 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
@@ -3,8 +3,8 @@ project(Persistence_representations_lanscapes_on_grid_utilities)
# Need to set grid min and max for further average, distance and scalar_product
add_persistence_representation_creation_utility(create_landscapes_on_grid "100" "0" "35" "-1")
-add_persistence_representation_plot_utility(plot_landscapes_on_grid ".g_land")
+add_persistence_representation_plot_utility(create_landscapes_on_grid plot_landscapes_on_grid ".g_land")
-add_persistence_representation_function_utility(average_landscapes_on_grid ".g_land")
-add_persistence_representation_function_utility(compute_distance_of_landscapes_on_grid ".g_land" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_landscapes_on_grid ".g_land")
+add_persistence_representation_function_utility(create_landscapes_on_grid average_landscapes_on_grid ".g_land")
+add_persistence_representation_function_utility(create_landscapes_on_grid compute_distance_of_landscapes_on_grid ".g_land" "1")
+add_persistence_representation_function_utility(create_landscapes_on_grid compute_scalar_product_of_landscapes_on_grid ".g_land")
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
index f92cde72..39f7a67f 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
@@ -16,12 +16,12 @@
using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid;
int main(int argc, char** argv) {
- std::cout << "This program computes average of persistence landscapes on grid stored in files (the files needs to "
+ std::clog << "This program computes average of persistence landscapes on grid stored in files (the files needs to "
<< "be created beforehand).\n"
<< "The parameters of this programs are names of files with persistence landscapes on grid.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -46,6 +46,6 @@ int main(int argc, char** argv) {
delete lands[i];
}
- std::cout << "Average can be found in 'average.g_land' file\n";
+ std::clog << "Average can be found in 'average.g_land' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
index baec6aeb..01fd09d8 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
@@ -18,14 +18,14 @@
using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid;
int main(int argc, char** argv) {
- std::cout << "This program computes distance of persistence landscapes on grid stored in files (the files needs to "
+ std::clog << "This program computes distance of persistence landscapes on grid stored in files (the files needs to "
<< "be created beforehand).\n"
<< "The first parameter of a program is an integer p. The program compute L^p distance of the two heat "
<< "maps. For L^infty distance choose p = -1. \n"
<< "The remaining parameters of this program are names of files with persistence landscapes on grid.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -68,14 +68,14 @@ int main(int argc, char** argv) {
out.open("distance.g_land");
for (size_t i = 0; i != distance.size(); ++i) {
for (size_t j = 0; j != distance.size(); ++j) {
- std::cout << distance[i][j] << " ";
+ std::clog << distance[i][j] << " ";
out << distance[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'distance.g_land' file\n";
+ std::clog << "Distance can be found in 'distance.g_land' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
index e94dacdb..71c2f419 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
@@ -17,13 +17,13 @@
using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid;
int main(int argc, char** argv) {
- std::cout
+ std::clog
<< "This program computes scalar product of persistence landscapes on grid stored in a file (the file needs to "
<< "be created beforehand). \n"
<< "The parameters of this programs are names of files with persistence landscapes on grid.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -60,14 +60,14 @@ int main(int argc, char** argv) {
out.open("scalar_product.g_land");
for (size_t i = 0; i != scalar_product.size(); ++i) {
for (size_t j = 0; j != scalar_product.size(); ++j) {
- std::cout << scalar_product[i][j] << " ";
+ std::clog << scalar_product[i][j] << " ";
out << scalar_product[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'scalar_product.g_land' file\n";
+ std::clog << "Distance can be found in 'scalar_product.g_land' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
index d510c3df..788313c4 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
@@ -18,7 +18,7 @@
using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence landscapes on grid files (*.g_land) of persistence diagrams files "
+ std::clog << "This program creates persistence landscapes on grid files (*.g_land) of persistence diagrams files "
<< "(*.pers) provided as an input.\n"
<< "The first parameter of a program is an integer, a size of a grid.\n"
<< "The second and third parameters are min and max of the grid. If you want those numbers to be computed "
@@ -32,7 +32,7 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 6) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
@@ -51,7 +51,7 @@ int main(int argc, char** argv) {
}
for (size_t i = 0; i != filenames.size(); ++i) {
- std::cout << "Creating persistence landscape on a grid based on a file : " << filenames[i] << std::endl;
+ std::clog << "Creating persistence landscape on a grid based on a file : " << filenames[i] << std::endl;
Persistence_landscape_on_grid l;
if ((min_ != -1) || (max_ != -1)) {
l = Persistence_landscape_on_grid(filenames[i], min_, max_, size_of_grid, dimension);
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
index 4e20f37f..ec6112b5 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
@@ -16,11 +16,11 @@
using Persistence_landscape_on_grid = Gudhi::Persistence_representations::Persistence_landscape_on_grid;
int main(int argc, char** argv) {
- std::cout << "This program creates a gnuplot script from a persistence landscape on grid stored in a file (the file "
+ std::clog << "This program creates a gnuplot script from a persistence landscape on grid stored in a file (the file "
<< "needs to be created beforehand). Please call the code with the name of a single landscape on grid file"
<< ".\n";
if (argc != 2) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
diff --git a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
index bc982094..5b22ca84 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
@@ -2,8 +2,8 @@ project(Persistence_vectors_utilities)
add_persistence_representation_creation_utility(create_persistence_vectors "-1")
-add_persistence_representation_plot_utility(plot_persistence_vectors ".vect")
+add_persistence_representation_plot_utility(create_persistence_vectors plot_persistence_vectors ".vect")
-add_persistence_representation_function_utility(average_persistence_vectors ".vect")
-add_persistence_representation_function_utility(compute_distance_of_persistence_vectors ".vect" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_persistence_vectors ".vect")
+add_persistence_representation_function_utility(create_persistence_vectors average_persistence_vectors ".vect")
+add_persistence_representation_function_utility(create_persistence_vectors compute_distance_of_persistence_vectors ".vect" "1")
+add_persistence_representation_function_utility(create_persistence_vectors compute_scalar_product_of_persistence_vectors ".vect")
diff --git a/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp
index 89e42f0f..4eb32eb3 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp
+++ b/src/Persistence_representations/utilities/persistence_vectors/average_persistence_vectors.cpp
@@ -17,12 +17,12 @@ using Euclidean_distance = Gudhi::Euclidean_distance;
using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram<Euclidean_distance>;
int main(int argc, char** argv) {
- std::cout << "This program computes average of persistence vectors stored in files (the files needs to "
+ std::clog << "This program computes average of persistence vectors stored in files (the files needs to "
<< "be created beforehand).\n"
<< "The parameters of this programs are names of files with persistence vectors.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -47,7 +47,7 @@ int main(int argc, char** argv) {
delete lands[i];
}
- std::cout << "Done \n";
+ std::clog << "Done \n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp
index 541dd25f..236981a3 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp
+++ b/src/Persistence_representations/utilities/persistence_vectors/compute_distance_of_persistence_vectors.cpp
@@ -19,14 +19,14 @@ using Euclidean_distance = Gudhi::Euclidean_distance;
using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram<Euclidean_distance>;
int main(int argc, char** argv) {
- std::cout << "This program compute distance of persistence vectors stored in a file (the file needs to be created "
+ std::clog << "This program compute distance of persistence vectors stored in a file (the file needs to be created "
"beforehand). \n";
- std::cout << "The first parameter of a program is an integer p. The program compute l^p distance of the vectors. For "
+ std::clog << "The first parameter of a program is an integer p. The program compute l^p distance of the vectors. For "
"l^infty distance choose p = -1. \n";
- std::cout << "The remaining parameters of this programs are names of files with persistence vectors.\n";
+ std::clog << "The remaining parameters of this programs are names of files with persistence vectors.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -69,14 +69,14 @@ int main(int argc, char** argv) {
out.open("distance.vect");
for (size_t i = 0; i != distance.size(); ++i) {
for (size_t j = 0; j != distance.size(); ++j) {
- std::cout << distance[i][j] << " ";
+ std::clog << distance[i][j] << " ";
out << distance[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'distance.vect' file\n";
+ std::clog << "Distance can be found in 'distance.vect' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
index bbc50c98..c6ea0e1c 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
+++ b/src/Persistence_representations/utilities/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
@@ -19,12 +19,12 @@ using Euclidean_distance = Gudhi::Euclidean_distance;
using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram<Euclidean_distance>;
int main(int argc, char** argv) {
- std::cout << "This program computes scalar product of persistence vectors stored in a file (the file needs to "
+ std::clog << "This program computes scalar product of persistence vectors stored in a file (the file needs to "
<< "be created beforehand). \n"
<< "The parameters of this programs are names of files with persistence vectors.\n";
if (argc < 3) {
- std::cout << "Wrong number of parameters, the program will now terminate \n";
+ std::clog << "Wrong number of parameters, the program will now terminate \n";
return 1;
}
@@ -61,14 +61,14 @@ int main(int argc, char** argv) {
out.open("scalar_product.vect");
for (size_t i = 0; i != scalar_product.size(); ++i) {
for (size_t j = 0; j != scalar_product.size(); ++j) {
- std::cout << scalar_product[i][j] << " ";
+ std::clog << scalar_product[i][j] << " ";
out << scalar_product[i][j] << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
out << std::endl;
}
out.close();
- std::cout << "Distance can be found in 'scalar_product.vect' file\n";
+ std::clog << "Distance can be found in 'scalar_product.vect' file\n";
return 0;
}
diff --git a/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp
index f974c3d3..608e04e5 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp
+++ b/src/Persistence_representations/utilities/persistence_vectors/create_persistence_vectors.cpp
@@ -19,7 +19,7 @@ using Euclidean_distance = Gudhi::Euclidean_distance;
using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram<Euclidean_distance>;
int main(int argc, char** argv) {
- std::cout << "This program creates persistence vectors files (*.vect) of persistence diagrams files (*.pers) "
+ std::clog << "This program creates persistence vectors files (*.vect) of persistence diagrams files (*.pers) "
<< "provided as an input.\n"
<< "The first parameter of this program is a dimension of persistence that will be used in creation of "
<< "the persistence heat maps."
@@ -30,11 +30,11 @@ int main(int argc, char** argv) {
<< "The remaining parameters are the names of files with persistence diagrams. \n";
if (argc < 3) {
- std::cout << "Wrong parameter list, the program will now terminate \n";
+ std::clog << "Wrong parameter list, the program will now terminate \n";
return 1;
}
- std::cout << "The remaining parameters are the names of files with persistence diagrams. \n";
+ std::clog << "The remaining parameters are the names of files with persistence diagrams. \n";
int dim = atoi(argv[1]);
unsigned dimension = std::numeric_limits<unsigned>::max();
if (dim >= 0) {
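
The tail of this hunk shows the dimension-argument convention these utilities share: a negative command-line value means "no dimension filter", stored internally as the largest unsigned value. A self-contained sketch of that parsing (the surrounding file I/O is omitted):

    #include <cstdlib>
    #include <iostream>
    #include <limits>

    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      int dim = std::atoi(argv[1]);
      unsigned dimension = std::numeric_limits<unsigned>::max();  // sentinel: keep all dimensions
      if (dim >= 0) {
        dimension = static_cast<unsigned>(dim);
      }
      std::clog << "dimension filter: " << dimension << std::endl;
      return 0;
    }
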
diff --git a/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp b/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp
index de08fcfe..2decb134 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp
+++ b/src/Persistence_representations/utilities/persistence_vectors/plot_persistence_vectors.cpp
@@ -17,10 +17,10 @@ using Euclidean_distance = Gudhi::Euclidean_distance;
using Vector_distances_in_diagram = Gudhi::Persistence_representations::Vector_distances_in_diagram<Euclidean_distance>;
int main(int argc, char** argv) {
- std::cout << "This program create a Gnuplot script to plot persistence vector. Please call this program with the "
+ std::clog << "This program create a Gnuplot script to plot persistence vector. Please call this program with the "
"name of file with persistence vector. \n";
if (argc != 2) {
- std::cout << "Wrong number of parameters, the program will now terminate. \n";
+ std::clog << "Wrong number of parameters, the program will now terminate. \n";
return 1;
}
Vector_distances_in_diagram l;
diff --git a/src/Persistent_cohomology/benchmark/CMakeLists.txt b/src/Persistent_cohomology/benchmark/CMakeLists.txt
index 2bb3b0c7..ad8dc84b 100644
--- a/src/Persistent_cohomology/benchmark/CMakeLists.txt
+++ b/src/Persistent_cohomology/benchmark/CMakeLists.txt
@@ -2,11 +2,13 @@ project(Persistent_cohomology_benchmark)
if(GMP_FOUND)
if(GMPXX_FOUND)
- add_executable ( performance_rips_persistence EXCLUDE_FROM_ALL performance_rips_persistence.cpp )
- target_link_libraries(performance_rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY} ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
- if (TBB_FOUND)
- target_link_libraries(performance_rips_persistence ${TBB_LIBRARIES})
- endif(TBB_FOUND)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/Kl.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ if (TARGET Boost::program_options)
+ add_executable ( performance_rips_persistence EXCLUDE_FROM_ALL performance_rips_persistence.cpp )
+ target_link_libraries(performance_rips_persistence Boost::program_options ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
+ if (TBB_FOUND)
+ target_link_libraries(performance_rips_persistence ${TBB_LIBRARIES})
+ endif(TBB_FOUND)
+ file(COPY "${CMAKE_SOURCE_DIR}/data/points/Kl.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ endif()
endif(GMPXX_FOUND)
endif(GMP_FOUND)
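
The benchmark CMakeLists now wraps the target in `if (TARGET Boost::program_options)`. Linking against the imported Boost::program_options target, rather than the plain ${Boost_PROGRAM_OPTIONS_LIBRARY} variable, also propagates Boost's include directories and usage requirements automatically; the guard skips the benchmark cleanly when FindBoost did not define the imported target, instead of failing at configure or link time.
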
diff --git a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
index 45757002..3bec8830 100644
--- a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
+++ b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
@@ -49,7 +49,7 @@ void timing_persistence(FilteredComplex & cpx
* with a Hasse diagram. The Hasse diagram represents explicitly all
* codimension 1 incidence relations in the complex, and hence leads to
* a faster computation of persistence because boundaries are precomputed.
- * Hovewer, the simplex tree may be constructed directly from a point cloud and
+ * However, the simplex tree may be constructed directly from a point cloud and
* is more compact.
* We compute persistent homology with coefficient fields Z/2Z and Z/1223Z.
* We present also timings for the computation of multi-field persistent
@@ -74,7 +74,7 @@ int main(int argc, char * argv[]) {
Rips_complex rips_complex_from_file(off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance());
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Compute Rips graph in " << elapsed_sec << " ms.\n";
+ std::clog << "Compute Rips graph in " << elapsed_sec << " ms.\n";
// Construct the Rips complex in a Simplex Tree
Simplex_tree st;
@@ -86,16 +86,16 @@ int main(int argc, char * argv[]) {
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Compute Rips complex in " << elapsed_sec << " ms.\n";
- std::cout << " - dimension = " << st.dimension() << std::endl;
- std::cout << " - number of simplices = " << st.num_simplices() << std::endl;
+ std::clog << "Compute Rips complex in " << elapsed_sec << " ms.\n";
+ std::clog << " - dimension = " << st.dimension() << std::endl;
+ std::clog << " - number of simplices = " << st.num_simplices() << std::endl;
// Sort the simplices in the order of the filtration
start = std::chrono::system_clock::now();
st.initialize_filtration();
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Order the simplices of the filtration in " << elapsed_sec << " ms.\n";
+ std::clog << "Order the simplices of the filtration in " << elapsed_sec << " ms.\n";
// Copy the keys inside the simplices
start = std::chrono::system_clock::now();
@@ -106,22 +106,22 @@ int main(int argc, char * argv[]) {
}
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Copied the keys inside the simplices in " << elapsed_sec << " ms.\n";
+ std::clog << "Copied the keys inside the simplices in " << elapsed_sec << " ms.\n";
// Convert the simplex tree into a hasse diagram
start = std::chrono::system_clock::now();
Gudhi::Hasse_complex<> hcpx(st);
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Convert the simplex tree into a Hasse diagram in " << elapsed_sec << " ms.\n";
+ std::clog << "Convert the simplex tree into a Hasse diagram in " << elapsed_sec << " ms.\n";
- std::cout << "Timings when using a simplex tree: \n";
+ std::clog << "Timings when using a simplex tree: \n";
timing_persistence(st, p);
timing_persistence(st, q);
timing_persistence(st, p, q);
- std::cout << "Timings when using a Hasse complex: \n";
+ std::clog << "Timings when using a Hasse complex: \n";
timing_persistence(hcpx, p);
timing_persistence(hcpx, q);
timing_persistence(hcpx, p, q);
@@ -130,7 +130,7 @@ int main(int argc, char * argv[]) {
}
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << "Running the complex destructors in " << elapsed_sec << " ms.\n";
+ std::clog << "Running the complex destructors in " << elapsed_sec << " ms.\n";
return 0;
}
@@ -145,13 +145,13 @@ timing_persistence(FilteredComplex & cpx
Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, Field_Zp > pcoh(cpx);
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Initialize pcoh in " << elapsed_sec << " ms.\n";
+ std::clog << " Initialize pcoh in " << elapsed_sec << " ms.\n";
// initializes the coefficient field for homology
start = std::chrono::system_clock::now();
pcoh.init_coefficients(p);
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Initialize the coefficient field in " << elapsed_sec << " ms.\n";
+ std::clog << " Initialize the coefficient field in " << elapsed_sec << " ms.\n";
start = std::chrono::system_clock::now();
@@ -159,12 +159,12 @@ timing_persistence(FilteredComplex & cpx
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Compute persistent homology in Z/" << p << "Z in " << elapsed_sec << " ms.\n";
+ std::clog << " Compute persistent homology in Z/" << p << "Z in " << elapsed_sec << " ms.\n";
start = std::chrono::system_clock::now();
}
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Run the persistence destructors in " << elapsed_sec << " ms.\n";
+ std::clog << " Run the persistence destructors in " << elapsed_sec << " ms.\n";
}
template< typename FilteredComplex>
@@ -179,13 +179,13 @@ timing_persistence(FilteredComplex & cpx
Gudhi::persistent_cohomology::Persistent_cohomology< FilteredComplex, Multi_field > pcoh(cpx);
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Initialize pcoh in " << elapsed_sec << " ms.\n";
+ std::clog << " Initialize pcoh in " << elapsed_sec << " ms.\n";
// initializes the coefficient field for homology
start = std::chrono::system_clock::now();
pcoh.init_coefficients(p, q);
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Initialize the coefficient field in " << elapsed_sec << " ms.\n";
+ std::clog << " Initialize the coefficient field in " << elapsed_sec << " ms.\n";
// compute persistent homology, disgarding persistent features of life shorter than min_persistence
start = std::chrono::system_clock::now();
@@ -194,11 +194,11 @@ timing_persistence(FilteredComplex & cpx
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Compute multi-field persistent homology in all coefficient fields Z/pZ "
+ std::clog << " Compute multi-field persistent homology in all coefficient fields Z/pZ "
<< "with p in [" << p << ";" << q << "] in " << elapsed_sec << " ms.\n";
start = std::chrono::system_clock::now();
}
end = std::chrono::system_clock::now();
elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
- std::cout << " Run the persistence destructors in " << elapsed_sec << " ms.\n";
+ std::clog << " Run the persistence destructors in " << elapsed_sec << " ms.\n";
}
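
The benchmark times every phase with the same std::chrono idiom seen throughout this file. A minimal standalone version of that pattern (the workload is a placeholder; std::steady_clock would be the usual choice for measuring intervals, though the benchmark uses system_clock):

    #include <chrono>
    #include <iostream>

    int main() {
      auto start = std::chrono::system_clock::now();
      volatile double acc = 0.;                     // placeholder workload
      for (int i = 0; i < 1000000; ++i) acc = acc + i * 0.5;
      auto end = std::chrono::system_clock::now();
      auto elapsed_sec = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
      std::clog << "Workload ran in " << elapsed_sec << " ms.\n";
      return 0;
    }
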
diff --git a/src/Persistent_cohomology/concept/FilteredComplex.h b/src/Persistent_cohomology/concept/FilteredComplex.h
index 26ac7ac8..59ce25e3 100644
--- a/src/Persistent_cohomology/concept/FilteredComplex.h
+++ b/src/Persistent_cohomology/concept/FilteredComplex.h
@@ -103,7 +103,7 @@ Filtration_simplex_range filtration_simplex_range();
/** @} */
-/* \brief Iterator over the simplices of the complex,
+/** \brief Iterator over the simplices of the complex,
* in an arbitrary order.
*
* 'value_type' must be 'Simplex_handle'.*/
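
A minimal sketch of how a model of this concept (Simplex_tree, here) exposes that range; the toy triangle is hypothetical:

    #include <gudhi/Simplex_tree.h>
    #include <iostream>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<> st;
      std::vector<int> triangle = {0, 1, 2};
      st.insert_simplex_and_subfaces(triangle, 0.5);
      // complex_simplex_range() iterates the simplices in an arbitrary order;
      // its value_type is Simplex_handle, as the concept requires.
      for (auto sh : st.complex_simplex_range()) {
        for (auto v : st.simplex_vertex_range(sh)) std::clog << v << " ";
        std::clog << "[" << st.filtration(sh) << "]\n";
      }
    }
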
diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
index 46b784d8..94579564 100644
--- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
+++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
@@ -21,7 +21,7 @@ namespace persistent_cohomology {
\author Clément Maria
Computation of persistent cohomology using the algorithm of
- \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:journals/corr/abs-1208-5018
+ \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:conf/compgeom/DeyFW14
and the Compressed Annotation Matrix
implementation of \cite DBLP:conf/esa/BoissonnatDM13
@@ -131,8 +131,7 @@ namespace persistent_cohomology {
We provide several example files: run these examples with -h for details on their use, and read the README file.
-\li <a href="_rips_complex_2rips_persistence_8cpp-example.html">
-Rips_complex/rips_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its persistence
+\li \gudhi_example_link{Rips_complex,rips_persistence.cpp} computes the Rips complex of a point cloud and outputs its persistence
diagram.
\code $> ./rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3 \endcode
\code The complex contains 177838 simplices
@@ -144,12 +143,10 @@ diagram.
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
-Persistent_cohomology/rips_multifield_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its
+\li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp} computes the Rips complex of a point cloud and outputs its
persistence diagram with a family of field coefficients.
-\li <a href="_rips_complex_2rips_distance_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_distance_matrix_persistence.cpp</a> computes the Rips complex of a distance matrix and
+\li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp} computes the Rips complex of a distance matrix and
outputs its persistence diagram.
The file should contain a square or lower triangular distance matrix with semicolons as separators.
@@ -158,8 +155,7 @@ Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_rips_complex_2rips_correlation_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_correlation_matrix_persistence.cpp</a>
+\li \gudhi_example_link{Rips_complex,rips_correlation_matrix_persistence.cpp}
computes the Rips complex of a correlation matrix and outputs its persistence diagram.
Note that no check is performed to verify that the matrix given as input is actually a correlation matrix.
@@ -169,8 +165,7 @@ Please refer to data/correlation_matrix/lower_triangular_correlation_matrix.csv
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_3d_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/2\mathbb{Z}\f$ coefficients of the alpha complex on points sampled from an OFF file.
\code $> ./alpha_complex_3d_persistence ../../data/points/tore3D_300.off -p 2 -m 0.45 \endcode
\code Simplex_tree dim: 3
@@ -235,8 +230,7 @@ Note that the lengths of the sides of the periodic cuboid have to be the same.<b
3 2 36.8838 inf
3 3 58.6783 inf \endcode
-\li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/p\mathbb{Z}\f$ coefficients of the alpha complex on points sampled from an OFF file.
\code $> ./alpha_complex_persistence -r 32 -p 2 -m 0.45 ../../data/points/tore3D_300.off \endcode
\code Alpha complex is of dimension 3 - 9273 simplices - 300 vertices.
@@ -248,8 +242,7 @@ Simplex_tree dim: 3
More details on the <a href="../../alphacomplex/">Alpha complex utilities</a> dedicated page.
-\li <a href="_persistent_cohomology_2plain_homology_8cpp-example.html">
-Persistent_cohomology/plain_homology.cpp</a> computes the plain homology of a simple simplicial complex without
+\li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp} computes the plain homology of a simple simplicial complex without
filtration values.
*/
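
The examples listed above all share the same pipeline: build a filtered complex, then hand it to Persistent_cohomology. A minimal sketch with a Rips complex on a toy point set (the OFF inputs above play this role in the utilities):

    #include <gudhi/Rips_complex.h>
    #include <gudhi/Simplex_tree.h>
    #include <gudhi/Persistent_cohomology.h>
    #include <gudhi/Persistent_cohomology/Field_Zp.h>
    #include <gudhi/distance_functions.h>
    #include <vector>

    int main() {
      using Point = std::vector<double>;
      std::vector<Point> points = {{0., 0.}, {1., 0.}, {0., 1.}, {1., 1.}};
      Gudhi::rips_complex::Rips_complex<double> rips(points, /*threshold=*/2.,
                                                     Gudhi::Euclidean_distance());
      Gudhi::Simplex_tree<> st;
      rips.create_complex(st, /*dim_max=*/2);
      Gudhi::persistent_cohomology::Persistent_cohomology<
          Gudhi::Simplex_tree<>, Gudhi::persistent_cohomology::Field_Zp> pcoh(st);
      pcoh.init_coefficients(2);               // homology with Z/2Z coefficients
      pcoh.compute_persistent_cohomology(0.);  // min_persistence
      pcoh.output_diagram();                   // one bar per line: p dim b d
    }
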
diff --git a/src/Persistent_cohomology/example/CMakeLists.txt b/src/Persistent_cohomology/example/CMakeLists.txt
index 94ec13c5..d66954d7 100644
--- a/src/Persistent_cohomology/example/CMakeLists.txt
+++ b/src/Persistent_cohomology/example/CMakeLists.txt
@@ -1,68 +1,70 @@
project(Persistent_cohomology_examples)
add_executable(plain_homology plain_homology.cpp)
+if (TBB_FOUND)
+ target_link_libraries(plain_homology ${TBB_LIBRARIES})
+endif()
+add_test(NAME Persistent_cohomology_example_plain_homology COMMAND $<TARGET_FILE:plain_homology>)
add_executable(persistence_from_simple_simplex_tree persistence_from_simple_simplex_tree.cpp)
-
-add_executable(rips_persistence_step_by_step rips_persistence_step_by_step.cpp)
-target_link_libraries(rips_persistence_step_by_step ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
-add_executable(rips_persistence_via_boundary_matrix rips_persistence_via_boundary_matrix.cpp)
-target_link_libraries(rips_persistence_via_boundary_matrix ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
-add_executable(persistence_from_file persistence_from_file.cpp)
-target_link_libraries(persistence_from_file ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
if (TBB_FOUND)
- target_link_libraries(plain_homology ${TBB_LIBRARIES})
target_link_libraries(persistence_from_simple_simplex_tree ${TBB_LIBRARIES})
- target_link_libraries(rips_persistence_step_by_step ${TBB_LIBRARIES})
- target_link_libraries(rips_persistence_via_boundary_matrix ${TBB_LIBRARIES})
- target_link_libraries(persistence_from_file ${TBB_LIBRARIES})
endif()
-
-add_test(NAME Persistent_cohomology_example_plain_homology COMMAND $<TARGET_FILE:plain_homology>)
add_test(NAME Persistent_cohomology_example_from_simple_simplex_tree COMMAND $<TARGET_FILE:persistence_from_simple_simplex_tree>
- "1" "0")
-add_test(NAME Persistent_cohomology_example_from_rips_step_by_step_on_tore_3D COMMAND $<TARGET_FILE:rips_persistence_step_by_step>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
-add_test(NAME Persistent_cohomology_example_via_boundary_matrix COMMAND $<TARGET_FILE:rips_persistence_via_boundary_matrix>
- "${CMAKE_SOURCE_DIR}/data/points/Kl.off" "-r" "0.16" "-d" "3" "-p" "3" "-m" "100")
-add_test(NAME Persistent_cohomology_example_from_file_3_2_0 COMMAND $<TARGET_FILE:persistence_from_file>
- "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/bunny_5000_complex.fsc" "-p" "2" "-m" "0")
-add_test(NAME Persistent_cohomology_example_from_file_3_3_100 COMMAND $<TARGET_FILE:persistence_from_file>
- "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/bunny_5000_complex.fsc" "-p" "3" "-m" "100")
+ "2" "0")
-install(TARGETS plain_homology DESTINATION bin)
-install(TARGETS persistence_from_simple_simplex_tree DESTINATION bin)
-install(TARGETS rips_persistence_step_by_step DESTINATION bin)
-install(TARGETS rips_persistence_via_boundary_matrix DESTINATION bin)
-install(TARGETS persistence_from_file DESTINATION bin)
+if(TARGET Boost::program_options)
+ add_executable(rips_persistence_step_by_step rips_persistence_step_by_step.cpp)
+ target_link_libraries(rips_persistence_step_by_step Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(rips_persistence_step_by_step ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Persistent_cohomology_example_from_rips_step_by_step_on_tore_3D COMMAND $<TARGET_FILE:rips_persistence_step_by_step>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
+endif()
+
+if(TARGET Boost::program_options)
+ add_executable(rips_persistence_via_boundary_matrix rips_persistence_via_boundary_matrix.cpp)
+ target_link_libraries(rips_persistence_via_boundary_matrix Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(rips_persistence_via_boundary_matrix ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Persistent_cohomology_example_via_boundary_matrix COMMAND $<TARGET_FILE:rips_persistence_via_boundary_matrix>
+ "${CMAKE_SOURCE_DIR}/data/points/Kl.off" "-r" "0.16" "-d" "3" "-p" "3" "-m" "100")
+endif()
+
+if(TARGET Boost::program_options)
+ add_executable(persistence_from_file persistence_from_file.cpp)
+ target_link_libraries(persistence_from_file Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(persistence_from_file ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Persistent_cohomology_example_from_file_3_2_0 COMMAND $<TARGET_FILE:persistence_from_file>
+ "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/Klein_bottle_complex.fsc" "-p" "2" "-m" "0")
+ add_test(NAME Persistent_cohomology_example_from_file_3_3_100 COMMAND $<TARGET_FILE:persistence_from_file>
+ "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/Klein_bottle_complex.fsc" "-p" "3" "-m" "100")
+endif()
if(GMP_FOUND)
- if(GMPXX_FOUND)
+ if(GMPXX_FOUND)
+ if(TARGET Boost::program_options)
add_executable(rips_multifield_persistence rips_multifield_persistence.cpp )
target_link_libraries(rips_multifield_persistence
- ${Boost_PROGRAM_OPTIONS_LIBRARY} ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
+ Boost::program_options ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
if (TBB_FOUND)
target_link_libraries(rips_multifield_persistence ${TBB_LIBRARIES})
endif(TBB_FOUND)
add_test(NAME Persistent_cohomology_example_multifield_2_71 COMMAND $<TARGET_FILE:rips_multifield_persistence>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "2" "-q" "71")
- install(TARGETS rips_multifield_persistence DESTINATION bin)
- endif(GMPXX_FOUND)
+ endif()
+ endif(GMPXX_FOUND)
endif(GMP_FOUND)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
-
add_executable(custom_persistence_sort custom_persistence_sort.cpp)
target_link_libraries(custom_persistence_sort ${CGAL_LIBRARY})
-
if (TBB_FOUND)
target_link_libraries(custom_persistence_sort ${TBB_LIBRARIES})
endif(TBB_FOUND)
add_test(NAME Persistent_cohomology_example_custom_persistence_sort COMMAND $<TARGET_FILE:custom_persistence_sort>)
-
- install(TARGETS custom_persistence_sort DESTINATION bin)
-
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Persistent_cohomology/example/custom_persistence_sort.cpp b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
index be74cf50..bba0b2f7 100644
--- a/src/Persistent_cohomology/example/custom_persistence_sort.cpp
+++ b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
@@ -33,7 +33,7 @@ using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomolog
Gudhi::persistent_cohomology::Field_Zp >;
std::vector<Point> random_points() {
- // Instanciate a random point generator
+ // Instantiate a random point generator
CGAL::Random rng(0);
// Generate "points_number" random points in a vector
@@ -70,26 +70,23 @@ struct cmp_intervals_by_dim_then_length {
int main(int argc, char **argv) {
std::vector<Point> points = random_points();
- std::cout << "Points size=" << points.size() << std::endl;
+ std::clog << "Points size=" << points.size() << std::endl;
// Alpha complex persistence computation from generated points
Alpha_complex alpha_complex_from_points(points);
- std::cout << "alpha_complex_from_points" << std::endl;
+ std::clog << "alpha_complex_from_points" << std::endl;
Simplex_tree simplex;
- std::cout << "simplex" << std::endl;
+ std::clog << "simplex" << std::endl;
if (alpha_complex_from_points.create_complex(simplex, 0.6)) {
- std::cout << "simplex" << std::endl;
+ std::clog << "simplex" << std::endl;
// ----------------------------------------------------------------------------
// Display information about the alpha complex
// ----------------------------------------------------------------------------
- std::cout << "Simplicial complex is of dimension " << simplex.dimension() <<
+ std::clog << "Simplicial complex is of dimension " << simplex.dimension() <<
" - " << simplex.num_simplices() << " simplices - " <<
simplex.num_vertices() << " vertices." << std::endl;
- // Sort the simplices in the order of the filtration
- simplex.initialize_filtration();
-
- std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl;
+ std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl;
Persistent_cohomology pcoh(simplex);
@@ -102,23 +99,23 @@ int main(int argc, char **argv) {
auto persistent_pairs = pcoh.get_persistent_pairs();
std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp);
for (auto pair : persistent_pairs) {
- std::cout << simplex.dimension(get<0>(pair)) << " "
+ std::clog << simplex.dimension(get<0>(pair)) << " "
<< simplex.filtration(get<0>(pair)) << " "
<< simplex.filtration(get<1>(pair)) << std::endl;
}
// Persistent Betti numbers
- std::cout << "The persistent Betti numbers in interval [0.40, 0.41] are : ";
+ std::clog << "The persistent Betti numbers in interval [0.40, 0.41] are : ";
for (int dim = 0; dim < simplex.dimension(); dim++)
- std::cout << "b" << dim << " = " << pcoh.persistent_betti_number(dim, 0.40, 0.41) << " ; ";
- std::cout << std::endl;
+ std::clog << "b" << dim << " = " << pcoh.persistent_betti_number(dim, 0.40, 0.41) << " ; ";
+ std::clog << std::endl;
// Betti numbers
std::vector<int> betti_numbers = pcoh.betti_numbers();
- std::cout << "The Betti numbers are : ";
+ std::clog << "The Betti numbers are : ";
for (std::size_t i = 0; i < betti_numbers.size(); i++)
- std::cout << "b" << i << " = " << betti_numbers[i] << " ; ";
- std::cout << std::endl;
+ std::clog << "b" << i << " = " << betti_numbers[i] << " ; ";
+ std::clog << std::endl;
}
return 0;
}
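
The example above sorts get_persistent_pairs() with a custom comparator (cmp_intervals_by_dim_then_length). A hedged sketch of one reasonable such ordering, by dimension first and then by decreasing interval length; the member and template names here are illustrative:

    #include <tuple>

    template <class SimplicialComplex>
    struct Cmp_intervals_by_dim_then_length {
      explicit Cmp_intervals_by_dim_then_length(SimplicialComplex* st) : st_(st) {}
      template <class Persistent_interval>
      bool operator()(const Persistent_interval& p1, const Persistent_interval& p2) {
        // Lower dimension first; within a dimension, longer intervals first.
        // get<0> is the creator simplex, get<1> the destructor, as in the example.
        if (st_->dimension(std::get<0>(p1)) != st_->dimension(std::get<0>(p2)))
          return st_->dimension(std::get<0>(p1)) < st_->dimension(std::get<0>(p2));
        return (st_->filtration(std::get<1>(p1)) - st_->filtration(std::get<0>(p1))) >
               (st_->filtration(std::get<1>(p2)) - st_->filtration(std::get<0>(p2)));
      }
      SimplicialComplex* st_;
    };
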
diff --git a/src/Persistent_cohomology/example/persistence_from_file.cpp b/src/Persistent_cohomology/example/persistence_from_file.cpp
index d169cc63..7f89c001 100644
--- a/src/Persistent_cohomology/example/persistence_from_file.cpp
+++ b/src/Persistent_cohomology/example/persistence_from_file.cpp
@@ -37,9 +37,9 @@ int main(int argc, char * argv[]) {
program_options(argc, argv, simplex_tree_file, output_file, p, min_persistence);
- std::cout << "Simplex_tree from file=" << simplex_tree_file.c_str() << " - output_file=" << output_file.c_str()
+ std::clog << "Simplex_tree from file=" << simplex_tree_file.c_str() << " - output_file=" << output_file.c_str()
<< std::endl;
- std::cout << " - p=" << p << " - min_persistence=" << min_persistence << std::endl;
+ std::clog << " - p=" << p << " - min_persistence=" << min_persistence << std::endl;
// Read the list of simplices from a file.
Simplex_tree<> simplex_tree;
@@ -47,21 +47,18 @@ int main(int argc, char * argv[]) {
std::ifstream simplex_tree_stream(simplex_tree_file);
simplex_tree_stream >> simplex_tree;
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices" << std::endl;
- std::cout << " - dimension " << simplex_tree.dimension() << std::endl;
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices" << std::endl;
+ std::clog << " - dimension " << simplex_tree.dimension() << std::endl;
/*
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for( auto f_simplex : simplex_tree.filtration_simplex_range() )
- { std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ { std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for( auto vertex : simplex_tree.simplex_vertex_range(f_simplex) )
- { std::cout << vertex << " "; }
- std::cout << std::endl;
+ { std::clog << vertex << " "; }
+ std::clog << std::endl;
}*/
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology< Simplex_tree<>, Field_Zp > pcoh(simplex_tree);
// initializes the coefficient field for homology
@@ -96,7 +93,7 @@ void program_options(int argc, char * argv[]
visible.add_options()
("help,h", "produce help message")
("output-file,o", po::value<std::string>(&output_file)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")
+ "Name of file in which the persistence diagram is written. Default print in standard output")
("field-charac,p", po::value<int>(&p)->default_value(11),
"Characteristic p of the coefficient field Z/pZ for computing homology.")
("min-persistence,m", po::value<Filtration_value>(&min_persistence),
@@ -114,17 +111,17 @@ void program_options(int argc, char * argv[]
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
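
The example reads the whole filtered complex back with Simplex_tree's stream extraction operator. A minimal sketch, where "complex.fsc" is a hypothetical stand-in for the .fsc files shipped with the sources:

    #include <gudhi/Simplex_tree.h>
    #include <fstream>
    #include <iostream>

    int main() {
      Gudhi::Simplex_tree<> st;
      std::ifstream in("complex.fsc");  // one simplex per line with its filtration value
      in >> st;
      std::clog << st.num_simplices() << " simplices - dimension "
                << st.dimension() << "\n";
    }
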
diff --git a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
index 3c91662f..3da6771e 100644
--- a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
+++ b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
@@ -51,62 +51,62 @@ int main(int argc, char * const argv[]) {
}
// TEST OF INSERTION
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF INSERTION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF INSERTION" << std::endl;
Simplex_tree st;
// ++ FIRST
- std::cout << " - INSERT (0,1,2)" << std::endl;
+ std::clog << " - INSERT (0,1,2)" << std::endl;
typeVectorVertex SimplexVector = {0, 1, 2};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
// ++ SECOND
- std::cout << " - INSERT 3" << std::endl;
+ std::clog << " - INSERT 3" << std::endl;
SimplexVector = {3};
st.insert_simplex_and_subfaces(SimplexVector, 0.1);
// ++ THIRD
- std::cout << " - INSERT (0,3)" << std::endl;
+ std::clog << " - INSERT (0,3)" << std::endl;
SimplexVector = {0, 3};
st.insert_simplex_and_subfaces(SimplexVector, 0.2);
// ++ FOURTH
- std::cout << " - INSERT (0,1) (already inserted)" << std::endl;
+ std::clog << " - INSERT (0,1) (already inserted)" << std::endl;
SimplexVector = {0, 1};
st.insert_simplex_and_subfaces(SimplexVector, 0.2);
// ++ FIFTH
- std::cout << " - INSERT (3,4,5)" << std::endl;
+ std::clog << " - INSERT (3,4,5)" << std::endl;
SimplexVector = {3, 4, 5};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
// ++ SIXTH
- std::cout << " - INSERT (0,1,6,7)" << std::endl;
+ std::clog << " - INSERT (0,1,6,7)" << std::endl;
SimplexVector = {0, 1, 6, 7};
st.insert_simplex_and_subfaces(SimplexVector, 0.4);
// ++ SEVENTH
- std::cout << " - INSERT (4,5,8,9)" << std::endl;
+ std::clog << " - INSERT (4,5,8,9)" << std::endl;
SimplexVector = {4, 5, 8, 9};
st.insert_simplex_and_subfaces(SimplexVector, 0.4);
// ++ EIGHTH
- std::cout << " - INSERT (9,10,11)" << std::endl;
+ std::clog << " - INSERT (9,10,11)" << std::endl;
SimplexVector = {9, 10, 11};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
- // ++ NINETH
- std::cout << " - INSERT (2,10,12)" << std::endl;
+ // ++ NINTH
+ std::clog << " - INSERT (2,10,12)" << std::endl;
SimplexVector = {2, 10, 12};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
// ++ TENTH
- std::cout << " - INSERT (11,6)" << std::endl;
+ std::clog << " - INSERT (11,6)" << std::endl;
SimplexVector = {6, 11};
st.insert_simplex_and_subfaces(SimplexVector, 0.2);
// ++ ELEVENTH
- std::cout << " - INSERT (13,14,15)" << std::endl;
+ std::clog << " - INSERT (13,14,15)" << std::endl;
SimplexVector = {13, 14, 15};
st.insert_simplex_and_subfaces(SimplexVector, 0.25);
@@ -131,24 +131,24 @@ int main(int argc, char * const argv[]) {
/* An edge [10,12,2] */
- std::cout << "The complex contains " << st.num_simplices() << " simplices - " << st.num_vertices() << " vertices "
+ std::clog << "The complex contains " << st.num_simplices() << " simplices - " << st.num_vertices() << " vertices "
<< std::endl;
- std::cout << " - dimension " << st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:"
+ std::clog << " - dimension " << st.dimension() << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:"
<< std::endl;
- std::cout << "**************************************************************" << std::endl;
- std::cout << "strict graph G { " << std::endl;
+ std::clog << "**************************************************************" << std::endl;
+ std::clog << "strict graph G { " << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << static_cast<int>(vertex) << " -- ";
+ std::clog << static_cast<int>(vertex) << " -- ";
}
- std::cout << ";" << std::endl;
+ std::clog << ";" << std::endl;
}
- std::cout << "}" << std::endl;
- std::cout << "**************************************************************" << std::endl;
+ std::clog << "}" << std::endl;
+ std::clog << "**************************************************************" << std::endl;
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(st);
diff --git a/src/Persistent_cohomology/example/plain_homology.cpp b/src/Persistent_cohomology/example/plain_homology.cpp
index 84333e46..236b67de 100644
--- a/src/Persistent_cohomology/example/plain_homology.cpp
+++ b/src/Persistent_cohomology/example/plain_homology.cpp
@@ -59,9 +59,6 @@ int main() {
st.insert_simplex_and_subfaces(edge35);
st.insert_simplex(vertex4);
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
-
// Class for homology computation
// By default, since the complex has dimension 1, only 0-dimensional homology would be computed.
// Here we also want persistent homology to be computed for the maximal dimension in the complex (persistence_dim_max = true)
@@ -83,9 +80,9 @@ int main() {
pcoh.output_diagram();
// Print the Betti numbers are b0=2 and b1=2.
- std::cout << std::endl;
- std::cout << "The Betti numbers are : ";
+ std::clog << std::endl;
+ std::clog << "The Betti numbers are : ";
for (int i = 0; i < 3; i++)
- std::cout << "b" << i << " = " << pcoh.betti_number(i) << " ; ";
- std::cout << std::endl;
+ std::clog << "b" << i << " = " << pcoh.betti_number(i) << " ; ";
+ std::clog << std::endl;
}
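
The comment kept in this example is the key point: the second constructor argument, persistence_dim_max, asks for persistence pairs in the maximal dimension of the complex as well. A minimal sketch:

    #include <gudhi/Simplex_tree.h>
    #include <gudhi/Persistent_cohomology.h>
    #include <gudhi/Persistent_cohomology/Field_Zp.h>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<> st;
      std::vector<int> edge = {0, 1};
      st.insert_simplex_and_subfaces(edge);  // a 1-dimensional complex, no filtration values
      using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
      // persistence_dim_max = true: also pair simplices of the maximal dimension
      // (here 1), instead of stopping one dimension below it as the default does.
      Gudhi::persistent_cohomology::Persistent_cohomology<Gudhi::Simplex_tree<>, Field_Zp>
          pcoh(st, true);
      pcoh.init_coefficients(2);
      pcoh.compute_persistent_cohomology();
      pcoh.output_diagram();
    }
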
diff --git a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
index 9eb5ccfc..84453898 100644
--- a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
+++ b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
@@ -56,11 +56,8 @@ int main(int argc, char * argv[]) {
Simplex_tree simplex_tree;
rips_complex_from_file.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
@@ -99,7 +96,7 @@ void program_options(int argc, char * argv[]
visible.add_options()
("help,h", "produce help message")
("output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")
+ "Name of file in which the persistence diagram is written. Default print in standard output")
("max-edge-length,r", po::value<Filtration_value>(&threshold)->default_value(0),
"Maximal length of an edge for the Rips complex construction.")
("cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
@@ -107,7 +104,7 @@ void program_options(int argc, char * argv[]
("min-field-charac,p", po::value<int>(&min_p)->default_value(2),
"Minimal characteristic p of the coefficient field Z/pZ.")
("max-field-charac,q", po::value<int>(&max_p)->default_value(1223),
- "Minimial characteristic q of the coefficient field Z/pZ.")
+ "Maximal characteristic q of the coefficient field Z/pZ.")
("min-persistence,m", po::value<Filtration_value>(&min_persistence),
"Minimal lifetime of homology feature to be recorded. Default is 0");
@@ -123,20 +120,20 @@ void program_options(int argc, char * argv[]
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with various coefficient fields \n";
- std::cout << "of a Rips complex defined on a set of input points. The coefficient \n";
- std::cout << "fields are all the Z/rZ for a prime number r contained in the \n";
- std::cout << "specified range [p,q]\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p1*...*pr dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p1*...*pr is the product of prime numbers pi such that the homology \n";
- std::cout << "feature exists in homology with Z/piZ coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with various coefficient fields \n";
+ std::clog << "of a Rips complex defined on a set of input points. The coefficient \n";
+ std::clog << "fields are all the Z/rZ for a prime number r contained in the \n";
+ std::clog << "specified range [p,q]\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p1*...*pr dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p1*...*pr is the product of prime numbers pi such that the homology \n";
+ std::clog << "feature exists in homology with Z/piZ coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
index 02db05ec..6f37cf5c 100644
--- a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
+++ b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
@@ -73,11 +73,8 @@ int main(int argc, char * argv[]) {
// expand the graph until dimension dim_max
st.expansion(dim_max);
- std::cout << "The complex contains " << st.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << st.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
+ std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << st.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(st);
@@ -115,7 +112,7 @@ void program_options(int argc, char * argv[]
visible.add_options()
("help,h", "produce help message")
("output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")
+ "Name of file in which the persistence diagram is written. Default print in standard output")
("max-edge-length,r",
po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal length of an edge for the Rips complex construction.")
@@ -138,17 +135,17 @@ void program_options(int argc, char * argv[]
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
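
A minimal sketch of the step-by-step construction this example performs before the persistence computation: a proximity graph, inserted as the one-skeleton, then expanded into a flag complex; the threshold and point set are toy values:

    #include <gudhi/Simplex_tree.h>
    #include <gudhi/graph_simplicial_complex.h>
    #include <gudhi/distance_functions.h>
    #include <vector>

    int main() {
      using Simplex_tree = Gudhi::Simplex_tree<>;
      using Point = std::vector<double>;
      std::vector<Point> points = {{0., 0.}, {1., 0.}, {0., 1.}};
      // Step 1: one-skeleton as a proximity graph (edges shorter than the threshold).
      auto graph = Gudhi::compute_proximity_graph<Simplex_tree>(
          points, /*threshold=*/1.5, Gudhi::Euclidean_distance());
      // Step 2: insert the graph, then expand it to a flag complex of dimension <= dim_max.
      Simplex_tree st;
      st.insert_graph(graph);
      st.expansion(/*dim_max=*/2);
    }
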
diff --git a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
index 37fa5e93..6b60f603 100644
--- a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
+++ b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
@@ -17,10 +17,6 @@
#include <boost/program_options.hpp>
-#ifdef GUDHI_USE_TBB
-#include <tbb/task_scheduler_init.h>
-#endif
-
#include <string>
#include <vector>
@@ -64,13 +60,8 @@ int main(int argc, char * argv[]) {
Simplex_tree& st = *new Simplex_tree;
rips_complex_from_file.create_complex(st, dim_max);
- std::cout << "The complex contains " << st.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << st.dimension() << " \n";
-
-#ifdef GUDHI_USE_TBB
- // Unnecessary, but clarifies which operations are parallel.
- tbb::task_scheduler_init ts;
-#endif
+ std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << st.dimension() << " \n";
// Sort the simplices in the order of the filtration
st.initialize_filtration();
@@ -81,10 +72,6 @@ int main(int argc, char * argv[]) {
// Convert to a more convenient representation.
Gudhi::Hasse_complex<> hcpx(st);
-#ifdef GUDHI_USE_TBB
- ts.terminate();
-#endif
-
// Free some space.
delete &st;
@@ -122,7 +109,7 @@ void program_options(int argc, char * argv[]
visible.add_options()
("help,h", "produce help message")
("output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")
+ "Name of file in which the persistence diagram is written. Default print in standard output")
("max-edge-length,r", po::value<Filtration_value>(&threshold)->default_value(0),
"Maximal length of an edge for the Rips complex construction.")
("cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
@@ -144,17 +131,17 @@ void program_options(int argc, char * argv[]
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
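
A minimal sketch of the conversion at the heart of this example: the Simplex_tree is re-encoded as a Hasse_complex (an explicit boundary matrix), and the tree can then be freed before persistence is computed on the new representation:

    #include <gudhi/Simplex_tree.h>
    #include <gudhi/Hasse_complex.h>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<>& st = *new Gudhi::Simplex_tree<>;  // heap-allocated, as in the example
      std::vector<int> triangle = {0, 1, 2};
      st.insert_simplex_and_subfaces(triangle, 0.3);
      st.initialize_filtration();       // sort the filtration before converting
      Gudhi::Hasse_complex<> hcpx(st);  // boundary-matrix representation
      delete &st;                       // free the tree, keep only hcpx
    }
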
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index 0f1876d0..c00bd33d 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -100,7 +100,7 @@ class Persistent_cohomology {
ds_rank_(num_simplices_), // union-find
ds_parent_(num_simplices_), // union-find
ds_repr_(num_simplices_, NULL), // union-find -> annotation vectors
- dsets_(&ds_rank_[0], &ds_parent_[0]), // union-find
+ dsets_(ds_rank_.data(), ds_parent_.data()), // union-find
cam_(), // collection of annotation vectors
zero_cocycles_(), // union-find -> Simplex_key of creator for 0-homology
transverse_idx_(), // key -> row
@@ -211,7 +211,7 @@ class Persistent_cohomology {
/** \brief Update the cohomology groups under the insertion of an edge.
*
* The 0-homology is maintained with a simple Union-Find data structure, which
- * explains the existance of a specific function of edge insertions. */
+ * explains the existence of a specific function of edge insertions. */
void update_cohomology_groups_edge(Simplex_handle sigma) {
Simplex_handle u, v;
boost::tie(u, v) = cpx_->endpoints(sigma);
@@ -288,10 +288,7 @@ class Persistent_cohomology {
// with multiplicity. We used to sum the coefficients directly in
// annotations_in_boundary by using a map, we now do it later.
typedef std::pair<Column *, int> annotation_t;
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<annotation_t> annotations_in_boundary;
+ thread_local std::vector<annotation_t> annotations_in_boundary;
annotations_in_boundary.clear();
int sign = 1 - 2 * (dim_sigma % 2); // \in {-1,1} provides the sign in the
// alternate sum in the boundary.
@@ -564,35 +561,22 @@ class Persistent_cohomology {
void output_diagram(std::ostream& ostream = std::cout) {
cmp_intervals_by_length cmp(cpx_);
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
- bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
- // Special case on windows, inf is "1.#INF" (cf. unitary tests and R package TDA)
- if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits<Filtration_value>::infinity()) {
- ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " inf " << std::endl;
- } else {
- ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " "
- << cpx_->filtration(get<1>(pair)) << " " << std::endl;
- }
+ ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
+ << cpx_->filtration(get<0>(pair)) << " "
+ << cpx_->filtration(get<1>(pair)) << " " << std::endl;
}
}
void write_output_diagram(std::string diagram_name) {
std::ofstream diagram_out(diagram_name.c_str());
+ diagram_out.exceptions(diagram_out.failbit);
cmp_intervals_by_length cmp(cpx_);
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
- bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
- // Special case on windows, inf is "1.#INF"
- if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits<Filtration_value>::infinity()) {
- diagram_out << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " inf" << std::endl;
- } else {
- diagram_out << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " "
- << cpx_->filtration(get<1>(pair)) << std::endl;
- }
+ diagram_out << cpx_->dimension(get<0>(pair)) << " "
+ << cpx_->filtration(get<0>(pair)) << " "
+ << cpx_->filtration(get<1>(pair)) << std::endl;
}
}
@@ -739,7 +723,7 @@ class Persistent_cohomology {
boost::disjoint_sets<int *, Simplex_key *> dsets_;
/* The compressed annotation matrix fields.*/
Cam cam_;
- /* Dictionary establishing the correspondance between the Simplex_key of
+ /* Dictionary establishing the correspondence between the Simplex_key of
* the root vertex in the union-find ds and the Simplex_key of the vertex which
* created the connected component as a 0-dimension homology feature.*/
std::map<Simplex_key, Simplex_key> zero_cocycles_;
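
Two behaviors this hunk relies on: standard streams print an infinite filtration value as "inf" (the old "1.#INF" Windows special case predates that), and enabling failbit exceptions makes file errors throw instead of being silently ignored. A minimal sketch of both:

    #include <fstream>
    #include <iostream>
    #include <limits>

    int main() {
      double inf = std::numeric_limits<double>::infinity();
      std::cout << inf << "\n";      // prints "inf" on conforming implementations

      std::ofstream out("diagram.txt");
      out.exceptions(out.failbit);   // as in write_output_diagram above
      // Any write failure now raises std::ios_base::failure.
      out << 0 << " " << 0.0 << " " << inf << "\n";
    }
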
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
index 0673625c..f442b632 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
@@ -13,6 +13,7 @@
#include <utility>
#include <vector>
+#include <stdexcept>
namespace Gudhi {
@@ -33,15 +34,28 @@ class Field_Zp {
}
void init(int charac) {
- assert(charac > 0); // division by zero + non negative values
Prime = charac;
+
+ // Check that the provided prime is at most 46337, the largest value for which the computation below and the 'plus_times_equal' function fit in an int, i.e. (max_prime-1)*max_prime <= INT_MAX
+ if(Prime > 46337)
+ throw std::invalid_argument("Maximum homology_coeff_field allowed value is 46337");
+
+ // Check that the characteristic is at least 2 (compositeness is detected below)
+ if (Prime <= 1)
+ throw std::invalid_argument("homology_coeff_field must be a prime number");
+
inverse_.clear();
inverse_.reserve(charac);
inverse_.push_back(0);
for (int i = 1; i < Prime; ++i) {
int inv = 1;
- while (((inv * i) % Prime) != 1)
+ int mult = inv * i;
+ while ( (mult % Prime) != 1) {
++inv;
+ if(mult == Prime)
+ throw std::invalid_argument("homology_coeff_field must be a prime number");
+ mult = inv * i;
+ }
inverse_.push_back(inv);
}
}
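
A standalone sketch of the loop just added: it builds the modular inverse table by brute force and doubles as a primality check, since inv*i can only hit Prime exactly when i divides Prime, and the smallest prime factor of a composite Prime is reached first, so the throw always fires before the search could cycle:

    #include <stdexcept>
    #include <vector>

    std::vector<int> inverse_table(int p) {
      std::vector<int> inverse = {0};
      for (int i = 1; i < p; ++i) {
        int inv = 1;
        int mult = inv * i;
        // Find the smallest inv with (inv * i) % p == 1.
        while (mult % p != 1) {
          ++inv;
          if (mult == p)  // i divides p, so p is composite
            throw std::invalid_argument("not a prime number");
          mult = inv * i;
        }
        inverse.push_back(inv);
      }
      return inverse;  // for p = 5: {0, 1, 3, 2, 4}, since 2*3 = 6 = 1 (mod 5)
    }
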
diff --git a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
index b9f11607..7a2feeff 100644
--- a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
+++ b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers )
// 2 1 0 inf
// means that in Z/2Z-homology, the Betti numbers are b0=2 and b1=1.
- std::cout << "BETTI NUMBERS" << std::endl;
+ std::clog << "BETTI NUMBERS" << std::endl;
BOOST_CHECK(pcoh.betti_number(0) == 2);
BOOST_CHECK(pcoh.betti_number(1) == 1);
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers )
BOOST_CHECK(bns[1] == 1);
BOOST_CHECK(bns[2] == 0);
- std::cout << "GET PERSISTENT PAIRS" << std::endl;
+ std::clog << "GET PERSISTENT PAIRS" << std::endl;
// Custom sort and output persistence
cmp_intervals_by_dim_then_length<Mini_simplex_tree> cmp(&st);
@@ -118,12 +118,12 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers )
BOOST_CHECK(st.filtration(get<0>(persistent_pairs[2])) == 0);
BOOST_CHECK(get<1>(persistent_pairs[2]) == st.null_simplex());
- std::cout << "INTERVALS IN DIMENSION" << std::endl;
+ std::clog << "INTERVALS IN DIMENSION" << std::endl;
auto intervals_in_dimension_0 = pcoh.intervals_in_dimension(0);
- std::cout << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl;
+ std::clog << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl;
for (std::size_t i = 0; i < intervals_in_dimension_0.size(); i++)
- std::cout << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," <<
+ std::clog << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," <<
intervals_in_dimension_0[i].second << "]" << std::endl;
BOOST_CHECK(intervals_in_dimension_0.size() == 2);
BOOST_CHECK(intervals_in_dimension_0[0].first == 0);
@@ -133,16 +133,16 @@ BOOST_AUTO_TEST_CASE( plain_homology_betti_numbers )
auto intervals_in_dimension_1 = pcoh.intervals_in_dimension(1);
- std::cout << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl;
+ std::clog << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl;
for (std::size_t i = 0; i < intervals_in_dimension_1.size(); i++)
- std::cout << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," <<
+ std::clog << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," <<
intervals_in_dimension_1[i].second << "]" << std::endl;
BOOST_CHECK(intervals_in_dimension_1.size() == 1);
BOOST_CHECK(intervals_in_dimension_1[0].first == 0);
BOOST_CHECK(intervals_in_dimension_1[0].second == std::numeric_limits<Mini_simplex_tree::Filtration_value>::infinity());
auto intervals_in_dimension_2 = pcoh.intervals_in_dimension(2);
- std::cout << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl;
+ std::clog << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl;
BOOST_CHECK(intervals_in_dimension_2.size() == 0);
}
@@ -259,12 +259,12 @@ BOOST_AUTO_TEST_CASE( betti_numbers )
BOOST_CHECK(st.filtration(get<0>(persistent_pairs[2])) == 1);
BOOST_CHECK(get<1>(persistent_pairs[2]) == st.null_simplex());
- std::cout << "INTERVALS IN DIMENSION" << std::endl;
+ std::clog << "INTERVALS IN DIMENSION" << std::endl;
auto intervals_in_dimension_0 = pcoh.intervals_in_dimension(0);
- std::cout << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl;
+ std::clog << "intervals_in_dimension_0.size() = " << intervals_in_dimension_0.size() << std::endl;
for (std::size_t i = 0; i < intervals_in_dimension_0.size(); i++)
- std::cout << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," <<
+ std::clog << "intervals_in_dimension_0[" << i << "] = [" << intervals_in_dimension_0[i].first << "," <<
intervals_in_dimension_0[i].second << "]" << std::endl;
BOOST_CHECK(intervals_in_dimension_0.size() == 2);
BOOST_CHECK(intervals_in_dimension_0[0].first == 2);
@@ -273,19 +273,19 @@ BOOST_AUTO_TEST_CASE( betti_numbers )
BOOST_CHECK(intervals_in_dimension_0[1].second == std::numeric_limits<Mini_simplex_tree::Filtration_value>::infinity());
auto intervals_in_dimension_1 = pcoh.intervals_in_dimension(1);
- std::cout << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl;
+ std::clog << "intervals_in_dimension_1.size() = " << intervals_in_dimension_1.size() << std::endl;
for (std::size_t i = 0; i < intervals_in_dimension_1.size(); i++)
- std::cout << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," <<
+ std::clog << "intervals_in_dimension_1[" << i << "] = [" << intervals_in_dimension_1[i].first << "," <<
intervals_in_dimension_1[i].second << "]" << std::endl;
BOOST_CHECK(intervals_in_dimension_1.size() == 1);
BOOST_CHECK(intervals_in_dimension_1[0].first == 4);
BOOST_CHECK(intervals_in_dimension_1[0].second == std::numeric_limits<Mini_simplex_tree::Filtration_value>::infinity());
auto intervals_in_dimension_2 = pcoh.intervals_in_dimension(2);
- std::cout << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl;
+ std::clog << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl;
BOOST_CHECK(intervals_in_dimension_2.size() == 0);
- std::cout << "EMPTY COMPLEX" << std::endl;
+ std::clog << "EMPTY COMPLEX" << std::endl;
Simplex_tree empty;
empty.initialize_filtration();
St_persistence pcoh_empty(empty, false);
diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
index a1c106d5..ea41a8aa 100644
--- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
+++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
@@ -21,7 +21,7 @@ using namespace boost::unit_test;
typedef Simplex_tree<> typeST;
-std::string test_rips_persistence(int coefficient, int min_persistence) {
+std::string test_persistence(int coefficient, int min_persistence) {
// file is copied in CMakeLists.txt
std::ifstream simplex_tree_stream;
simplex_tree_stream.open("simplex_tree_file_for_unit_test.txt");
@@ -30,7 +30,7 @@ std::string test_rips_persistence(int coefficient, int min_persistence) {
simplex_tree_stream.close();
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension()
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension()
<< std::endl;
// Check
@@ -44,16 +44,16 @@ std::string test_rips_persistence(int coefficient, int min_persistence) {
Persistent_cohomology<Simplex_tree<>, Field_Zp> pcoh(st);
pcoh.init_coefficients( coefficient ); // initializes the coefficient field for homology
- // Check infinite rips
+ // Compute the persistent homology of the complex
pcoh.compute_persistent_cohomology( min_persistence ); // Minimal lifetime of homology feature to be recorded.
- std::ostringstream ossInfinite;
+ std::ostringstream ossPers;
- pcoh.output_diagram(ossInfinite);
- std::string strInfinite = ossInfinite.str();
- return strInfinite;
+ pcoh.output_diagram(ossPers);
+ std::string strPers = ossPers.str();
+ return strPers;
}
-void test_rips_persistence_in_dimension(int dimension) {
+void test_persistence_with_coeff_field(int coeff_field) {
std::string value0(" 0 0.02 1.12");
std::string value1(" 0 0.03 1.13");
std::string value2(" 0 0.04 1.14");
@@ -65,112 +65,104 @@ void test_rips_persistence_in_dimension(int dimension) {
std::string value8(" 0 0 inf" );
std::string value9(" 0 0.01 inf" );
- value0.insert(0,std::to_string(dimension));
- value1.insert(0,std::to_string(dimension));
- value2.insert(0,std::to_string(dimension));
- value3.insert(0,std::to_string(dimension));
- value4.insert(0,std::to_string(dimension));
- value5.insert(0,std::to_string(dimension));
- value6.insert(0,std::to_string(dimension));
- value7.insert(0,std::to_string(dimension));
- value8.insert(0,std::to_string(dimension));
- value9.insert(0,std::to_string(dimension));
-
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=0" << std::endl;
-
- std::string str_rips_persistence = test_rips_persistence(dimension, 0);
- std::cout << str_rips_persistence << std::endl;
+ value0.insert(0,std::to_string(coeff_field));
+ value1.insert(0,std::to_string(coeff_field));
+ value2.insert(0,std::to_string(coeff_field));
+ value3.insert(0,std::to_string(coeff_field));
+ value4.insert(0,std::to_string(coeff_field));
+ value5.insert(0,std::to_string(coeff_field));
+ value6.insert(0,std::to_string(coeff_field));
+ value7.insert(0,std::to_string(coeff_field));
+ value8.insert(0,std::to_string(coeff_field));
+ value9.insert(0,std::to_string(coeff_field));
+
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=0" << std::endl;
+
+ std::string str_persistence = test_persistence(coeff_field, 0);
+ std::clog << str_persistence << std::endl;
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl;
-
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=1" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, 1);
-
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl;
-
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=2" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, 2);
-
- BOOST_CHECK(str_rips_persistence.find(value0) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value1) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value2) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl;
-
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=Inf" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, (std::numeric_limits<int>::max)());
-
- BOOST_CHECK(str_rips_persistence.find(value0) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value1) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value2) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
+
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=1" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, 1);
+
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
+
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=2" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, 2);
+
+ BOOST_CHECK(str_persistence.find(value0) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value1) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value2) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
+
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=Inf" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, (std::numeric_limits<int>::max)());
+
+ BOOST_CHECK(str_persistence.find(value0) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value1) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value2) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_1 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_not_prime )
{
- test_rips_persistence_in_dimension(1);
+ for (auto non_prime : {0, 1, 4, 6})
+ BOOST_CHECK_THROW(test_persistence_with_coeff_field(non_prime), std::invalid_argument);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_2 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_prime )
{
- test_rips_persistence_in_dimension(2);
+ for (auto prime : {2, 3, 5, 11, 13})
+ test_persistence_with_coeff_field(prime);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_3 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_limit )
{
- test_rips_persistence_in_dimension(3);
+ BOOST_CHECK_THROW(test_persistence_with_coeff_field(46349), std::invalid_argument);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_5 )
-{
- test_rips_persistence_in_dimension(5);
-}
-
-// TODO(VR): not working from 6
-// std::string str_rips_persistence = test_rips_persistence(6, 0);
-// TODO(VR): division by zero
-// std::string str_rips_persistence = test_rips_persistence(0, 0);
-
/** SimplexTree minimal options to test the limits.
*
 * Maximum number of simplices to compute persistence is <CODE>std::numeric_limits<std::uint8_t>::max()</CODE> = 255.*/
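A sketch of what such minimal options can look like (the member list follows the SimplexTreeOptions concept; the exact struct defined in the test file may differ, so treat the flags below as an assumption):

// Minimal Simplex_tree options with a tiny key type, so that the
// simplex-count limit can actually be hit in a test.
#include <gudhi/Simplex_tree.h>
#include <cstdint>

struct Mini_options {
  typedef Gudhi::linear_indexing_tag Indexing_tag;
  typedef short Vertex_handle;
  typedef double Filtration_value;
  typedef std::uint8_t Simplex_key;  // max() = 255, hence the limit mentioned above
  static const bool store_key = true;
  static const bool store_filtration = true;
  static const bool contiguous_vertices = false;
};
typedef Gudhi::Simplex_tree<Mini_options> Mini_simplex_tree;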
diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
index 9e767943..c6c0bfaf 100644
--- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
+++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
@@ -21,7 +21,7 @@ using namespace boost::unit_test;
typedef Simplex_tree<> typeST;
-std::string test_rips_persistence(int min_coefficient, int max_coefficient, double min_persistence) {
+std::string test_persistence(int min_coefficient, int max_coefficient, double min_persistence) {
// file is copied in CMakeLists.txt
std::ifstream simplex_tree_stream;
simplex_tree_stream.open("simplex_tree_file_for_multi_field_unit_test.txt");
@@ -30,7 +30,7 @@ std::string test_rips_persistence(int min_coefficient, int max_coefficient, doub
simplex_tree_stream.close();
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension()
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << " - dimension= " << st.dimension()
<< std::endl;
// Check
@@ -44,17 +44,17 @@ std::string test_rips_persistence(int min_coefficient, int max_coefficient, doub
Persistent_cohomology<Simplex_tree<>, Multi_field> pcoh(st);
pcoh.init_coefficients(min_coefficient, max_coefficient); // initializes the coefficient field for homology
- // Check infinite rips
+ // Compute the persistent homology of the complex
pcoh.compute_persistent_cohomology(min_persistence); // Minimal lifetime of homology feature to be recorded.
- std::ostringstream ossRips;
- pcoh.output_diagram(ossRips);
+ std::ostringstream ossPers;
+ pcoh.output_diagram(ossPers);
- std::string strRips = ossRips.str();
- return strRips;
+ std::string strPers = ossPers.str();
+ return strPers;
}
-void test_rips_persistence_in_dimension(int min_dimension, int max_dimension) {
+void test_persistence_with_coeff_field(int min_coefficient, int max_coefficient) {
  // the complex is made of 2 disconnected components
std::string value0(" 0 0.25 inf");
std::string value1(" 1 0.4 inf");
@@ -68,48 +68,60 @@ void test_rips_persistence_in_dimension(int min_dimension, int max_dimension) {
std::string value6(" 2 0.3 inf");
std::string value7(" 2 0.4 inf");
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_DIM=" << min_dimension << " MAX_DIM=" << max_dimension << " MIN_PERS=0" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_COEFF=" << min_coefficient << " MAX_COEFF=" << max_coefficient << " MIN_PERS=0" << std::endl;
- std::string str_rips_persistence = test_rips_persistence(min_dimension, max_dimension, 0.0);
- std::cout << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ std::string str_persistence = test_persistence(min_coefficient, max_coefficient, 0.0);
+ std::clog << "str_persistence=" << str_persistence << std::endl;
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
- if ((min_dimension < 2) && (max_dimension < 2)) {
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
+ if ((min_coefficient < 2) && (max_coefficient < 2)) {
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
} else {
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
}
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_1_2) {
- test_rips_persistence_in_dimension(0, 1);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_0) {
+ test_persistence_with_coeff_field(0, 0);
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_2_3) {
- test_rips_persistence_in_dimension(1, 3);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_1) {
+ test_persistence_with_coeff_field(0, 1);
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_1_5) {
- test_rips_persistence_in_dimension(1, 5);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_6) {
+ test_persistence_with_coeff_field(0, 6);
}
-// TODO(VR): not working from 6
-// std::string str_rips_persistence = test_rips_persistence(6, 0);
-// TODO(VR): division by zero
-// std::string str_rips_persistence = test_rips_persistence(0, 0);
-// TODO(VR): is result OK of :
-// test_rips_persistence_in_dimension(3, 4);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_2) {
+ test_persistence_with_coeff_field(1, 2);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_3) {
+ test_persistence_with_coeff_field(1, 3);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_5) {
+ test_persistence_with_coeff_field(1, 5);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_2_3) {
+ test_persistence_with_coeff_field(2, 3);
+}
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_3_4) {
+ test_persistence_with_coeff_field(3, 4);
+}
diff --git a/src/Rips_complex/doc/Intro_rips_complex.h b/src/Rips_complex/doc/Intro_rips_complex.h
index b2840686..cd77b327 100644
--- a/src/Rips_complex/doc/Intro_rips_complex.h
+++ b/src/Rips_complex/doc/Intro_rips_complex.h
@@ -63,9 +63,8 @@ namespace rips_complex {
* value set with \f$max(filtration(4,5), filtration(4,6), filtration(5,6))\f$.
* And so on for simplex (0,1,2,3).
*
- * If the Rips_complex interfaces are not detailed enough for your need, please refer to
- * <a href="_persistent_cohomology_2rips_persistence_step_by_step_8cpp-example.html">
- * rips_persistence_step_by_step.cpp</a> example, where the constructions of the graph and
+ * If the Rips_complex interfaces are not detailed enough for your needs, please refer to the example
+ * \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp}, where the constructions of the graph and
* the Simplex_tree are more detailed.
*
* \section sparserips Sparse Rips complex
@@ -111,7 +110,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_points.cpp
+ * \include example_one_skeleton_rips_from_points.cpp
*
* When launching (Rips maximal distance between 2 points is 12.0, is expanded
* until dimension 1 - one skeleton graph in other words):
@@ -121,7 +120,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_for_doc.txt
+ * \include one_skeleton_rips_for_doc.txt
*
* \subsection ripsoffexample Example from OFF file
*
@@ -132,7 +131,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the Rips complex.
*
- * \include Rips_complex/example_rips_complex_from_off_file.cpp
+ * \include example_rips_complex_from_off_file.cpp
*
* When launching:
*
@@ -141,7 +140,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/full_skeleton_rips_for_doc.txt
+ * \include full_skeleton_rips_for_doc.txt
*
*
* \subsection sparseripspointscloudexample Example of a sparse Rips from a point cloud
@@ -149,7 +148,7 @@ namespace rips_complex {
* This example builds the full sparse Rips of a set of 2D Euclidean points, then prints some minimal
* information about the complex.
*
- * \include Rips_complex/example_sparse_rips.cpp
+ * \include example_sparse_rips.cpp
*
* When launching:
*
@@ -172,7 +171,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp
+ * \include example_one_skeleton_rips_from_distance_matrix.cpp
*
* When launching (Rips maximal distance between 2 points is 1.0, is expanded until dimension 1 - one skeleton graph
 * in other words):
@@ -182,7 +181,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_for_doc.txt
+ * \include one_skeleton_rips_for_doc.txt
*
* \subsection ripscsvdistanceexample Example from a distance matrix read in a csv file
*
@@ -192,7 +191,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the Rips complex.
*
- * \include Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp
+ * \include example_rips_complex_from_csv_distance_matrix_file.cpp
*
* When launching:
*
@@ -201,7 +200,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/full_skeleton_rips_for_doc.txt
+ * \include full_skeleton_rips_for_doc.txt
*
*
* \section ripscorrelationematrix Correlation matrix
@@ -213,7 +212,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_correlation_matrix.cpp
+ * \include example_one_skeleton_rips_from_correlation_matrix.cpp
*
* When launching:
*
@@ -222,7 +221,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_from_correlation_matrix_for_doc.txt
+ * \include one_skeleton_rips_from_correlation_matrix_for_doc.txt
*
 * All the other constructions discussed for the Rips complex from a distance matrix can also be performed for Rips complex
 * construction from correlation matrices.
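Summing up this page, a minimal end-to-end construction from a point cloud could look as follows (point coordinates are illustrative; the API calls are the ones this documentation describes):

#include <gudhi/Rips_complex.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/distance_functions.h>
#include <iostream>
#include <vector>

int main() {
  using Simplex_tree = Gudhi::Simplex_tree<>;
  using Filtration_value = Simplex_tree::Filtration_value;
  using Rips_complex = Gudhi::rips_complex::Rips_complex<Filtration_value>;

  std::vector<std::vector<double>> points = {{1., 0.}, {0., 1.}, {-1., 0.}, {0., -1.}};
  Rips_complex rips(points, /*threshold=*/12.0, Gudhi::Euclidean_distance());

  Simplex_tree stree;
  rips.create_complex(stree, /*dim_max=*/1);  // one-skeleton, as in the first example above
  std::clog << "Rips complex is of dimension " << stree.dimension() << " - "
            << stree.num_simplices() << " simplices - " << stree.num_vertices()
            << " vertices." << std::endl;
  return 0;
}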
diff --git a/src/Rips_complex/example/CMakeLists.txt b/src/Rips_complex/example/CMakeLists.txt
index e7772bdb..206f4c11 100644
--- a/src/Rips_complex/example/CMakeLists.txt
+++ b/src/Rips_complex/example/CMakeLists.txt
@@ -53,19 +53,22 @@ if (DIFF_PATH)
add_test(Rips_complex_example_from_off_doc_12_1_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_1.txt
${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_off_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_1)
+
add_test(Rips_complex_example_from_off_doc_12_3_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_3.txt
${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_off_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_3)
+
add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_1.txt
${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_1)
+
add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_3.txt
${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_3)
+
endif()
-install(TARGETS Rips_complex_example_from_off DESTINATION bin)
-install(TARGETS Rips_complex_example_one_skeleton_from_points DESTINATION bin)
-install(TARGETS Rips_complex_example_one_skeleton_from_distance_matrix DESTINATION bin)
-install(TARGETS Rips_complex_example_from_csv_distance_matrix DESTINATION bin)
-install(TARGETS Rips_complex_example_one_skeleton_rips_from_correlation_matrix DESTINATION bin)
diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
index 05bacb9f..3811d1f1 100644
--- a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
+++ b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
@@ -40,7 +40,7 @@ int main() {
throw "The input matrix is not a correlation matrix. The program will now terminate.\n";
}
correlations[i][j] = 1 - correlations[i][j];
- // Here we make sure that we will get the treshold value equal to maximal
+ // Here we make sure that we will get the threshold value equal to maximal
// distance in the matrix.
if (correlations[i][j] > threshold) threshold = correlations[i][j];
}
@@ -63,18 +63,18 @@ int main() {
// have a reverse filtration (i.e. filtration of boundary of each simplex S
// is greater or equal to the filtration of S).
// ----------------------------------------------------------------------------
- std::cout << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
+ std::clog << "Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices() << " simplices - "
<< stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> "
+ std::clog << ") -> "
<< "[" << stree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << std::endl;
}
return 0;
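The transform performed above — a correlation c mapped to the distance-like value 1 - c, with the largest entry kept as the Rips threshold so that no edge is dropped — can be isolated in a few lines. A self-contained sketch with made-up values:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Lower-triangular correlation matrix, illustrative values only.
  std::vector<std::vector<double>> correlations = {{1.0}, {0.9, 1.0}, {0.2, 0.5, 1.0}};
  double threshold = 0.0;
  for (std::size_t i = 0; i != correlations.size(); ++i)
    for (std::size_t j = 0; j != correlations[i].size(); ++j) {
      correlations[i][j] = 1 - correlations[i][j];  // distance-like value
      threshold = std::max(threshold, correlations[i][j]);
    }
  std::cout << "Rips threshold = " << threshold << '\n';  // prints 0.8 for this data
  return 0;
}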
diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp
index bbc3c755..25f93b03 100644
--- a/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp
+++ b/src/Rips_complex/example/example_one_skeleton_rips_from_distance_matrix.cpp
@@ -39,19 +39,19 @@ int main() {
// ----------------------------------------------------------------------------
// Display information about the one skeleton Rips complex
// ----------------------------------------------------------------------------
- std::cout << "Rips complex is of dimension " << stree.dimension() <<
+ std::clog << "Rips complex is of dimension " << stree.dimension() <<
" - " << stree.num_simplices() << " simplices - " <<
stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" <<
+ std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" <<
std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << stree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
return 0;
diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp
index a1db8910..d9df245b 100644
--- a/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp
+++ b/src/Rips_complex/example/example_one_skeleton_rips_from_points.cpp
@@ -34,19 +34,19 @@ int main() {
// ----------------------------------------------------------------------------
// Display information about the one skeleton Rips complex
// ----------------------------------------------------------------------------
- std::cout << "Rips complex is of dimension " << stree.dimension() <<
+ std::clog << "Rips complex is of dimension " << stree.dimension() <<
" - " << stree.num_simplices() << " simplices - " <<
stree.num_vertices() << " vertices." << std::endl;
- std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" <<
+ std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" <<
std::endl;
for (auto f_simplex : stree.filtration_simplex_range()) {
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << stree.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
return 0;
}
diff --git a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
index b7040453..c0c57e7b 100644
--- a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
+++ b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
@@ -42,7 +42,7 @@ int main(int argc, char **argv) {
ouput_file_stream.open(std::string(argv[4]));
streambuffer = ouput_file_stream.rdbuf();
} else {
- streambuffer = std::cout.rdbuf();
+ streambuffer = std::clog.rdbuf();
}
Simplex_tree stree;
diff --git a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
index 36b468a7..9aa7a657 100644
--- a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
+++ b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
@@ -41,7 +41,7 @@ int main(int argc, char **argv) {
ouput_file_stream.open(std::string(argv[4]));
streambuffer = ouput_file_stream.rdbuf();
} else {
- streambuffer = std::cout.rdbuf();
+ streambuffer = std::clog.rdbuf();
}
Simplex_tree stree;
diff --git a/src/Rips_complex/example/example_sparse_rips.cpp b/src/Rips_complex/example/example_sparse_rips.cpp
index 1c95b48c..4bd31103 100644
--- a/src/Rips_complex/example/example_sparse_rips.cpp
+++ b/src/Rips_complex/example/example_sparse_rips.cpp
@@ -25,6 +25,6 @@ int main() {
// ----------------------------------------------------------------------------
// Display information about the complex
// ----------------------------------------------------------------------------
- std::cout << "Sparse Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
+ std::clog << "Sparse Rips complex is of dimension " << stree.dimension() << " - " << stree.num_simplices()
<< " simplices - " << stree.num_vertices() << " vertices." << std::endl;
}
diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
index 1b250818..7ae7b317 100644
--- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
+++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
@@ -15,12 +15,71 @@
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/choose_n_farthest_points.h>
-#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/graph_traits.hpp>
#include <boost/range/metafunctions.hpp>
+#include <boost/iterator/counting_iterator.hpp>
#include <vector>
namespace Gudhi {
+namespace rips_complex {
+// A custom graph class, because boost::adjacency_list does not conveniently allow choosing vertex descriptors.
+template <class Vertex_handle, class Filtration_value>
+struct Graph {
+ typedef std::vector<Vertex_handle> VList;
+ typedef std::vector<std::tuple<Vertex_handle, Vertex_handle, Filtration_value>> EList;
+ typedef typename VList::const_iterator vertex_iterator;
+ typedef boost::counting_iterator<std::size_t> edge_iterator;
+ VList vlist;
+ EList elist;
+};
+template <class Vertex_handle, class Filtration_value>
+void add_vertex(Vertex_handle v, Graph<Vertex_handle, Filtration_value>&g) { g.vlist.push_back(v); }
+template <class Vertex_handle, class Filtration_value>
+void add_edge(Vertex_handle u, Vertex_handle v, Filtration_value f, Graph<Vertex_handle, Filtration_value>&g) { g.elist.emplace_back(u, v, f); }
+template <class Vertex_handle, class Filtration_value>
+std::size_t num_vertices(Graph<Vertex_handle, Filtration_value> const&g) { return g.vlist.size(); }
+template <class Vertex_handle, class Filtration_value>
+std::size_t num_edges(Graph<Vertex_handle, Filtration_value> const&g) { return g.elist.size(); }
+template <class Vertex_handle, class Filtration_value, class Iter = typename Graph<Vertex_handle, Filtration_value>::vertex_iterator>
+std::pair<Iter, Iter>
+vertices(Graph<Vertex_handle, Filtration_value> const&g) {
+ return { g.vlist.begin(), g.vlist.end() };
+}
+template <class Vertex_handle, class Filtration_value>
+std::pair<boost::counting_iterator<std::size_t>, boost::counting_iterator<std::size_t>>
+edges(Graph<Vertex_handle, Filtration_value> const&g) {
+ typedef boost::counting_iterator<std::size_t> I;
+ return { I(0), I(g.elist.size()) };
+}
+template <class Vertex_handle, class Filtration_value>
+Vertex_handle source(std::size_t e, Graph<Vertex_handle, Filtration_value> const&g) { return std::get<0>(g.elist[e]); }
+template <class Vertex_handle, class Filtration_value>
+Vertex_handle target(std::size_t e, Graph<Vertex_handle, Filtration_value> const&g) { return std::get<1>(g.elist[e]); }
+template <class Vertex_handle, class Filtration_value>
+Filtration_value get(vertex_filtration_t, Graph<Vertex_handle, Filtration_value> const&, Vertex_handle) { return 0; }
+template <class Vertex_handle, class Filtration_value>
+Filtration_value get(edge_filtration_t, Graph<Vertex_handle, Filtration_value> const&g, std::size_t e) { return std::get<2>(g.elist[e]); }
+} // namespace rips_complex
+} // namespace Gudhi
+namespace boost {
+template <class Vertex_handle, class Filtration_value>
+struct graph_traits<Gudhi::rips_complex::Graph<Vertex_handle, Filtration_value>> {
+ typedef Gudhi::rips_complex::Graph<Vertex_handle, Filtration_value> G;
+ struct traversal_category : vertex_list_graph_tag, edge_list_graph_tag {};
+ typedef Vertex_handle vertex_descriptor;
+ typedef typename G::vertex_iterator vertex_iterator;
+ typedef std::size_t vertices_size_type;
+ typedef std::size_t edge_descriptor;
+ typedef typename G::edge_iterator edge_iterator;
+ typedef std::size_t edges_size_type;
+ typedef directed_tag directed_category;
+ typedef disallow_parallel_edge_tag edge_parallel_category;
+};
+// Etc. Since we don't expose this graph to the world, we know we are not going to query property_traits, for instance.
+}
+
+namespace Gudhi {
namespace rips_complex {
@@ -45,12 +104,8 @@ template <typename Filtration_value>
class Sparse_rips_complex {
private:
// TODO(MG): use a different graph where we know we can safely insert in parallel.
- typedef typename boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS,
- boost::property<vertex_filtration_t, Filtration_value>,
- boost::property<edge_filtration_t, Filtration_value>>
- Graph;
-
typedef int Vertex_handle;
+ typedef rips_complex::Graph<Vertex_handle, Filtration_value> Graph;
public:
/** \brief Sparse_rips_complex constructor from a list of points.
@@ -63,12 +118,12 @@ class Sparse_rips_complex {
*
*/
template <typename RandomAccessPointRange, typename Distance>
- Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double epsilon, Filtration_value mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value maxi=std::numeric_limits<Filtration_value>::infinity())
+ Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double const epsilon, Filtration_value const mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value const maxi=std::numeric_limits<Filtration_value>::infinity())
: epsilon_(epsilon) {
GUDHI_CHECK(epsilon > 0, "epsilon must be positive");
auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); };
- Ker<decltype(dist_fun)> kernel(dist_fun);
- subsampling::choose_n_farthest_points(kernel, boost::irange<Vertex_handle>(0, boost::size(points)), -1, -1,
+ // TODO: stop choose_n_farthest_points once it reaches mini or 0?
+ subsampling::choose_n_farthest_points(dist_fun, boost::irange<Vertex_handle>(0, boost::size(points)), -1, -1,
std::back_inserter(sorted_points), std::back_inserter(params));
compute_sparse_graph(dist_fun, epsilon, mini, maxi);
}
@@ -84,7 +139,7 @@ class Sparse_rips_complex {
* @param[in] maxi Maximal filtration value. Ignore anything above this scale.
*/
template <typename DistanceMatrix>
- Sparse_rips_complex(const DistanceMatrix& distance_matrix, double epsilon, Filtration_value mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value maxi=std::numeric_limits<Filtration_value>::infinity())
+ Sparse_rips_complex(const DistanceMatrix& distance_matrix, double const epsilon, Filtration_value const mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value const maxi=std::numeric_limits<Filtration_value>::infinity())
: Sparse_rips_complex(boost::irange<Vertex_handle>(0, boost::size(distance_matrix)),
[&](Vertex_handle i, Vertex_handle j) { return (i==j) ? 0 : (i<j) ? distance_matrix[j][i] : distance_matrix[i][j]; },
epsilon, mini, maxi) {}
@@ -100,7 +155,7 @@ class Sparse_rips_complex {
*
*/
template <typename SimplicialComplexForRips>
- void create_complex(SimplicialComplexForRips& complex, int dim_max) {
+ void create_complex(SimplicialComplexForRips& complex, int const dim_max) {
GUDHI_CHECK(complex.num_vertices() == 0,
std::invalid_argument("Sparse_rips_complex::create_complex - simplicial complex is not empty"));
@@ -109,17 +164,17 @@ class Sparse_rips_complex {
complex.expansion(dim_max);
return;
}
- const int n = boost::size(params);
- std::vector<Filtration_value> lambda(n);
+ const std::size_t n = num_vertices(graph_);
+ std::vector<Filtration_value> lambda(max_v + 1);
// lambda[original_order]=params[sorted_order]
- for(int i=0;i<n;++i)
+ for(std::size_t i=0;i<n;++i)
lambda[sorted_points[i]] = params[i];
double cst = epsilon_ * (1 - epsilon_) / 2;
auto block = [cst,&complex,&lambda](typename SimplicialComplexForRips::Simplex_handle sh){
auto filt = complex.filtration(sh);
- auto mini = filt * cst;
+ auto min_f = filt * cst;
for(auto v : complex.simplex_vertex_range(sh)){
- if(lambda[v] < mini)
+ if(lambda[v] < min_f)
return true; // v died before this simplex could be born
}
return false;
@@ -128,45 +183,36 @@ class Sparse_rips_complex {
}
private:
- // choose_n_farthest_points wants the distance function in this form...
- template <class Distance>
- struct Ker {
- typedef std::size_t Point_d; // index into point range
- Ker(Distance& d) : dist(d) {}
- // Despite the name, this is not squared...
- typedef Distance Squared_distance_d;
- Squared_distance_d& squared_distance_d_object() const { return dist; }
- Distance& dist;
- };
-
// PointRange must be random access.
template <typename Distance>
- void compute_sparse_graph(Distance& dist, double epsilon, Filtration_value mini, Filtration_value maxi) {
+ void compute_sparse_graph(Distance& dist, double const epsilon, Filtration_value const mini, Filtration_value const maxi) {
const auto& points = sorted_points; // convenience alias
- const int n = boost::size(points);
+ std::size_t n = boost::size(points);
double cst = epsilon * (1 - epsilon) / 2;
- graph_.~Graph();
- new (&graph_) Graph(n);
- // for(auto v : vertices(g)) // doesn't work :-(
- typename boost::graph_traits<Graph>::vertex_iterator v_i, v_e;
- for (std::tie(v_i, v_e) = vertices(graph_); v_i != v_e; ++v_i) {
- auto v = *v_i;
- // This whole loop might not be necessary, leave it until someone investigates if it is safe to remove.
- put(vertex_filtration_t(), graph_, v, 0);
+ max_v = -1; // Useful for the size of the map lambda.
+ for (std::size_t i = 0; i < n; ++i) {
+ if ((params[i] < mini || params[i] <= 0) && i != 0) break;
+      // The parameter of the first point is not very meaningful: it is supposed to be infinite,
+      // but the Filtration_value type may not support infinity.
+      // It would be better to reduce the number of points earlier, around choose_n_farthest_points.
+ add_vertex(points[i], graph_);
+ max_v = std::max(max_v, points[i]);
}
+ n = num_vertices(graph_);
// TODO(MG):
// - make it parallel
// - only test near-enough neighbors
- for (int i = 0; i < n; ++i) {
+ for (std::size_t i = 0; i < n; ++i) {
auto&& pi = points[i];
auto li = params[i];
- if (li < mini) break;
- for (int j = i + 1; j < n; ++j) {
+ // If we inserted all the points, points with multiplicity would get connected to their first representative,
+ // no need to handle the redundant ones in the outer loop.
+ // if (li <= 0 && i != 0) break;
+ for (std::size_t j = i + 1; j < n; ++j) {
auto&& pj = points[j];
auto d = dist(pi, pj);
auto lj = params[j];
- if (lj < mini) break;
GUDHI_CHECK(lj <= li, "Bad furthest point sorting");
Filtration_value alpha;
@@ -190,6 +236,7 @@ class Sparse_rips_complex {
Graph graph_;
double epsilon_;
+ Vertex_handle max_v;
// Because of the arbitrary split between constructor and create_complex
// sorted_points[sorted_order]=original_order
std::vector<Vertex_handle> sorted_points;
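The vector-backed graph introduced in this patch can be exercised on its own. Below is a compact, self-contained sketch of the same pattern; MiniGraph and its free functions are illustrative stand-ins, not GUDHI API:

#include <boost/iterator/counting_iterator.hpp>
#include <cstddef>
#include <iostream>
#include <tuple>
#include <vector>

struct MiniGraph {
  std::vector<int> vlist;                           // vertex descriptors
  std::vector<std::tuple<int, int, double>> elist;  // (source, target, filtration)
};

// Free functions mirroring the BGL-style interface used in the patch.
std::size_t num_edges(MiniGraph const& g) { return g.elist.size(); }
int source(std::size_t e, MiniGraph const& g) { return std::get<0>(g.elist[e]); }
int target(std::size_t e, MiniGraph const& g) { return std::get<1>(g.elist[e]); }
double filtration(std::size_t e, MiniGraph const& g) { return std::get<2>(g.elist[e]); }

int main() {
  MiniGraph g;
  g.vlist = {0, 1, 2};
  g.elist.emplace_back(0, 1, 0.5);
  g.elist.emplace_back(1, 2, 1.25);
  // Edge descriptors are plain indices, so counting_iterator enumerates them for free.
  typedef boost::counting_iterator<std::size_t> I;
  for (I e(0), end(num_edges(g)); e != end; ++e)
    std::cout << source(*e, g) << " -> " << target(*e, g)
              << " [" << filtration(*e, g) << "]\n";
  return 0;
}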
diff --git a/src/Rips_complex/test/test_rips_complex.cpp b/src/Rips_complex/test/test_rips_complex.cpp
index 1225f8df..19dcd283 100644
--- a/src/Rips_complex/test/test_rips_complex.cpp
+++ b/src/Rips_complex/test/test_rips_complex.cpp
@@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) {
// ----------------------------------------------------------------------------
std::string off_file_name("alphacomplexdoc.off");
double rips_threshold = 12.0;
- std::cout << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" <<
+ std::clog << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" <<
rips_threshold << "==========" << std::endl;
Gudhi::Points_off_reader<Point> off_reader(off_file_name);
@@ -52,14 +52,14 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) {
const int DIMENSION_1 = 1;
Simplex_tree st;
rips_complex_from_file.create_complex(st, DIMENSION_1);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION_1);
const int NUMBER_OF_VERTICES = 7;
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 18);
// Check filtration values of vertices is 0.0
@@ -71,12 +71,12 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) {
for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) {
if (DIMENSION_1 == st.dimension(f_simplex)) {
std::vector<Point> vp;
- std::cout << "vertex = (";
+ std::clog << "vertex = (";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << ",";
+ std::clog << vertex << ",";
vp.push_back(off_reader.get_point_cloud().at(vertex));
}
- std::cout << ") - distance =" << Gudhi::Euclidean_distance()(vp.at(0), vp.at(1)) <<
+ std::clog << ") - distance =" << Gudhi::Euclidean_distance()(vp.at(0), vp.at(1)) <<
" - filtration =" << st.filtration(f_simplex) << std::endl;
BOOST_CHECK(vp.size() == 2);
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), Gudhi::Euclidean_distance()(vp.at(0), vp.at(1)));
@@ -86,46 +86,46 @@ BOOST_AUTO_TEST_CASE(RIPS_DOC_OFF_file) {
const int DIMENSION_2 = 2;
Simplex_tree st2;
rips_complex_from_file.create_complex(st2, DIMENSION_2);
- std::cout << "st2.dimension()=" << st2.dimension() << std::endl;
+ std::clog << "st2.dimension()=" << st2.dimension() << std::endl;
BOOST_CHECK(st2.dimension() == DIMENSION_2);
- std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
+ std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
+ std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
BOOST_CHECK(st2.num_simplices() == 23);
Simplex_tree::Filtration_value f01 = st2.filtration(st2.find({0, 1}));
Simplex_tree::Filtration_value f02 = st2.filtration(st2.find({0, 2}));
Simplex_tree::Filtration_value f12 = st2.filtration(st2.find({1, 2}));
Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
- std::cout << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl;
+ std::clog << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::max(f01, std::max(f02,f12)));
Simplex_tree::Filtration_value f45 = st2.filtration(st2.find({4, 5}));
Simplex_tree::Filtration_value f56 = st2.filtration(st2.find({5, 6}));
Simplex_tree::Filtration_value f46 = st2.filtration(st2.find({4, 6}));
Simplex_tree::Filtration_value f456 = st2.filtration(st2.find({4, 5, 6}));
- std::cout << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl;
+ std::clog << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f456, std::max(f45, std::max(f56,f46)));
const int DIMENSION_3 = 3;
Simplex_tree st3;
rips_complex_from_file.create_complex(st3, DIMENSION_3);
- std::cout << "st3.dimension()=" << st3.dimension() << std::endl;
+ std::clog << "st3.dimension()=" << st3.dimension() << std::endl;
BOOST_CHECK(st3.dimension() == DIMENSION_3);
- std::cout << "st3.num_vertices()=" << st3.num_vertices() << std::endl;
+ std::clog << "st3.num_vertices()=" << st3.num_vertices() << std::endl;
BOOST_CHECK(st3.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st3.num_simplices()=" << st3.num_simplices() << std::endl;
+ std::clog << "st3.num_simplices()=" << st3.num_simplices() << std::endl;
BOOST_CHECK(st3.num_simplices() == 24);
Simplex_tree::Filtration_value f123 = st3.filtration(st3.find({1, 2, 3}));
Simplex_tree::Filtration_value f013 = st3.filtration(st3.find({0, 1, 3}));
Simplex_tree::Filtration_value f023 = st3.filtration(st3.find({0, 2, 3}));
Simplex_tree::Filtration_value f0123 = st3.filtration(st3.find({0, 1, 2, 3}));
- std::cout << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 <<
+ std::clog << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 <<
" - f023= " << f023 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f0123, std::max(f012, std::max(f123, std::max(f013, f023))));
@@ -176,34 +176,34 @@ BOOST_AUTO_TEST_CASE(Rips_complex_from_points) {
// ----------------------------------------------------------------------------
Rips_complex rips_complex_from_points(points, 2.0, Custom_square_euclidean_distance());
- std::cout << "========== Rips_complex_from_points ==========" << std::endl;
+ std::clog << "========== Rips_complex_from_points ==========" << std::endl;
Simplex_tree st;
const int DIMENSION = 3;
rips_complex_from_points.create_complex(st, DIMENSION);
// Another way to check num_simplices
- std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
for (auto f_simplex : st.filtration_simplex_range()) {
num_simplices++;
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << st.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
BOOST_CHECK(num_simplices == 15);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 15);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION);
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == 4);
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
+ std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
switch (st.dimension(f_simplex)) {
case 0:
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0);
@@ -241,34 +241,34 @@ BOOST_AUTO_TEST_CASE(Sparse_rips_complex_from_points) {
// .001 is small enough that we get a deterministic result matching the exact Rips
Sparse_rips_complex sparse_rips(points, Custom_square_euclidean_distance(), .001);
- std::cout << "========== Sparse_rips_complex_from_points ==========" << std::endl;
+ std::clog << "========== Sparse_rips_complex_from_points ==========" << std::endl;
Simplex_tree st;
const int DIMENSION = 3;
sparse_rips.create_complex(st, DIMENSION);
// Another way to check num_simplices
- std::cout << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Rips complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
for (auto f_simplex : st.filtration_simplex_range()) {
num_simplices++;
- std::cout << " ( ";
+ std::clog << " ( ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << ") -> " << "[" << st.filtration(f_simplex) << "] ";
- std::cout << std::endl;
+ std::clog << ") -> " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << std::endl;
}
BOOST_CHECK(num_simplices == 15);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 15);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION);
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == 4);
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
+ std::clog << "dimension(" << st.dimension(f_simplex) << ") - f = " << st.filtration(f_simplex) << std::endl;
switch (st.dimension(f_simplex)) {
case 0:
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), 0.0);
@@ -293,7 +293,7 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) {
// ----------------------------------------------------------------------------
std::string csv_file_name("full_square_distance_matrix.csv");
double rips_threshold = 12.0;
- std::cout << "========== CSV FILE NAME = " << csv_file_name << " - Rips threshold=" <<
+ std::clog << "========== CSV FILE NAME = " << csv_file_name << " - Rips threshold=" <<
rips_threshold << "==========" << std::endl;
Distance_matrix distances = Gudhi::read_lower_triangular_matrix_from_csv_file<Filtration_value>(csv_file_name);
@@ -302,14 +302,14 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) {
const int DIMENSION_1 = 1;
Simplex_tree st;
rips_complex_from_file.create_complex(st, DIMENSION_1);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
BOOST_CHECK(st.dimension() == DIMENSION_1);
const int NUMBER_OF_VERTICES = 7;
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st.num_simplices()=" << st.num_simplices() << std::endl;
+ std::clog << "st.num_simplices()=" << st.num_simplices() << std::endl;
BOOST_CHECK(st.num_simplices() == 18);
// Check filtration values of vertices is 0.0
@@ -321,12 +321,12 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) {
for (auto f_simplex : st.skeleton_simplex_range(DIMENSION_1)) {
if (DIMENSION_1 == st.dimension(f_simplex)) {
std::vector<Simplex_tree::Vertex_handle> vvh;
- std::cout << "vertex = (";
+ std::clog << "vertex = (";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << ",";
+ std::clog << vertex << ",";
vvh.push_back(vertex);
}
- std::cout << ") - filtration =" << st.filtration(f_simplex) << std::endl;
+ std::clog << ") - filtration =" << st.filtration(f_simplex) << std::endl;
BOOST_CHECK(vvh.size() == 2);
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex), distances[vvh.at(0)][vvh.at(1)]);
}
@@ -335,46 +335,46 @@ BOOST_AUTO_TEST_CASE(Rips_doc_csv_file) {
const int DIMENSION_2 = 2;
Simplex_tree st2;
rips_complex_from_file.create_complex(st2, DIMENSION_2);
- std::cout << "st2.dimension()=" << st2.dimension() << std::endl;
+ std::clog << "st2.dimension()=" << st2.dimension() << std::endl;
BOOST_CHECK(st2.dimension() == DIMENSION_2);
- std::cout << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
+ std::clog << "st2.num_vertices()=" << st2.num_vertices() << std::endl;
BOOST_CHECK(st2.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
+ std::clog << "st2.num_simplices()=" << st2.num_simplices() << std::endl;
BOOST_CHECK(st2.num_simplices() == 23);
Simplex_tree::Filtration_value f01 = st2.filtration(st2.find({0, 1}));
Simplex_tree::Filtration_value f02 = st2.filtration(st2.find({0, 2}));
Simplex_tree::Filtration_value f12 = st2.filtration(st2.find({1, 2}));
Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
- std::cout << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl;
+ std::clog << "f012= " << f012 << " | f01= " << f01 << " - f02= " << f02 << " - f12= " << f12 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::max(f01, std::max(f02,f12)));
Simplex_tree::Filtration_value f45 = st2.filtration(st2.find({4, 5}));
Simplex_tree::Filtration_value f56 = st2.filtration(st2.find({5, 6}));
Simplex_tree::Filtration_value f46 = st2.filtration(st2.find({4, 6}));
Simplex_tree::Filtration_value f456 = st2.filtration(st2.find({4, 5, 6}));
- std::cout << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl;
+ std::clog << "f456= " << f456 << " | f45= " << f45 << " - f56= " << f56 << " - f46= " << f46 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f456, std::max(f45, std::max(f56,f46)));
const int DIMENSION_3 = 3;
Simplex_tree st3;
rips_complex_from_file.create_complex(st3, DIMENSION_3);
- std::cout << "st3.dimension()=" << st3.dimension() << std::endl;
+ std::clog << "st3.dimension()=" << st3.dimension() << std::endl;
BOOST_CHECK(st3.dimension() == DIMENSION_3);
- std::cout << "st3.num_vertices()=" << st3.num_vertices() << std::endl;
+ std::clog << "st3.num_vertices()=" << st3.num_vertices() << std::endl;
BOOST_CHECK(st3.num_vertices() == NUMBER_OF_VERTICES);
- std::cout << "st3.num_simplices()=" << st3.num_simplices() << std::endl;
+ std::clog << "st3.num_simplices()=" << st3.num_simplices() << std::endl;
BOOST_CHECK(st3.num_simplices() == 24);
Simplex_tree::Filtration_value f123 = st3.filtration(st3.find({1, 2, 3}));
Simplex_tree::Filtration_value f013 = st3.filtration(st3.find({0, 1, 3}));
Simplex_tree::Filtration_value f023 = st3.filtration(st3.find({0, 2, 3}));
Simplex_tree::Filtration_value f0123 = st3.filtration(st3.find({0, 1, 2, 3}));
- std::cout << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 <<
+ std::clog << "f0123= " << f0123 << " | f012= " << f012 << " - f123= " << f123 << " - f013= " << f013 <<
" - f023= " << f023 << std::endl;
GUDHI_TEST_FLOAT_EQUALITY_CHECK(f0123, std::max(f012, std::max(f123, std::max(f013, f023))));
@@ -389,7 +389,7 @@ BOOST_AUTO_TEST_CASE(Rips_create_complex_throw) {
// ----------------------------------------------------------------------------
std::string off_file_name("alphacomplexdoc.off");
double rips_threshold = 12.0;
- std::cout << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" <<
+ std::clog << "========== OFF FILE NAME = " << off_file_name << " - Rips threshold=" <<
rips_threshold << "==========" << std::endl;
Gudhi::Points_off_reader<Point> off_reader(off_file_name);
@@ -398,7 +398,7 @@ BOOST_AUTO_TEST_CASE(Rips_create_complex_throw) {
Simplex_tree stree;
std::vector<int> simplex = {0, 1, 2};
stree.insert_simplex_and_subfaces(simplex);
- std::cout << "Check exception throw in debug mode" << std::endl;
+ std::clog << "Check exception throw in debug mode" << std::endl;
  // throws an exception because stree is not empty
BOOST_CHECK_THROW (rips_complex_from_file.create_complex(stree, 1), std::invalid_argument);
}
diff --git a/src/Rips_complex/utilities/CMakeLists.txt b/src/Rips_complex/utilities/CMakeLists.txt
index 4b565628..d8c8e0b8 100644
--- a/src/Rips_complex/utilities/CMakeLists.txt
+++ b/src/Rips_complex/utilities/CMakeLists.txt
@@ -1,34 +1,45 @@
project(Rips_complex_utilities)
-add_executable(rips_distance_matrix_persistence rips_distance_matrix_persistence.cpp)
-target_link_libraries(rips_distance_matrix_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
-add_executable(rips_persistence rips_persistence.cpp)
-target_link_libraries(rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
-add_executable(rips_correlation_matrix_persistence rips_correlation_matrix_persistence.cpp)
-target_link_libraries(rips_correlation_matrix_persistence ${Boost_SYSTEM_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY})
-
-add_executable(sparse_rips_persistence sparse_rips_persistence.cpp)
-target_link_libraries(sparse_rips_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
+if(TARGET Boost::program_options)
+ add_executable(rips_distance_matrix_persistence rips_distance_matrix_persistence.cpp)
+ target_link_libraries(rips_distance_matrix_persistence Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(rips_distance_matrix_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Rips_complex_utility_from_rips_distance_matrix COMMAND $<TARGET_FILE:rips_distance_matrix_persistence>
+ "${CMAKE_SOURCE_DIR}/data/distance_matrix/full_square_distance_matrix.csv" "-r" "1.0" "-d" "3" "-p" "3" "-m" "0")
+ install(TARGETS rips_distance_matrix_persistence DESTINATION bin)
+endif()
-if (TBB_FOUND)
- target_link_libraries(rips_distance_matrix_persistence ${TBB_LIBRARIES})
- target_link_libraries(rips_persistence ${TBB_LIBRARIES})
- target_link_libraries(rips_correlation_matrix_persistence ${TBB_LIBRARIES})
- target_link_libraries(sparse_rips_persistence ${TBB_LIBRARIES})
+if(TARGET Boost::program_options)
+ add_executable(rips_persistence rips_persistence.cpp)
+ target_link_libraries(rips_persistence Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(rips_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Rips_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:rips_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
+ install(TARGETS rips_persistence DESTINATION bin)
endif()
-add_test(NAME Rips_complex_utility_from_rips_distance_matrix COMMAND $<TARGET_FILE:rips_distance_matrix_persistence>
- "${CMAKE_SOURCE_DIR}/data/distance_matrix/full_square_distance_matrix.csv" "-r" "1.0" "-d" "3" "-p" "3" "-m" "0")
-add_test(NAME Rips_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:rips_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
-add_test(NAME Rips_complex_utility_from_rips_correlation_matrix COMMAND $<TARGET_FILE:rips_correlation_matrix_persistence>
- "${CMAKE_SOURCE_DIR}/data/correlation_matrix/lower_triangular_correlation_matrix.csv" "-c" "0.3" "-d" "3" "-p" "3" "-m" "0")
-add_test(NAME Sparse_rips_complex_utility_on_tore_3D COMMAND $<TARGET_FILE:sparse_rips_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-e" "0.5" "-m" "0.2" "-d" "3" "-p" "2")
+if(TARGET Boost::program_options)
+ add_executable(rips_correlation_matrix_persistence rips_correlation_matrix_persistence.cpp)
+ target_link_libraries(rips_correlation_matrix_persistence Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(rips_correlation_matrix_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Rips_complex_utility_from_rips_correlation_matrix COMMAND $<TARGET_FILE:rips_correlation_matrix_persistence>
+ "${CMAKE_SOURCE_DIR}/data/correlation_matrix/lower_triangular_correlation_matrix.csv" "-c" "0.3" "-d" "3" "-p" "3" "-m" "0")
+ install(TARGETS rips_correlation_matrix_persistence DESTINATION bin)
+endif()
-install(TARGETS rips_distance_matrix_persistence DESTINATION bin)
-install(TARGETS rips_persistence DESTINATION bin)
-install(TARGETS rips_correlation_matrix_persistence DESTINATION bin)
-install(TARGETS sparse_rips_persistence DESTINATION bin)
+if(TARGET Boost::program_options)
+ add_executable(sparse_rips_persistence sparse_rips_persistence.cpp)
+ target_link_libraries(sparse_rips_persistence Boost::program_options)
+ if (TBB_FOUND)
+ target_link_libraries(sparse_rips_persistence ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Sparse_rips_complex_utility_on_tore_3D COMMAND $<TARGET_FILE:sparse_rips_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-e" "0.5" "-m" "0.2" "-d" "3" "-p" "2")
+ install(TARGETS sparse_rips_persistence DESTINATION bin)
+endif()
diff --git a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
index 585de4a0..72ddc797 100644
--- a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
@@ -68,11 +68,8 @@ int main(int argc, char* argv[]) {
Simplex_tree simplex_tree;
rips_complex_from_file.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
@@ -121,7 +118,7 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"min-edge-corelation,c", po::value<Filtration_value>(&correlation_min)->default_value(0),
"Minimal corelation of an edge for the Rips complex construction.")(
"cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
@@ -143,17 +140,17 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a corelation matrix.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a corelation matrix.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
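
The std::cout to std::clog switch above, repeated in the three Rips utilities that follow, separates diagnostics from results: std::clog is associated with the standard error stream, while the persistence diagram itself is still written to std::cout when no output file is given. A minimal sketch of the convention (the message and the diagram line are illustrative, not taken from the utilities):

    #include <iostream>

    int main() {
      std::clog << "The complex contains 42 simplices\n";  // diagnostic, goes to stderr
      std::cout << "3 0 0.1 0.4\n";                        // result, stays on stdout
      return 0;
    }

With this split, redirecting stdout (for example, rips_persistence points.off > diagram.pers) captures only the diagram, while the progress messages remain visible on the terminal.
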
diff --git a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
index ad429e11..77ad841a 100644
--- a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
@@ -47,11 +47,8 @@ int main(int argc, char* argv[]) {
Simplex_tree simplex_tree;
rips_complex_from_file.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
@@ -82,7 +79,7 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-edge-length,r",
po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal length of an edge for the Rips complex construction.")(
@@ -105,17 +102,17 @@ void program_options(int argc, char* argv[], std::string& csv_matrix_file, std::
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of distance matrix.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of distance matrix.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Rips_complex/utilities/rips_persistence.cpp b/src/Rips_complex/utilities/rips_persistence.cpp
index daa7e1db..43194821 100644
--- a/src/Rips_complex/utilities/rips_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_persistence.cpp
@@ -49,11 +49,8 @@ int main(int argc, char* argv[]) {
Simplex_tree simplex_tree;
rips_complex_from_file.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
@@ -84,7 +81,7 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-edge-length,r",
po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal length of an edge for the Rips complex construction.")(
@@ -107,17 +104,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Rips complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Rips complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Rips_complex/utilities/sparse_rips_persistence.cpp b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
index cefd8a67..829c85e6 100644
--- a/src/Rips_complex/utilities/sparse_rips_persistence.cpp
+++ b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
@@ -51,11 +51,8 @@ int main(int argc, char* argv[]) {
Simplex_tree simplex_tree;
sparse_rips.create_complex(simplex_tree, dim_max);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
@@ -87,7 +84,7 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. Default print in standard output")(
"max-edge-length,r",
po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
"Maximal length of an edge for the Rips complex construction.")(
@@ -112,17 +109,17 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a sparse 1/(1-epsilon)-approximation of the Rips complex \ndefined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a sparse 1/(1-epsilon)-approximation of the Rips complex \ndefined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
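
All four utilities also drop the explicit "Sort the simplices" step: the call to simplex_tree.initialize_filtration() before computing persistence is redundant, because filtration_simplex_range() initializes the filtration cache on demand, a behavior kept by the maybe_initialize_filtration() refactoring visible in the Simplex_tree.h hunk further down. A minimal sketch of the resulting caller-side pattern, assuming nothing beyond the public Simplex_tree interface:

    #include <gudhi/Simplex_tree.h>
    #include <iostream>

    int main() {
      Gudhi::Simplex_tree<> st;
      st.insert_simplex_and_subfaces({0, 1, 2}, 0.3);
      // No initialize_filtration() call: the first traversal sorts the simplices.
      for (auto sh : st.filtration_simplex_range())
        std::clog << st.filtration(sh) << "\n";
      return 0;
    }
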
diff --git a/src/Simplex_tree/doc/Intro_simplex_tree.h b/src/Simplex_tree/doc/Intro_simplex_tree.h
index 800879fe..2d3ecdec 100644
--- a/src/Simplex_tree/doc/Intro_simplex_tree.h
+++ b/src/Simplex_tree/doc/Intro_simplex_tree.h
@@ -39,11 +39,9 @@ namespace Gudhi {
* \subsubsection filteredcomplexessimplextreeexamples Examples
*
* Here is a list of simplex tree examples :
- * \li <a href="_simplex_tree_2simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a> - Simple simplex tree construction and basic function use.
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp} - Simple simplex tree construction and basic function use.
*
- * \li <a href="_simplex_tree_2simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a> - Simplex tree construction from cliques of graph read in
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp} - Simplex tree construction from the cliques of a graph read from
* a file.
*
* Simplex tree construction with \f$\mathbb{Z}/3\mathbb{Z}\f$ coefficients on weighted graph Klein bottle file:
@@ -54,12 +52,10 @@ Expand the simplex tree in 3.8e-05 s.
Information of the Simplex Tree:
Number of vertices = 10 Number of simplices = 98 \endcode
*
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a> - Simplex tree is computed and displayed
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp} - Simplex tree is computed and displayed
* from a 3D alpha complex (Requires CGAL, GMP and GMPXX to be installed).
*
- * \li <a href="_simplex_tree_2graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a> - Simple simplex tree construction from a one-skeleton graph with
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp} - Simple simplex tree construction from a one-skeleton graph with
* a simple blocker expansion method.
*
* \subsection filteredcomplexeshassecomplex Hasse complex
diff --git a/src/Simplex_tree/example/CMakeLists.txt b/src/Simplex_tree/example/CMakeLists.txt
index 8a8cac58..81d352fc 100644
--- a/src/Simplex_tree/example/CMakeLists.txt
+++ b/src/Simplex_tree/example/CMakeLists.txt
@@ -29,18 +29,20 @@ if(GMP_FOUND AND NOT CGAL_VERSION VERSION_LESS 4.11.0)
target_link_libraries(Simplex_tree_example_alpha_shapes_3_from_off ${TBB_LIBRARIES})
endif()
add_test(NAME Simplex_tree_example_alpha_shapes_3_from_off COMMAND $<TARGET_FILE:Simplex_tree_example_alpha_shapes_3_from_off>
- "${CMAKE_SOURCE_DIR}/data/points/bunny_5000.off")
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off")
endif()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- add_executable ( Simplex_tree_example_cech_complex_cgal_mini_sphere_3d cech_complex_cgal_mini_sphere_3d.cpp )
- target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d ${Boost_PROGRAM_OPTIONS_LIBRARY} ${CGAL_LIBRARY})
- if (TBB_FOUND)
- target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d ${TBB_LIBRARIES})
+ if(TARGET Boost::program_options)
+ add_executable ( Simplex_tree_example_cech_complex_cgal_mini_sphere_3d cech_complex_cgal_mini_sphere_3d.cpp )
+ target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d Boost::program_options ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Simplex_tree_example_cech_complex_cgal_mini_sphere_3d ${TBB_LIBRARIES})
+ endif()
+ add_test(NAME Simplex_tree_example_cech_complex_cgal_mini_sphere_3d COMMAND $<TARGET_FILE:Simplex_tree_example_cech_complex_cgal_mini_sphere_3d>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" -r 0.3 -d 3)
endif()
- add_test(NAME Simplex_tree_example_cech_complex_cgal_mini_sphere_3d COMMAND $<TARGET_FILE:Simplex_tree_example_cech_complex_cgal_mini_sphere_3d>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" -r 0.3 -d 3)
endif ()
add_executable ( Simplex_tree_example_graph_expansion_with_blocker graph_expansion_with_blocker.cpp )
diff --git a/src/Simplex_tree/example/README b/src/Simplex_tree/example/README
deleted file mode 100644
index a9498173..00000000
--- a/src/Simplex_tree/example/README
+++ /dev/null
@@ -1,73 +0,0 @@
-To build the example, run in a Terminal:
-
-cd /path-to-gudhi/
-cmake .
-cd /path-to-example/
-make
-
-
-Example of use :
-
-*** Simple simplex tree construction
-
-./Simplex_tree_example_simple_simplex_tree
-
-********************************************************************
-EXAMPLE OF SIMPLE INSERTION
- * INSERT 0
- + 0 INSERTED
- * INSERT 1
- + 1 INSERTED
- * INSERT (0,1)
- + (0,1) INSERTED
- * INSERT 2
- + 2 INSERTED
- * INSERT (2,0)
- + (2,0) INSERTED
- * INSERT (2,1)
- + (2,1) INSERTED
- * INSERT (2,1,0)
- + (2,1,0) INSERTED
- * INSERT 3
- + 3 INSERTED
- * INSERT (3,0)
- + (3,0) INSERTED
- * INSERT 0 (already inserted)
- - 0 NOT INSERTED
- * INSERT (2,1,0) (already inserted)
- - (2,1,0) NOT INSERTED
-********************************************************************
-* The complex contains 9 simplices
- - dimension 2 - filtration 0.4
-* Iterator on Simplices in the filtration, with [filtration value]:
- [0.1] 0
- [0.1] 1
- [0.1] 2
- [0.1] 3
- [0.2] 1 0
- [0.2] 2 0
- [0.2] 2 1
- [0.2] 3 0
- [0.3] 2 1 0
-
-*** Simplex tree construction with Z/2Z coefficients on weighted graph Klein bottle file:
-
-./Simplex_tree_example_from_cliques_of_graph ../../../data/points/Klein_bottle_complex.txt 2
-Insert the 1-skeleton in the simplex tree in 0 s.
-Expand the simplex tree in 0 s.
-Information of the Simplex Tree:
- Number of vertices = 10 Number of simplices = 82
-
-with Z/3Z coefficients:
-
-./Simplex_tree_example_from_cliques_of_graph ../../../data/points/Klein_bottle_complex.txt 3
-
-Insert the 1-skeleton in the simplex tree in 0 s.
-Expand the simplex tree in 0 s.
-Information of the Simplex Tree:
- Number of vertices = 10 Number of simplices = 106
-
-*** Simplex_tree computed and displayed from a 3D alpha complex:
- [ Requires CGAL, GMP and GMPXX to be installed]
-
-./Simplex_tree_example_alpha_shapes_3_from_off ../../../data/points/bunny_5000
diff --git a/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp b/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp
index d716fb1f..0e7e382b 100644
--- a/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp
+++ b/src/Simplex_tree/example/cech_complex_cgal_mini_sphere_3d.cpp
@@ -55,18 +55,18 @@ class Cech_blocker {
bool operator()(Simplex_handle sh) {
std::vector<Point> points;
#if DEBUG_TRACES
- std::cout << "Cech_blocker on [";
+ std::clog << "Cech_blocker on [";
#endif // DEBUG_TRACES
for (auto vertex : simplex_tree_.simplex_vertex_range(sh)) {
points.push_back(point_cloud_[vertex]);
#if DEBUG_TRACES
- std::cout << vertex << ", ";
+ std::clog << vertex << ", ";
#endif // DEBUG_TRACES
}
Min_sphere ms(points.begin(), points.end());
Filtration_value radius = ms.radius();
#if DEBUG_TRACES
- std::cout << "] - radius = " << radius << " - returns " << (radius > threshold_) << std::endl;
+ std::clog << "] - radius = " << radius << " - returns " << (radius > threshold_) << std::endl;
#endif // DEBUG_TRACES
simplex_tree_.assign_filtration(sh, radius);
return (radius > threshold_);
@@ -106,24 +106,24 @@ int main(int argc, char* argv[]) {
// expand the graph until dimension dim_max
st.expansion_with_blockers(dim_max, Cech_blocker(st, threshold, off_reader.get_point_cloud()));
- std::cout << "The complex contains " << st.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << st.dimension() << " \n";
+ std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << st.dimension() << " \n";
// Sort the simplices in the order of the filtration
st.initialize_filtration();
#if DEBUG_TRACES
- std::cout << "********************************************************************\n";
+ std::clog << "********************************************************************\n";
// Display the Simplex_tree - Can not be done in the middle of 2 inserts
- std::cout << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " "
+ std::clog << " "
<< "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << static_cast<int>(vertex) << " ";
+ std::clog << static_cast<int>(vertex) << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
#endif // DEBUG_TRACES
return 0;
@@ -154,11 +154,11 @@ void program_options(int argc, char* argv[], std::string& off_file_points, Filtr
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Construct a Cech complex defined on a set of input points.\n \n";
+ std::clog << std::endl;
+ std::clog << "Construct a Cech complex defined on a set of input points.\n \n";
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp b/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp
index e455c426..8ee7ab74 100644
--- a/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp
+++ b/src/Simplex_tree/example/example_alpha_shapes_3_simplex_tree_from_off_file.cpp
@@ -63,7 +63,7 @@ Vertex_list from(const Cell_handle& ch) {
Vertex_list the_list;
for (auto i = 0; i < 4; i++) {
#ifdef DEBUG_TRACES
- std::cout << "from cell[" << i << "]=" << ch->vertex(i)->point() << std::endl;
+ std::clog << "from cell[" << i << "]=" << ch->vertex(i)->point() << std::endl;
#endif // DEBUG_TRACES
the_list.push_back(ch->vertex(i));
}
@@ -75,7 +75,7 @@ Vertex_list from(const Facet& fct) {
for (auto i = 0; i < 4; i++) {
if (fct.second != i) {
#ifdef DEBUG_TRACES
- std::cout << "from facet=[" << i << "]" << fct.first->vertex(i)->point() << std::endl;
+ std::clog << "from facet=[" << i << "]" << fct.first->vertex(i)->point() << std::endl;
#endif // DEBUG_TRACES
the_list.push_back(fct.first->vertex(i));
}
@@ -88,7 +88,7 @@ Vertex_list from(const Edge& edg) {
for (auto i = 0; i < 4; i++) {
if ((edg.second == i) || (edg.third == i)) {
#ifdef DEBUG_TRACES
- std::cout << "from edge[" << i << "]=" << edg.first->vertex(i)->point() << std::endl;
+ std::clog << "from edge[" << i << "]=" << edg.first->vertex(i)->point() << std::endl;
#endif // DEBUG_TRACES
the_list.push_back(edg.first->vertex(i));
}
@@ -99,7 +99,7 @@ Vertex_list from(const Edge& edg) {
Vertex_list from(const Alpha_shape_3::Vertex_handle& vh) {
Vertex_list the_list;
#ifdef DEBUG_TRACES
- std::cout << "from vertex=" << vh->point() << std::endl;
+ std::clog << "from vertex=" << vh->point() << std::endl;
#endif // DEBUG_TRACES
the_list.push_back(vh);
return the_list;
@@ -128,7 +128,7 @@ int main(int argc, char * const argv[]) {
// alpha shape construction from points. CGAL has a strange behavior in REGULARIZED mode.
Alpha_shape_3 as(lp.begin(), lp.end(), 0, Alpha_shape_3::GENERAL);
#ifdef DEBUG_TRACES
- std::cout << "Alpha shape computed in GENERAL mode" << std::endl;
+ std::clog << "Alpha shape computed in GENERAL mode" << std::endl;
#endif // DEBUG_TRACES
// filtration with alpha values from alpha shape
@@ -140,7 +140,7 @@ int main(int argc, char * const argv[]) {
as.filtration_with_alpha_values(disp);
#ifdef DEBUG_TRACES
- std::cout << "filtration_with_alpha_values returns : " << the_objects.size() << " objects" << std::endl;
+ std::clog << "filtration_with_alpha_values returns : " << the_objects.size() << " objects" << std::endl;
#endif // DEBUG_TRACES
Alpha_shape_3::size_type count_vertices = 0;
@@ -177,7 +177,7 @@ int main(int argc, char * const argv[]) {
// alpha shape not found
Simplex_tree_vertex vertex = map_cgal_simplex_tree.size();
#ifdef DEBUG_TRACES
- std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] not found - insert_simplex " << vertex << "\n";
+ std::clog << "vertex [" << the_alpha_shape_vertex->point() << "] not found - insert_simplex " << vertex << "\n";
#endif // DEBUG_TRACES
the_simplex_tree.push_back(vertex);
map_cgal_simplex_tree.insert(Alpha_shape_simplex_tree_pair(the_alpha_shape_vertex, vertex));
@@ -185,14 +185,14 @@ int main(int argc, char * const argv[]) {
// alpha shape found
Simplex_tree_vertex vertex = the_map_iterator->second;
#ifdef DEBUG_TRACES
- std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] found in " << vertex << std::endl;
+ std::clog << "vertex [" << the_alpha_shape_vertex->point() << "] found in " << vertex << std::endl;
#endif // DEBUG_TRACES
the_simplex_tree.push_back(vertex);
}
}
// Construction of the simplex_tree
#ifdef DEBUG_TRACES
- std::cout << "filtration = " << *the_alpha_value_iterator << std::endl;
+ std::clog << "filtration = " << *the_alpha_value_iterator << std::endl;
#endif // DEBUG_TRACES
simplex_tree.insert_simplex(the_simplex_tree, std::sqrt(*the_alpha_value_iterator));
if (the_alpha_value_iterator != the_alpha_values.end())
@@ -201,61 +201,61 @@ int main(int argc, char * const argv[]) {
std::cerr << "This shall not happen" << std::endl;
}
#ifdef DEBUG_TRACES
- std::cout << "vertices \t\t" << count_vertices << std::endl;
- std::cout << "edges \t\t" << count_edges << std::endl;
- std::cout << "facets \t\t" << count_facets << std::endl;
- std::cout << "cells \t\t" << count_cells << std::endl;
+ std::clog << "vertices \t\t" << count_vertices << std::endl;
+ std::clog << "edges \t\t" << count_edges << std::endl;
+ std::clog << "facets \t\t" << count_facets << std::endl;
+ std::clog << "cells \t\t" << count_cells << std::endl;
- std::cout << "Information of the Simplex Tree:\n";
- std::cout << " Number of vertices = " << simplex_tree.num_vertices() << " ";
- std::cout << " Number of simplices = " << simplex_tree.num_simplices() << std::endl << std::endl;
+ std::clog << "Information of the Simplex Tree:\n";
+ std::clog << " Number of vertices = " << simplex_tree.num_vertices() << " ";
+ std::clog << " Number of simplices = " << simplex_tree.num_simplices() << std::endl << std::endl;
#endif // DEBUG_TRACES
#ifdef DEBUG_TRACES
- std::cout << "Iterator on vertices: \n";
+ std::clog << "Iterator on vertices: \n";
for (auto vertex : simplex_tree.complex_vertex_range()) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
#endif // DEBUG_TRACES
- std::cout << simplex_tree << std::endl;
+ std::clog << simplex_tree << std::endl;
#ifdef DEBUG_TRACES
- std::cout << std::endl << std::endl << "Iterator on simplices:\n";
+ std::clog << std::endl << std::endl << "Iterator on simplices:\n";
for (auto simplex : simplex_tree.complex_simplex_range()) {
- std::cout << " ";
+ std::clog << " ";
for (auto vertex : simplex_tree.simplex_vertex_range(simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
#endif // DEBUG_TRACES
#ifdef DEBUG_TRACES
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
#endif // DEBUG_TRACES
#ifdef DEBUG_TRACES
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, and their boundary simplices:\n";
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, and their boundary simplices:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
for (auto b_simplex : simplex_tree.boundary_simplex_range(f_simplex)) {
- std::cout << " " << "[" << simplex_tree.filtration(b_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(b_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(b_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
#endif // DEBUG_TRACES
diff --git a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
index 494f8b1d..eef8b665 100644
--- a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
+++ b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
@@ -34,31 +34,31 @@ int main(int argc, char* const argv[]) {
stree.expansion_with_blockers(3, [&](Simplex_handle sh) {
bool result = false;
- std::cout << "Blocker on [";
+ std::clog << "Blocker on [";
// User can loop on the vertices from the given simplex_handle i.e.
for (auto vertex : stree.simplex_vertex_range(sh)) {
// We block the expansion, if the vertex '6' is in the given list of vertices
if (vertex == 6) result = true;
- std::cout << vertex << ", ";
+ std::clog << vertex << ", ";
}
- std::cout << "] ( " << stree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ std::clog << "] ( " << stree.filtration(sh);
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
stree.assign_filtration(sh, stree.filtration(sh) + 1.);
- std::cout << " + 1. ) = " << result << std::endl;
+ std::clog << " + 1. ) = " << result << std::endl;
return result;
});
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << stree.num_simplices() << " simplices";
- std::cout << " - dimension " << stree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "********************************************************************\n";
+ std::clog << "* The complex contains " << stree.num_simplices() << " simplices";
+ std::clog << " - dimension " << stree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : stree.filtration_simplex_range()) {
- std::cout << " "
+ std::clog << " "
<< "[" << stree.filtration(f_simplex) << "] ";
- for (auto vertex : stree.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : stree.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
return 0;
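
The blocker passed to expansion_with_blockers() above illustrates the general contract: the callback is invoked on each candidate simplex during the expansion, it may reassign the candidate's filtration value (the default being the maximal value of its boundaries), and returning true rejects the simplex. A distilled sketch of that contract, with a hypothetical numeric test in place of the vertex check used in the example:

    using Simplex_tree = Gudhi::Simplex_tree<>;
    Simplex_tree stree;
    // ... insert the one-skeleton of stree here ...
    double threshold = 1.0;  // hypothetical cut-off
    stree.expansion_with_blockers(3, [&](Simplex_tree::Simplex_handle sh) {
      double value = stree.filtration(sh) + 1.;  // any user-defined score
      stree.assign_filtration(sh, value);        // optional reassignment
      return value > threshold;                  // true blocks the simplex
    });
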
diff --git a/src/Simplex_tree/example/mini_simplex_tree.cpp b/src/Simplex_tree/example/mini_simplex_tree.cpp
index bbc582c7..4043bffd 100644
--- a/src/Simplex_tree/example/mini_simplex_tree.cpp
+++ b/src/Simplex_tree/example/mini_simplex_tree.cpp
@@ -48,7 +48,7 @@ int main() {
for (ST::Simplex_handle t : st.cofaces_simplex_range(e, 1)) {
// Only coface is 012
for (ST::Vertex_handle v : st.simplex_vertex_range(t)) // v in { 0, 1, 2 }
- std::cout << v;
- std::cout << '\n';
+ std::clog << v;
+ std::clog << '\n';
}
}
diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp
index 4353939f..965711da 100644
--- a/src/Simplex_tree/example/simple_simplex_tree.cpp
+++ b/src/Simplex_tree/example/simple_simplex_tree.cpp
@@ -28,8 +28,8 @@ int main(int argc, char* const argv[]) {
const Filtration_value FOURTH_FILTRATION_VALUE = 0.4;
// TEST OF INSERTION
- std::cout << "********************************************************************" << std::endl;
- std::cout << "EXAMPLE OF SIMPLE INSERTION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "EXAMPLE OF SIMPLE INSERTION" << std::endl;
// Construct the Simplex Tree
Simplex_tree simplexTree;
@@ -41,140 +41,139 @@ int main(int argc, char* const argv[]) {
/* 2 0 3 */
// ++ FIRST
- std::cout << " * INSERT 0" << std::endl;
+ std::clog << " * INSERT 0" << std::endl;
typeVectorVertex firstSimplexVector = {0};
typePairSimplexBool returnValue =
simplexTree.insert_simplex(firstSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + 0 INSERTED" << std::endl;
+ std::clog << " + 0 INSERTED" << std::endl;
} else {
- std::cout << " - 0 NOT INSERTED" << std::endl;
+ std::clog << " - 0 NOT INSERTED" << std::endl;
}
// ++ SECOND
- std::cout << " * INSERT 1" << std::endl;
+ std::clog << " * INSERT 1" << std::endl;
typeVectorVertex secondSimplexVector = {1};
returnValue = simplexTree.insert_simplex(secondSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + 1 INSERTED" << std::endl;
+ std::clog << " + 1 INSERTED" << std::endl;
} else {
- std::cout << " - 1 NOT INSERTED" << std::endl;
+ std::clog << " - 1 NOT INSERTED" << std::endl;
}
// ++ THIRD
- std::cout << " * INSERT (0,1)" << std::endl;
+ std::clog << " * INSERT (0,1)" << std::endl;
typeVectorVertex thirdSimplexVector = {0, 1};
returnValue = simplexTree.insert_simplex(thirdSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (0,1) INSERTED" << std::endl;
+ std::clog << " + (0,1) INSERTED" << std::endl;
} else {
- std::cout << " - (0,1) NOT INSERTED" << std::endl;
+ std::clog << " - (0,1) NOT INSERTED" << std::endl;
}
// ++ FOURTH
- std::cout << " * INSERT 2" << std::endl;
+ std::clog << " * INSERT 2" << std::endl;
typeVectorVertex fourthSimplexVector = {2};
returnValue = simplexTree.insert_simplex(fourthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + 2 INSERTED" << std::endl;
+ std::clog << " + 2 INSERTED" << std::endl;
} else {
- std::cout << " - 2 NOT INSERTED" << std::endl;
+ std::clog << " - 2 NOT INSERTED" << std::endl;
}
// ++ FIFTH
- std::cout << " * INSERT (2,0)" << std::endl;
+ std::clog << " * INSERT (2,0)" << std::endl;
typeVectorVertex fifthSimplexVector = {2, 0};
returnValue = simplexTree.insert_simplex(fifthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (2,0) INSERTED" << std::endl;
+ std::clog << " + (2,0) INSERTED" << std::endl;
} else {
- std::cout << " - (2,0) NOT INSERTED" << std::endl;
+ std::clog << " - (2,0) NOT INSERTED" << std::endl;
}
// ++ SIXTH
- std::cout << " * INSERT (2,1)" << std::endl;
+ std::clog << " * INSERT (2,1)" << std::endl;
typeVectorVertex sixthSimplexVector = {2, 1};
returnValue = simplexTree.insert_simplex(sixthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (2,1) INSERTED" << std::endl;
+ std::clog << " + (2,1) INSERTED" << std::endl;
} else {
- std::cout << " - (2,1) NOT INSERTED" << std::endl;
+ std::clog << " - (2,1) NOT INSERTED" << std::endl;
}
// ++ SEVENTH
- std::cout << " * INSERT (2,1,0)" << std::endl;
+ std::clog << " * INSERT (2,1,0)" << std::endl;
typeVectorVertex seventhSimplexVector = {2, 1, 0};
returnValue = simplexTree.insert_simplex(seventhSimplexVector, Filtration_value(THIRD_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (2,1,0) INSERTED" << std::endl;
+ std::clog << " + (2,1,0) INSERTED" << std::endl;
} else {
- std::cout << " - (2,1,0) NOT INSERTED" << std::endl;
+ std::clog << " - (2,1,0) NOT INSERTED" << std::endl;
}
// ++ EIGHTH
- std::cout << " * INSERT 3" << std::endl;
+ std::clog << " * INSERT 3" << std::endl;
typeVectorVertex eighthSimplexVector = {3};
returnValue = simplexTree.insert_simplex(eighthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + 3 INSERTED" << std::endl;
+ std::clog << " + 3 INSERTED" << std::endl;
} else {
- std::cout << " - 3 NOT INSERTED" << std::endl;
+ std::clog << " - 3 NOT INSERTED" << std::endl;
}
- // ++ NINETH
- std::cout << " * INSERT (3,0)" << std::endl;
+ // ++ NINTH
+ std::clog << " * INSERT (3,0)" << std::endl;
typeVectorVertex ninethSimplexVector = {3, 0};
returnValue = simplexTree.insert_simplex(ninethSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (3,0) INSERTED" << std::endl;
+ std::clog << " + (3,0) INSERTED" << std::endl;
} else {
- std::cout << " - (3,0) NOT INSERTED" << std::endl;
+ std::clog << " - (3,0) NOT INSERTED" << std::endl;
}
// ++ TENTH
- std::cout << " * INSERT 0 (already inserted)" << std::endl;
+ std::clog << " * INSERT 0 (already inserted)" << std::endl;
typeVectorVertex tenthSimplexVector = {0};
// With a different filtration value
returnValue = simplexTree.insert_simplex(tenthSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + 0 INSERTED" << std::endl;
+ std::clog << " + 0 INSERTED" << std::endl;
} else {
- std::cout << " - 0 NOT INSERTED" << std::endl;
+ std::clog << " - 0 NOT INSERTED" << std::endl;
}
// ++ ELEVENTH
- std::cout << " * INSERT (2,1,0) (already inserted)" << std::endl;
+ std::clog << " * INSERT (2,1,0) (already inserted)" << std::endl;
typeVectorVertex eleventhSimplexVector = {2, 1, 0};
returnValue = simplexTree.insert_simplex(eleventhSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE));
if (returnValue.second == true) {
- std::cout << " + (2,1,0) INSERTED" << std::endl;
+ std::clog << " + (2,1,0) INSERTED" << std::endl;
} else {
- std::cout << " - (2,1,0) NOT INSERTED" << std::endl;
+ std::clog << " - (2,1,0) NOT INSERTED" << std::endl;
}
// ++ GENERAL VARIABLE SET
- std::cout << "********************************************************************\n";
- // Display the Simplex_tree - Can not be done in the middle of 2 inserts
- std::cout << "* The complex contains " << simplexTree.num_simplices() << " simplices\n";
- std::cout << " - dimension " << simplexTree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "********************************************************************\n";
+ std::clog << "* The complex contains " << simplexTree.num_simplices() << " simplices\n";
+ std::clog << " - dimension " << simplexTree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplexTree.filtration_simplex_range()) {
- std::cout << " "
+ std::clog << " "
<< "[" << simplexTree.filtration(f_simplex) << "] ";
- for (auto vertex : simplexTree.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : simplexTree.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
// [0.1] 0
// [0.1] 1
@@ -190,66 +189,66 @@ int main(int argc, char* const argv[]) {
// Find in the simplex_tree
// ------------------------------------------------------------------------------------------------------------------
Simplex_tree::Simplex_handle simplexFound = simplexTree.find(secondSimplexVector);
- std::cout << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
typeVectorVertex unknownSimplexVector = {15};
simplexFound = simplexTree.find(unknownSimplexVector);
- std::cout << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
simplexFound = simplexTree.find(fifthSimplexVector);
- std::cout << "**************IS THE SIMPLEX {2,0} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {2,0} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
typeVectorVertex otherSimplexVector = {1, 15};
simplexFound = simplexTree.find(otherSimplexVector);
- std::cout << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
typeVectorVertex invSimplexVector = {1, 2, 0};
simplexFound = simplexTree.find(invSimplexVector);
- std::cout << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
simplexFound = simplexTree.find({0, 1});
- std::cout << "**************IS THE SIMPLEX {0,1} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {0,1} IN THE SIMPLEX TREE ?\n";
if (simplexFound != simplexTree.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
- std::cout << "**************COFACES OF {0,1} IN CODIMENSION 1 ARE\n";
+ std::clog << "**************COFACES OF {0,1} IN CODIMENSION 1 ARE\n";
for (auto& simplex : simplexTree.cofaces_simplex_range(simplexTree.find({0, 1}), 1)) {
- for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
- std::cout << "**************STARS OF {0,1} ARE\n";
+ std::clog << "**************STARS OF {0,1} ARE\n";
for (auto& simplex : simplexTree.star_simplex_range(simplexTree.find({0, 1}))) {
- for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
- std::cout << "**************BOUNDARIES OF {0,1,2} ARE\n";
+ std::clog << "**************BOUNDARIES OF {0,1,2} ARE\n";
for (auto& simplex : simplexTree.boundary_simplex_range(simplexTree.find({0, 1, 2}))) {
- for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : simplexTree.simplex_vertex_range(simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
return 0;
diff --git a/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp b/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp
index f6dfa53c..6278efa7 100644
--- a/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp
+++ b/src/Simplex_tree/example/simplex_tree_from_cliques_of_graph.cpp
@@ -42,67 +42,67 @@ int main(int argc, char * const argv[]) {
// insert the graph in the simplex tree as 1-skeleton
st.insert_graph(g);
end = clock();
- std::cout << "Insert the 1-skeleton in the simplex tree in "
+ std::clog << "Insert the 1-skeleton in the simplex tree in "
<< static_cast<double>(end - start) / CLOCKS_PER_SEC << " s. \n";
start = clock();
// expand the 1-skeleton until dimension max_dim
st.expansion(max_dim);
end = clock();
- std::cout << "max_dim = " << max_dim << "\n";
- std::cout << "Expand the simplex tree in "
+ std::clog << "max_dim = " << max_dim << "\n";
+ std::clog << "Expand the simplex tree in "
<< static_cast<double>(end - start) / CLOCKS_PER_SEC << " s. \n";
- std::cout << "Information of the Simplex Tree: " << std::endl;
- std::cout << " Number of vertices = " << st.num_vertices() << " ";
- std::cout << " Number of simplices = " << st.num_simplices() << std::endl;
- std::cout << std::endl << std::endl;
+ std::clog << "Information of the Simplex Tree: " << std::endl;
+ std::clog << " Number of vertices = " << st.num_vertices() << " ";
+ std::clog << " Number of simplices = " << st.num_simplices() << std::endl;
+ std::clog << std::endl << std::endl;
- std::cout << "Iterator on vertices: ";
+ std::clog << "Iterator on vertices: ";
for (auto vertex : st.complex_vertex_range()) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
- std::cout << "Iterator on simplices: " << std::endl;
+ std::clog << "Iterator on simplices: " << std::endl;
for (auto simplex : st.complex_simplex_range()) {
- std::cout << " ";
+ std::clog << " ";
for (auto vertex : st.simplex_vertex_range(simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
- std::cout << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
- std::cout << "Iterator on Simplices in the filtration, and their boundary simplices:" << std::endl;
+ std::clog << "Iterator on Simplices in the filtration, and their boundary simplices:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
for (auto b_simplex : st.boundary_simplex_range(f_simplex)) {
- std::cout << " " << "[" << st.filtration(b_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(b_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(b_simplex)) {
- std::cout << vertex << " ";
+ std::clog << vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
return 0;
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 76608008..4177a0b8 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -24,6 +24,9 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/range/adaptor/reversed.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+#include <boost/range/size.hpp>
+#include <boost/container/static_vector.hpp>
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_sort.h>
@@ -41,6 +44,24 @@
namespace Gudhi {
+/** \addtogroup simplex_tree
+ * @{
+ */
+
+/**
+ * \class Extended_simplex_type Simplex_tree.h gudhi/Simplex_tree.h
+ * \brief Extended simplex type data structure for representing the type of simplices in an extended filtration.
+ *
+ * \details The extended simplex type can be either UP (which means
+ * that the simplex was present originally, and is thus part of the ascending extended filtration), DOWN (which means
+ * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or
+ * EXTRA (which means the simplex is the cone point).
+ *
+ * Details may be found in \cite Cohen-Steiner2009 and section 2.2 in \cite Carriere16.
+ *
+ */
+enum class Extended_simplex_type {UP, DOWN, EXTRA};
+
struct Simplex_tree_options_full_featured;
/**
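
A minimal sketch of how a consumer might branch on the new enum (the variable and the comments are illustrative; the semantics follow the documentation above):

    using Gudhi::Extended_simplex_type;
    Extended_simplex_type t = Extended_simplex_type::UP;  // hypothetical value
    switch (t) {
      case Extended_simplex_type::UP:    /* original simplex, ascending part */ break;
      case Extended_simplex_type::DOWN:  /* coned copy, descending part      */ break;
      case Extended_simplex_type::EXTRA: /* the cone point itself            */ break;
    }
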
@@ -82,10 +103,11 @@ class Simplex_tree {
// Simplex_key next to each other).
typedef typename boost::container::flat_map<Vertex_handle, Node> Dictionary;
- /* \brief Set of nodes sharing a same parent in the simplex tree. */
- /* \brief Set of nodes sharing a same parent in the simplex tree. */
+  /** \brief Set of nodes sharing the same parent in the simplex tree. */
typedef Simplex_tree_siblings<Simplex_tree, Dictionary> Siblings;
+
+
struct Key_simplex_base_real {
Key_simplex_base_real() : key_(-1) {}
void assign_key(Simplex_key k) { key_ = k; }
@@ -99,6 +121,12 @@ class Simplex_tree {
void assign_key(Simplex_key);
Simplex_key key() const;
};
+ struct Extended_filtration_data {
+ Filtration_value minval;
+ Filtration_value maxval;
+ Extended_filtration_data(){}
+ Extended_filtration_data(Filtration_value vmin, Filtration_value vmax): minval(vmin), maxval(vmax) {}
+ };
typedef typename std::conditional<Options::store_key, Key_simplex_base_real, Key_simplex_base_dummy>::type
Key_simplex_base;
@@ -119,7 +147,10 @@ class Simplex_tree {
public:
/** \brief Handle type to a simplex contained in the simplicial complex represented
- * by the simplex tree. */
+ * by the simplex tree.
+ *
+ * They are essentially pointers into internal vectors, and any insertion or removal
+ * of a simplex may invalidate any other Simplex_handle in the complex. */
typedef typename Dictionary::iterator Simplex_handle;
private:
@@ -161,6 +192,12 @@ class Simplex_tree {
typedef Simplex_tree_boundary_simplex_iterator<Simplex_tree> Boundary_simplex_iterator;
/** \brief Range over the simplices of the boundary of a simplex. */
typedef boost::iterator_range<Boundary_simplex_iterator> Boundary_simplex_range;
+ /** \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * 'value_type' is std::pair<Simplex_handle, Vertex_handle>. */
+ typedef Simplex_tree_boundary_opposite_vertex_simplex_iterator<Simplex_tree> Boundary_opposite_vertex_simplex_iterator;
+ /** \brief Range over the simplices of the boundary of a simplex and their opposite vertices. */
+ typedef boost::iterator_range<Boundary_opposite_vertex_simplex_iterator> Boundary_opposite_vertex_simplex_range;
/** \brief Iterator over the simplices of the simplicial complex.
*
* 'value_type' is Simplex_handle. */
@@ -232,11 +269,9 @@ class Simplex_tree {
*
* The filtration must be valid. If the filtration has not been initialized yet, the
* method initializes it (i.e. order the simplices). If the complex has changed since the last time the filtration
- * was initialized, please call `initialize_filtration()` to recompute it. */
+ * was initialized, please call `clear_filtration()` or `initialize_filtration()` to recompute it. */
Filtration_simplex_range const& filtration_simplex_range(Indexing_tag = Indexing_tag()) {
- if (filtration_vect_.empty()) {
- initialize_filtration();
- }
+ maybe_initialize_filtration();
return filtration_vect_;
}
@@ -246,8 +281,8 @@ class Simplex_tree {
   * which is consequently
* equal to \f$(-1)^{\text{dim} \sigma}\f$ the canonical orientation on the simplex.
*/
- Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) {
- assert(sh != null_simplex()); // Empty simplex
+ Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) const {
+ GUDHI_CHECK(sh != null_simplex(), "empty simplex");
return Simplex_vertex_range(Simplex_vertex_iterator(this, sh),
Simplex_vertex_iterator(this));
}
@@ -272,6 +307,23 @@ class Simplex_tree {
Boundary_simplex_iterator(this));
}
+ /** \brief Given a simplex, returns a range over the simplices of its boundary and their opposite vertices.
+ *
+ * The boundary of a simplex is the set of codimension \f$1\f$ subsimplices of the simplex.
+ * If the simplex is \f$[v_0, \cdots ,v_d]\f$, with canonical orientation induced by \f$ v_0 < \cdots < v_d \f$, the
+ * iterator enumerates the simplices of the boundary in the order:
+ * \f$[v_0,\cdots,\widehat{v_i},\cdots,v_d]\f$ for \f$i\f$ from \f$d\f$ to \f$0\f$, where \f$\widehat{v_i}\f$ means
+ * that the vertex \f$v_i\f$, known as the opposite vertex, is omitted from boundary, but returned as the second
+ * element of a pair.
+ *
+ * @param[in] sh Simplex for which the boundary is computed.
+ */
+ template<class SimplexHandle>
+ Boundary_opposite_vertex_simplex_range boundary_opposite_vertex_simplex_range(SimplexHandle sh) {
+ return Boundary_opposite_vertex_simplex_range(Boundary_opposite_vertex_simplex_iterator(this, sh),
+ Boundary_opposite_vertex_simplex_iterator(this));
+ }
+
/** @} */ // end range and iterator methods
/** \name Constructor/Destructor
* @{ */
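
A short usage sketch for the new range (assuming a populated Simplex_tree st and a Simplex_handle sh obtained from it):

    for (const auto& face_and_vertex : st.boundary_opposite_vertex_simplex_range(sh)) {
      // face_and_vertex.first  : Simplex_handle of a codimension-1 face of sh
      // face_and_vertex.second : the Vertex_handle omitted from that face
    }
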
@@ -286,7 +338,7 @@ class Simplex_tree {
/** \brief User-defined copy constructor reproduces the whole tree structure. */
Simplex_tree(const Simplex_tree& complex_source) {
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree copy constructor" << std::endl;
+ std::clog << "Simplex_tree copy constructor" << std::endl;
#endif // DEBUG_TRACES
copy_from(complex_source);
}
@@ -296,7 +348,7 @@ class Simplex_tree {
*/
Simplex_tree(Simplex_tree && complex_source) {
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree move constructor" << std::endl;
+ std::clog << "Simplex_tree move constructor" << std::endl;
#endif // DEBUG_TRACES
move_from(complex_source);
@@ -313,7 +365,7 @@ class Simplex_tree {
/** \brief User-defined copy assignment reproduces the whole tree structure. */
Simplex_tree& operator= (const Simplex_tree& complex_source) {
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree copy assignment" << std::endl;
+ std::clog << "Simplex_tree copy assignment" << std::endl;
#endif // DEBUG_TRACES
// Self-assignment detection
if (&complex_source != this) {
@@ -330,7 +382,7 @@ class Simplex_tree {
*/
Simplex_tree& operator=(Simplex_tree&& complex_source) {
#ifdef DEBUG_TRACES
- std::cout << "Simplex_tree move assignment" << std::endl;
+ std::clog << "Simplex_tree move assignment" << std::endl;
#endif // DEBUG_TRACES
// Self-assignment detection
if (&complex_source != this) {
@@ -450,10 +502,19 @@ class Simplex_tree {
return true;
}
+ /** \brief Returns the filtration value of a simplex.
+ *
+ * Same as `filtration()`, but does not handle `null_simplex()`.
+ */
+ static Filtration_value filtration_(Simplex_handle sh) {
+ GUDHI_CHECK (sh != null_simplex(), "null simplex");
+ return sh->second.filtration();
+ }
+
public:
/** \brief Returns the key associated to a simplex.
*
- * The filtration must be initialized.
+ * If no key has been assigned, returns `null_key()`.
* \pre SimplexTreeOptions::store_key
*/
static Simplex_key key(Simplex_handle sh) {
@@ -463,7 +524,6 @@ class Simplex_tree {
/** \brief Returns the simplex that has index idx in the filtration.
*
* The filtration must be initialized.
- * \pre SimplexTreeOptions::store_key
*/
Simplex_handle simplex(Simplex_key idx) const {
return filtration_vect_[idx];
@@ -499,8 +559,7 @@ class Simplex_tree {
return Dictionary_it(nullptr);
}
- /** \brief Returns a key different for all keys associated to the
- * simplices of the simplicial complex. */
+ /** \brief Returns a fixed number not in the interval [0, `num_simplices()`). */
static Simplex_key null_key() {
return -1;
}
@@ -645,10 +704,10 @@ class Simplex_tree {
return true;
}
- private:
- /** \brief Inserts a simplex represented by a vector of vertex.
- * @param[in] simplex vector of Vertex_handles, representing the vertices of the new simplex. The vector must be
- * sorted by increasing vertex handle order.
+ protected:
+  /** \brief Inserts a simplex represented by a range of vertices.
+ * @param[in] simplex range of Vertex_handles, representing the vertices of the new simplex. The range must be
+ * sorted by increasing vertex handle order, and not empty.
* @param[in] filtration the filtration value assigned to the new simplex.
* @return If the new simplex is inserted successfully (i.e. it was not in the
* simplicial complex yet) the bool is set to true and the Simplex_handle is the handle assigned
@@ -660,12 +719,13 @@ class Simplex_tree {
* null_simplex.
*
*/
- std::pair<Simplex_handle, bool> insert_vertex_vector(const std::vector<Vertex_handle>& simplex,
+ template <class RandomVertexHandleRange = std::initializer_list<Vertex_handle>>
+ std::pair<Simplex_handle, bool> insert_simplex_raw(const RandomVertexHandleRange& simplex,
Filtration_value filtration) {
Siblings * curr_sib = &root_;
std::pair<Simplex_handle, bool> res_insert;
auto vi = simplex.begin();
- for (; vi != simplex.end() - 1; ++vi) {
+ for (; vi != std::prev(simplex.end()); ++vi) {
GUDHI_CHECK(*vi != null_vertex(), "cannot use the dummy null_vertex() as a real vertex");
res_insert = curr_sib->members_.emplace(*vi, Node(curr_sib, filtration));
if (!(has_children(res_insert.first))) {
@@ -686,9 +746,10 @@ class Simplex_tree {
return std::pair<Simplex_handle, bool>(null_simplex(), false);
}
// otherwise the insertion has succeeded - size is a size_type
- if (static_cast<int>(simplex.size()) - 1 > dimension_) {
+ int dim = static_cast<int>(boost::size(simplex)) - 1;
+ if (dim > dimension_) {
// Update dimension if needed
- dimension_ = static_cast<int>(simplex.size()) - 1;
+ dimension_ = dim;
}
return res_insert;
}
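For context, a minimal usage sketch showing how this primitive is reached through the public API (not part of the patch; it only assumes GUDHI's public <gudhi/Simplex_tree.h> header). insert_simplex_raw() itself is protected and expects a sorted, non-empty range; user code goes through insert_simplex() or insert_simplex_and_subfaces(), which copy and sort the input first.

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  // The public entry points accept an unsorted range; they copy and sort it
  // before delegating to the raw insertion primitive.
  auto res = st.insert_simplex_and_subfaces({2, 0, 1}, 0.5);
  std::cout << "inserted: " << std::boolalpha << res.second
            << ", dimension: " << st.dimension() << std::endl;  // true, 2
  return 0;
}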
@@ -729,7 +790,7 @@ class Simplex_tree {
// Copy before sorting
std::vector<Vertex_handle> copy(first, last);
std::sort(std::begin(copy), std::end(copy));
- return insert_vertex_vector(copy, filtration);
+ return insert_simplex_raw(copy, filtration);
}
/** \brief Inserts an N-simplex and all its subfaces, from an N-simplex represented by a range of
@@ -755,12 +816,7 @@ class Simplex_tree {
if (first == last)
return { null_simplex(), true }; // FIXME: false would make more sense to me.
- // Copy before sorting
- // Thread local is not available on XCode version < V.8 - It will slow down computation
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<Vertex_handle> copy;
+ thread_local std::vector<Vertex_handle> copy;
copy.clear();
copy.insert(copy.end(), first, last);
std::sort(copy.begin(), copy.end());
@@ -827,7 +883,7 @@ class Simplex_tree {
/** Returns the Siblings containing a simplex.*/
template<class SimplexHandle>
- Siblings* self_siblings(SimplexHandle sh) {
+ static Siblings* self_siblings(SimplexHandle sh) {
if (sh->second.children()->parent() == sh->first)
return sh->second.children()->oncles();
else
@@ -850,15 +906,13 @@ class Simplex_tree {
}
public:
- /** \brief Initializes the filtrations, i.e. sort the
- * simplices according to their order in the filtration and initializes all Simplex_keys.
+ /** \brief Initializes the filtration cache, i.e. sorts the
+ * simplices according to their order in the filtration.
*
- * After calling this method, filtration_simplex_range() becomes valid, and each simplex is
- * assigned a Simplex_key corresponding to its order in the filtration (from 0 to m-1 for a
- * simplicial complex with m simplices).
+ * It always recomputes the cache, even if one already exists.
*
- * Will be automatically called when calling filtration_simplex_range()
- * if the filtration has never been initialized yet. */
+ * Any insertion, deletion or change of filtration value invalidates this cache,
+ * which can be cleared with clear_filtration(). */
void initialize_filtration() {
filtration_vect_.clear();
filtration_vect_.reserve(num_simplices());
@@ -880,6 +934,21 @@ class Simplex_tree {
std::stable_sort(filtration_vect_.begin(), filtration_vect_.end(), is_before_in_filtration(this));
#endif
}
+ /** \brief Initializes the filtration cache if it isn't initialized yet.
+ *
+ * Automatically called by filtration_simplex_range(). */
+ void maybe_initialize_filtration() {
+ if (filtration_vect_.empty()) {
+ initialize_filtration();
+ }
+ }
+ /** \brief Clears the filtration cache produced by initialize_filtration().
+ *
+ * Useful when initialize_filtration() has already been called and we perform an operation
+ * (say an insertion) that invalidates the cache. */
+ void clear_filtration() {
+ filtration_vect_.clear();
+ }
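To illustrate the new cache API, a minimal sketch of the filtration-cache lifecycle (not part of the patch; it only assumes <gudhi/Simplex_tree.h> and the methods introduced here):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1}, 1.0);
  st.insert_simplex_and_subfaces({1, 2}, 2.0);

  // filtration_simplex_range() calls maybe_initialize_filtration() internally,
  // so the sorted cache is built lazily on first use.
  for (auto sh : st.filtration_simplex_range())
    std::cout << st.filtration(sh) << " ";
  std::cout << std::endl;

  // Any insertion invalidates the sorted cache; drop it explicitly before re-iterating.
  st.insert_simplex_and_subfaces({2, 3}, 0.5);
  st.clear_filtration();
  for (auto sh : st.filtration_simplex_range())  // the cache is rebuilt here
    std::cout << st.filtration(sh) << " ";
  std::cout << std::endl;
  return 0;
}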
private:
/** Recursive search of cofaces
@@ -903,7 +972,7 @@ class Simplex_tree {
// If we reached the end of the vertices, and the simplex has more vertices than the given simplex
// => we found a coface
- // Add a coface if we wan't the star or if the number of vertices of the current simplex matches with nbVertices
+ // Add a coface if we want the star or if the number of vertices of the current simplex matches with nbVertices
bool addCoface = (star || curr_nbVertices == nbVertices);
if (addCoface)
cofaces.push_back(simplex);
@@ -1021,8 +1090,8 @@ class Simplex_tree {
*
* Inserts all vertices and edges given by a OneSkeletonGraph.
* OneSkeletonGraph must be a model of
- * <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/EdgeListGraph.html">boost::EdgeListGraph</a>
- * and <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
+ * <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
+ * and <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
*
* The vertex filtration value is accessible through the property tag
* vertex_filtration_t.
@@ -1042,7 +1111,10 @@ class Simplex_tree {
// the simplex tree must be empty
assert(num_simplices() == 0);
- if (boost::num_vertices(skel_graph) == 0) {
+ // Is there a better way to let the compiler know that we don't mean Simplex_tree::num_vertices?
+ using boost::num_vertices;
+
+ if (num_vertices(skel_graph) == 0) {
return;
}
if (num_edges(skel_graph) == 0) {
@@ -1051,25 +1123,21 @@ class Simplex_tree {
dimension_ = 1;
}
- root_.members_.reserve(boost::num_vertices(skel_graph));
+ root_.members_.reserve(num_vertices(skel_graph)); // probably useless in most cases
+ auto verts = vertices(skel_graph) | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, get(vertex_filtration_t(), skel_graph, v))); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+ // This automatically sorts the vertices, as the graph concept doesn't guarantee the order in which we iterate.
- typename boost::graph_traits<OneSkeletonGraph>::vertex_iterator v_it,
- v_it_end;
- for (std::tie(v_it, v_it_end) = boost::vertices(skel_graph); v_it != v_it_end;
- ++v_it) {
- root_.members_.emplace_hint(
- root_.members_.end(), *v_it,
- Node(&root_, boost::get(vertex_filtration_t(), skel_graph, *v_it)));
- }
std::pair<typename boost::graph_traits<OneSkeletonGraph>::edge_iterator,
- typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = boost::edges(skel_graph);
+ typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = edges(skel_graph);
// boost_edges.first is equivalent to boost_edges.begin()
// boost_edges.second is equivalent to boost_edges.end()
for (; boost_edges.first != boost_edges.second; boost_edges.first++) {
auto edge = *(boost_edges.first);
auto u = source(edge, skel_graph);
auto v = target(edge, skel_graph);
- if (u == v) throw "Self-loops are not simplicial";
+ if (u == v) throw std::invalid_argument("Self-loops are not simplicial");
// We cannot skip edges with the wrong orientation and expect them to
// come a second time with the right orientation, as that does not always
// happen in practice. emplace() should be a NOP when an element with the
@@ -1084,10 +1152,25 @@ class Simplex_tree {
}
sh->second.children()->members().emplace(v,
- Node(sh->second.children(), boost::get(edge_filtration_t(), skel_graph, edge)));
+ Node(sh->second.children(), get(edge_filtration_t(), skel_graph, edge)));
}
}
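For reference, a minimal sketch of building a one-skeleton graph and feeding it to insert_graph() (not part of the patch). It assumes the property tags Gudhi::vertex_filtration_t and Gudhi::edge_filtration_t declared in <gudhi/graph_simplicial_complex.h>, as used by GUDHI's proximity graph; the exact graph type is illustrative.

#include <gudhi/Simplex_tree.h>
#include <gudhi/graph_simplicial_complex.h>
#include <boost/graph/adjacency_list.hpp>

int main() {
  using Filtration_value = Gudhi::Simplex_tree<>::Filtration_value;
  using Graph = boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS,
      boost::property<Gudhi::vertex_filtration_t, Filtration_value>,
      boost::property<Gudhi::edge_filtration_t, Filtration_value>>;

  Graph g(3);  // vertices 0, 1, 2; vertex filtration values default to 0
  boost::add_edge(0, 1, Filtration_value(0.5), g);
  boost::add_edge(1, 2, Filtration_value(1.0), g);

  Gudhi::Simplex_tree<> st;  // must be empty before calling insert_graph()
  st.insert_graph(g);
  return 0;
}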
+ /** \brief Inserts several vertices.
+ * @param[in] vertices A range of Vertex_handle
+ * @param[in] filt filtration value of the new vertices (the same for all)
+ *
+ * This may be faster than inserting the vertices one by one, especially in a random order.
+ * The complex does not need to be empty before calling this function. However, if a vertex is
+ * already present, its filtration value is not modified, unlike with other insertion functions. */
+ template <class VertexRange>
+ void insert_batch_vertices(VertexRange const& vertices, Filtration_value filt = 0) {
+ auto verts = vertices | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, filt)); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+ if (dimension_ < 0 && !root_.members_.empty()) dimension_ = 0;
+ }
+
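A minimal usage sketch for the new batch insertion (not part of the patch; it only assumes <gudhi/Simplex_tree.h>):

#include <gudhi/Simplex_tree.h>
#include <vector>

int main() {
  using Vertex_handle = Gudhi::Simplex_tree<>::Vertex_handle;
  Gudhi::Simplex_tree<> st;
  std::vector<Vertex_handle> verts = {5, 1, 3};  // any order
  st.insert_batch_vertices(verts, 0.0);          // one bulk insertion into the sorted root members
  // Re-inserting an existing vertex leaves its filtration value unchanged,
  // unlike the other insertion functions.
  st.insert_batch_vertices(std::vector<Vertex_handle>{1, 2}, 7.0);
  return 0;
}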
/** \brief Expands the Simplex_tree containing only its one skeleton
* until dimension max_dim.
*
@@ -1101,6 +1184,7 @@ class Simplex_tree {
* 1 when calling the method. */
void expansion(int max_dim) {
if (max_dim <= 1) return;
+ clear_filtration(); // Drop the cache.
dimension_ = max_dim;
for (Dictionary_it root_it = root_.members_.begin();
root_it != root_.members_.end(); ++root_it) {
@@ -1123,10 +1207,7 @@ class Simplex_tree {
Dictionary_it next = siblings->members().begin();
++next;
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<std::pair<Vertex_handle, Node> > inter;
+ thread_local std::vector<std::pair<Vertex_handle, Node> > inter;
for (Dictionary_it s_h = siblings->members().begin();
s_h != siblings->members().end(); ++s_h, ++next) {
Simplex_handle root_sh = find_vertex(s_h->first);
@@ -1243,6 +1324,7 @@ class Simplex_tree {
Siblings * new_sib = new Siblings(siblings, // oncles
simplex->first, // parent
boost::adaptors::reverse(intersection)); // boost::container::ordered_unique_range_t
+ simplex->second.assign_children(new_sib);
std::vector<Vertex_handle> blocked_new_sib_vertex_list;
// As all intersections are inserted, we can call the blocker function on all new_sib members
for (auto new_sib_member = new_sib->members().begin();
@@ -1265,7 +1347,6 @@ class Simplex_tree {
new_sib->members().erase(blocked_new_sib_member);
}
// ensure recursive call
- simplex->second.assign_children(new_sib);
siblings_expansion_with_blockers(new_sib, max_dim, k - 1, block_simplex);
}
} else {
@@ -1275,7 +1356,7 @@ class Simplex_tree {
}
}
- /* \private Returns the Simplex_handle composed of the vertex list (from the Simplex_handle), plus the given
+ /** \private Returns the Simplex_handle composed of the vertex list (from the Simplex_handle), plus the given
* Vertex_handle if the Vertex_handle is found in the Simplex_handle children list.
* Returns null_simplex() if it does not exist
*/
@@ -1314,9 +1395,8 @@ class Simplex_tree {
/** \brief This function ensures that each simplex has a higher filtration value than its faces by increasing the
* filtration values.
* @return True if any filtration value was modified, false if the filtration was already non-decreasing.
- * \post Some simplex tree functions require the filtration to be valid. `make_filtration_non_decreasing()`
- * function is not launching `initialize_filtration()` but returns the filtration modification information. If the
- * complex has changed , please call `initialize_filtration()` to recompute it.
+ *
+ * If a simplex has a `NaN` filtration value, it is considered lower than any other defined filtration value.
*/
bool make_filtration_non_decreasing() {
bool modified = false;
@@ -1326,6 +1406,8 @@ class Simplex_tree {
modified |= rec_make_filtration_non_decreasing(simplex.second.children());
}
}
+ if(modified)
+ clear_filtration(); // Drop the cache.
return modified;
}
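A minimal sketch of repairing a filtration after a local change (not part of the patch; it only assumes <gudhi/Simplex_tree.h>):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1, 2}, 1.0);
  // Break monotonicity: the edge {0,1} now has a higher value than its coface {0,1,2}.
  st.assign_filtration(st.find({0, 1}), 2.0);
  bool modified = st.make_filtration_non_decreasing();   // raises {0,1,2} to 2.0
  std::cout << std::boolalpha << modified << std::endl;  // true; the cache was dropped
  return 0;
}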
@@ -1347,7 +1429,9 @@ class Simplex_tree {
});
Filtration_value max_filt_border_value = filtration(*max_border);
- if (simplex.second.filtration() < max_filt_border_value) {
+ // Replacing if(f<max) with if(!(f>=max)) would mean that if f is NaN, we replace it with the max of the children.
+ // That seems more useful than keeping NaN.
+ if (!(simplex.second.filtration() >= max_filt_border_value)) {
// Store the filtration modification information
modified = true;
simplex.second.assign_filtration(max_filt_border_value);
@@ -1363,16 +1447,16 @@ class Simplex_tree {
public:
/** \brief Prune above filtration value given as parameter.
* @param[in] filtration Maximum threshold value.
- * @return The filtration modification information.
- * \post Some simplex tree functions require the filtration to be valid. `prune_above_filtration()`
- * function is not launching `initialize_filtration()` but returns the filtration modification information. If the
- * complex has changed , please call `initialize_filtration()` to recompute it.
+ * @return True if any simplex was removed, false if all simplices already had a value below the threshold.
* \post Note that the dimension of the simplicial complex may be lower after calling `prune_above_filtration()`
* than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
* bound. If you care, you can call `dimension()` to recompute the exact dimension.
*/
bool prune_above_filtration(Filtration_value filtration) {
- return rec_prune_above_filtration(root(), filtration);
+ bool modified = rec_prune_above_filtration(root(), filtration);
+ if(modified)
+ clear_filtration(); // Drop the cache.
+ return modified;
}
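A minimal usage sketch for prune_above_filtration() and the dimension caveat (not part of the patch; it only assumes <gudhi/Simplex_tree.h>):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1}, 1.0);
  st.insert_simplex_and_subfaces({0, 1, 2}, 3.0);  // {0}, {1}, {0,1} keep 1.0; the new faces get 3.0
  bool removed = st.prune_above_filtration(2.0);   // removes {2} and every simplex containing it
  std::cout << std::boolalpha << removed << std::endl;   // true
  std::cout << st.upper_bound_dimension() << std::endl;  // still 2, a valid upper bound
  std::cout << st.dimension() << std::endl;              // recomputes the exact dimension: 1
  return 0;
}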
private:
@@ -1418,14 +1502,14 @@ class Simplex_tree {
for (Simplex_handle sh : complex_simplex_range()) {
#ifdef DEBUG_TRACES
for (auto vertex : simplex_vertex_range(sh)) {
- std::cout << " " << vertex;
+ std::clog << " " << vertex;
}
- std::cout << std::endl;
+ std::clog << std::endl;
#endif // DEBUG_TRACES
int sh_dimension = dimension(sh);
if (sh_dimension >= dimension_)
- // Stop browsing as soon as the dimension is reached, no need to go furter
+ // Stop browsing as soon as the dimension is reached, no need to go further
return false;
new_dimension = (std::max)(new_dimension, sh_dimension);
}
@@ -1439,7 +1523,6 @@ class Simplex_tree {
* @param[in] sh Simplex handle on the maximal simplex to remove.
* \pre Please check the simplex has no coface before removing it.
* \exception std::invalid_argument In debug mode, if sh has children.
- * \post Be aware that removing is shifting data in a flat_map (initialize_filtration to be done).
* \post Note that the dimension of the simplicial complex may be lower after calling `remove_maximal_simplex()`
* than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
* bound. If you care, you can call `dimension()` to recompute the exact dimension.
@@ -1465,6 +1548,200 @@ class Simplex_tree {
}
}
+ /** \brief Retrieve the original filtration value for a given simplex in the Simplex_tree. Since the
+ * computation of extended persistence requires modifying the filtration values, this function can be used
+ * to recover the original values. Moreover, computing extended persistence requires adding new simplices
+ * in the Simplex_tree. Hence, this function also outputs the type of each simplex. It can be either UP (which means
+ * that the simplex was present originally, and is thus part of the ascending extended filtration), DOWN (which means
+ * that the simplex is the cone of an original simplex, and is thus part of the descending extended filtration) or
+ * EXTRA (which means the simplex is the cone point). See the definition of Extended_simplex_type. Note that
+ * if the simplex type is DOWN, the filtration value returned is the original filtration value of the
+ * corresponding (non-coned) original simplex.
+ * \pre This function should be called only if `extend_filtration()` has been called first!
+ * \post The output filtration value is expected to equal the original filtration value, but it may differ
+ * slightly due to the internal transformation (scaling to [-2,-1]) that is
+ * performed on the original filtration values during the computation of extended persistence.
+ * @param[in] f Filtration value of the simplex in the extended (i.e., modified) filtration.
+ * @param[in] efd Structure containing the minimum and maximum values of the original filtration. This is the output of `extend_filtration()`.
+ * @return A pair containing the original filtration value of the simplex as well as the simplex type.
+ */
+ std::pair<Filtration_value, Extended_simplex_type> decode_extended_filtration(Filtration_value f, const Extended_filtration_data& efd){
+ std::pair<Filtration_value, Extended_simplex_type> p;
+ Filtration_value minval = efd.minval;
+ Filtration_value maxval = efd.maxval;
+ if (f >= -2 && f <= -1){
+ p.first = minval + (maxval-minval)*(f + 2); p.second = Extended_simplex_type::UP;
+ }
+ else if (f >= 1 && f <= 2){
+ p.first = minval - (maxval-minval)*(f - 2); p.second = Extended_simplex_type::DOWN;
+ }
+ else{
+ p.first = std::numeric_limits<Filtration_value>::quiet_NaN(); p.second = Extended_simplex_type::EXTRA;
+ }
+ return p;
+ };
+
+ /** \brief Extend filtration for computing extended persistence.
+ * This function only uses the filtration values at the 0-dimensional simplices,
+ * and computes the extended persistence diagram induced by the lower-star filtration
+ * computed with these values.
+ * \post Note that after calling this function, the filtration
+ * values are actually modified. The function `decode_extended_filtration()`
+ * retrieves the original values and outputs the extended simplex type.
+ * \pre Note that this code creates an extra vertex internally, so you should make sure that
+ * the Simplex_tree does not already contain a vertex with the largest possible Vertex_handle.
+ * @return A data structure containing the maximum and minimum values of the original filtration.
+ * It is meant to be provided as input to `decode_extended_filtration()` in order to retrieve
+ * the original filtration values for each simplex.
+ */
+ Extended_filtration_data extend_filtration() {
+ clear_filtration(); // Drop the cache.
+
+ // Compute maximum and minimum of filtration values
+ Vertex_handle maxvert = std::numeric_limits<Vertex_handle>::min();
+ Filtration_value minval = std::numeric_limits<Filtration_value>::infinity();
+ Filtration_value maxval = -std::numeric_limits<Filtration_value>::infinity();
+ for (auto sh = root_.members().begin(); sh != root_.members().end(); ++sh){
+ Filtration_value f = this->filtration(sh);
+ minval = std::min(minval, f);
+ maxval = std::max(maxval, f);
+ maxvert = std::max(sh->first, maxvert);
+ }
+
+ GUDHI_CHECK(maxvert < std::numeric_limits<Vertex_handle>::max(), std::invalid_argument("Simplex_tree contains a vertex with the largest Vertex_handle"));
+ maxvert += 1;
+
+ Simplex_tree st_copy = *this;
+
+ // Add point for coning the simplicial complex
+ this->insert_simplex_raw({maxvert}, -3);
+
+ // For each simplex
+ std::vector<Vertex_handle> vr;
+ for (auto sh_copy : st_copy.complex_simplex_range()){
+
+ // Locate simplex
+ vr.clear();
+ for (auto vh : st_copy.simplex_vertex_range(sh_copy)){
+ vr.push_back(vh);
+ }
+ auto sh = this->find(vr);
+
+ // Create cone on simplex
+ vr.push_back(maxvert);
+ if (this->dimension(sh) == 0){
+ Filtration_value v = this->filtration(sh);
+ Filtration_value scaled_v = (v-minval)/(maxval-minval);
+ // Assign ascending value between -2 and -1 to vertex
+ this->assign_filtration(sh, -2 + scaled_v);
+ // Assign descending value between 1 and 2 to cone on vertex
+ this->insert_simplex(vr, 2 - scaled_v);
+ }
+ else{
+ // Assign value -3 to simplex and cone on simplex
+ this->assign_filtration(sh, -3);
+ this->insert_simplex(vr, -3);
+ }
+ }
+
+ // Automatically assign good values for simplices
+ this->make_filtration_non_decreasing();
+
+ // Return the filtration data
+ Extended_filtration_data efd(minval, maxval);
+ return efd;
+ }
+
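A minimal round-trip sketch for extend_filtration() and decode_extended_filtration() (not part of the patch; it only assumes <gudhi/Simplex_tree.h> and the methods introduced here):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1}, 0.);
  st.assign_filtration(st.find({0}), 1.0);
  st.assign_filtration(st.find({1}), 2.0);

  // Rescales the vertex values into [-2,-1], cones the complex, and assigns
  // descending values in [1,2] to the coned copies of the vertices.
  auto efd = st.extend_filtration();

  for (auto sh : st.filtration_simplex_range()) {
    auto decoded = st.decode_extended_filtration(st.filtration(sh), efd);
    // decoded.first is the original filtration value (NaN for the cone point),
    // decoded.second is Extended_simplex_type::UP, DOWN or EXTRA.
    std::cout << decoded.first << std::endl;
  }
  return 0;
}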
+ /** \brief Returns a vertex of `sh` that has the same filtration value as `sh` if it exists, and `null_vertex()` otherwise.
+ *
+ * For a lower-star filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which vertex had its filtration value propagated to `sh`.
+ * If several vertices have the same filtration value, the one it returns is arbitrary. */
+ Vertex_handle vertex_with_same_filtration(Simplex_handle sh) {
+ auto filt = filtration_(sh);
+ for(auto v : simplex_vertex_range(sh))
+ if(filtration_(find_vertex(v)) == filt)
+ return v;
+ return null_vertex();
+ }
+
+ /** \brief Returns an edge of `sh` that has the same filtration value as `sh` if it exists, and `null_simplex()` otherwise.
+ *
+ * For a flag-complex built with `expansion()`, this is a way to invert the process and find out which edge had its filtration value propagated to `sh`.
+ * If several edges have the same filtration value, the one it returns is arbitrary.
+ *
+ * \pre `sh` must have dimension at least 1. */
+ Simplex_handle edge_with_same_filtration(Simplex_handle sh) {
+ // See issue #251 for potential speed improvements.
+ auto&& vertices = simplex_vertex_range(sh); // vertices in decreasing order
+ auto end = std::end(vertices);
+ auto vi = std::begin(vertices);
+ GUDHI_CHECK(vi != end, "empty simplex");
+ auto v0 = *vi;
+ ++vi;
+ GUDHI_CHECK(vi != end, "simplex of dimension 0");
+ if(std::next(vi) == end) return sh; // shortcut for dimension 1
+ boost::container::static_vector<Vertex_handle, 40> suffix;
+ suffix.push_back(v0);
+ auto filt = filtration_(sh);
+ do
+ {
+ Vertex_handle v = *vi;
+ auto&& children1 = find_vertex(v)->second.children()->members_;
+ for(auto w : suffix){
+ // Can we take advantage of the fact that suffix is ordered?
+ Simplex_handle s = children1.find(w);
+ if(filtration_(s) == filt)
+ return s;
+ }
+ suffix.push_back(v);
+ }
+ while(++vi != end);
+ return null_simplex();
+ }
+
+ /** \brief Returns a minimal face of `sh` that has the same filtration value as `sh`.
+ *
+ * For a filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which simplex had its filtration value propagated to `sh`.
+ * If several minimal (for inclusion) simplices have the same filtration value, the one it returns is arbitrary, and it is not guaranteed to be the one with smallest dimension. */
+ Simplex_handle minimal_simplex_with_same_filtration(Simplex_handle sh) {
+ auto filt = filtration_(sh);
+ // Naive implementation, it can be sped up.
+ for(auto b : boundary_simplex_range(sh))
+ if(filtration_(b) == filt)
+ return minimal_simplex_with_same_filtration(b);
+ return sh; // None of its faces has the same filtration.
+ }
+
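A minimal sketch of the three filtration-provenance helpers on a small flag complex (not part of the patch; it only assumes <gudhi/Simplex_tree.h>):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  // 1-skeleton, then flag expansion: {0,1,2} inherits the value of its latest edge.
  st.insert_simplex_and_subfaces({0, 1}, 1.0);
  st.insert_simplex_and_subfaces({0, 2}, 2.0);
  st.insert_simplex_and_subfaces({1, 2}, 3.0);
  st.expansion(2);  // {0,1,2} gets filtration value 3.0

  auto triangle = st.find({0, 1, 2});
  auto edge = st.edge_with_same_filtration(triangle);             // {1,2}, the edge with value 3.0
  auto face = st.minimal_simplex_with_same_filtration(triangle);  // also {1,2} here
  auto v = st.vertex_with_same_filtration(triangle);  // null_vertex(): no vertex has value 3.0
  std::cout << std::boolalpha << (v == st.null_vertex()) << std::endl;  // true
  (void)edge; (void)face;
  return 0;
}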
+ public:
+ /** \brief This function resets the filtration value of all the simplices of dimension at least min_dim. It resets
+ * the whole Simplex_tree when `min_dim = 0`.
+ * `reset_filtration` may break the filtration property with `min_dim > 0`, and it is the user's responsibility to
+ * make it a valid filtration (using a large enough `filt_value`, or calling `make_filtration_non_decreasing`
+ * afterwards for instance).
+ * @param[in] filt_value The new filtration value.
+ * @param[in] min_dim The minimal dimension. Default value is 0.
+ */
+ void reset_filtration(Filtration_value filt_value, int min_dim = 0) {
+ rec_reset_filtration(&root_, filt_value, min_dim);
+ clear_filtration(); // Drop the cache.
+ }
+
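A minimal usage sketch for reset_filtration() (not part of the patch; it only assumes <gudhi/Simplex_tree.h>):

#include <gudhi/Simplex_tree.h>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1, 2}, 1.0);
  // Give every simplex of dimension >= 1 the value 2.0; vertices keep 1.0.
  st.reset_filtration(2.0, 1);
  // With min_dim > 0 the result may not be a valid filtration in general;
  // make_filtration_non_decreasing() can repair it (a no-op in this particular case).
  st.make_filtration_non_decreasing();
  return 0;
}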
+ private:
+ /** \brief Recursively resets the filtration values of the simplices once min_depth <= 0.
+ * @param[in] sib Siblings to be parsed.
+ * @param[in] filt_value The new filtration value.
+ * @param[in] min_depth The minimal depth.
+ */
+ void rec_reset_filtration(Siblings * sib, Filtration_value filt_value, int min_depth) {
+ for (auto sh = sib->members().begin(); sh != sib->members().end(); ++sh) {
+ if (min_depth <= 0) {
+ sh->second.assign_filtration(filt_value);
+ }
+ if (has_children(sh)) {
+ rec_reset_filtration(sh->second.children(), filt_value, min_depth - 1);
+ }
+ }
+ }
+
private:
Vertex_handle null_vertex_;
/** \brief Total number of simplices in the complex, without the empty simplex.*/
@@ -1542,7 +1819,7 @@ struct Simplex_tree_options_fast_persistence {
static const bool contiguous_vertices = true;
};
-/** @} */ // end defgroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
} // namespace Gudhi
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index efccf2f2..b63a5595 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -5,6 +5,7 @@
* Copyright (C) 2014 Inria
*
* Modification(s):
+ * - 2022/04 Vincent Rouvreau: Add Simplex_tree_boundary_opposite_vertex_simplex_iterator for alpha and Cech complex purposes
* - YYYY/MM Author: Description of the modification
*/
@@ -14,21 +15,19 @@
#include <gudhi/Debug_utils.h>
#include <boost/iterator/iterator_facade.hpp>
-#include <boost/version.hpp>
-#if BOOST_VERSION >= 105600
-# include <boost/container/static_vector.hpp>
-#endif
+#include <boost/container/static_vector.hpp>
#include <vector>
+#include <utility> // for std::pair
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Iterators and range types for the Simplex_tree.
- * @{
+ * @{
*/
-/* \brief Iterator over the vertices of a simplex
+/** \brief Iterator over the vertices of a simplex
* in a SimplexTree.
*
* Forward iterator, 'value_type' is SimplexTree::Vertex_handle.*/
@@ -42,13 +41,13 @@ class Simplex_tree_simplex_vertex_iterator : public boost::iterator_facade<
typedef typename SimplexTree::Siblings Siblings;
typedef typename SimplexTree::Vertex_handle Vertex_handle;
- explicit Simplex_tree_simplex_vertex_iterator(SimplexTree * st)
+ explicit Simplex_tree_simplex_vertex_iterator(SimplexTree const* st)
: // any end() iterator
sib_(nullptr),
v_(st->null_vertex()) {
}
- Simplex_tree_simplex_vertex_iterator(SimplexTree * st, Simplex_handle sh)
+ Simplex_tree_simplex_vertex_iterator(SimplexTree const* st, Simplex_handle sh)
: sib_(st->self_siblings(sh)),
v_(sh->first) {
}
@@ -74,7 +73,7 @@ class Simplex_tree_simplex_vertex_iterator : public boost::iterator_facade<
};
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the simplices of the boundary of a
+/** \brief Iterator over the simplices of the boundary of a
* simplex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
@@ -87,6 +86,12 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
typedef typename SimplexTree::Vertex_handle Vertex_handle;
typedef typename SimplexTree::Siblings Siblings;
+ // For cython purposes only. The object it initializes should be overwritten ASAP and never used before it is overwritten.
+ Simplex_tree_boundary_simplex_iterator()
+ : sib_(nullptr),
+ st_(nullptr) {
+ }
+
// any end() iterator
explicit Simplex_tree_boundary_simplex_iterator(SimplexTree * st)
: last_(st->null_vertex()),
@@ -120,7 +125,7 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
private:
friend class boost::iterator_core_access;
-// valid when iterating along the SAME boundary.
+ // valid when iterating along the SAME boundary.
bool equal(Simplex_tree_boundary_simplex_iterator const& other) const {
return sh_ == other.sh_;
}
@@ -166,21 +171,127 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
// Most of the storage should be moved to the range, iterators should be light.
Vertex_handle last_; // last vertex of the simplex
Vertex_handle next_; // next vertex to push in suffix_
-#if BOOST_VERSION >= 105600
// 40 seems a conservative bound on the dimension of a Simplex_tree for now,
// as it would not fit on the biggest hard-drive.
boost::container::static_vector<Vertex_handle, 40> suffix_;
// static_vector still has some overhead compared to a trivial hand-made
// version using std::aligned_storage, or compared to making suffix_ static.
-#else
- std::vector<Vertex_handle> suffix_;
-#endif
Siblings * sib_; // where the next search will start from
Simplex_handle sh_; // current Simplex_handle in the boundary
SimplexTree * st_; // the simplex tree containing the simplicial complex
};
+
+/** \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * Forward iterator, value_type is std::pair<SimplexTree::Simplex_handle, SimplexTree::Vertex_handle>.*/
+template<class SimplexTree>
+class Simplex_tree_boundary_opposite_vertex_simplex_iterator : public boost::iterator_facade<
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator<SimplexTree>,
+ std::pair<typename SimplexTree::Simplex_handle, typename SimplexTree::Vertex_handle> const, boost::forward_traversal_tag> {
+ public:
+ using Simplex_handle = typename SimplexTree::Simplex_handle;
+ using Vertex_handle = typename SimplexTree::Vertex_handle;
+ using Siblings = typename SimplexTree::Siblings;
+
+ // For cython purposes only. The object it initializes should be overwritten ASAP and never used before it is
+ // overwritten.
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator()
+ : sib_(nullptr),
+ st_(nullptr) {
+ }
+
+ // any end() iterator
+ explicit Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st)
+ : last_(st->null_vertex()),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), st->null_vertex()),
+ st_(st) {
+ }
+
+ template<class SimplexHandle>
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st, SimplexHandle sh)
+ : last_(sh->first),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), sh->first),
+ st_(st) {
+ // Only check once at the beginning instead of for every increment, as this is expensive.
+ if (SimplexTree::Options::contiguous_vertices)
+ GUDHI_CHECK(st_->contiguous_vertices(), "The set of vertices is not { 0, ..., n } without holes");
+ Siblings * sib = st->self_siblings(sh);
+ next_ = sib->parent();
+ sib_ = sib->oncles();
+ if (sib_ != nullptr) {
+ if (SimplexTree::Options::contiguous_vertices && sib_->oncles() == nullptr)
+ // Only relevant for edges
+ baov_.first = sib_->members_.begin()+next_;
+ else
+ baov_.first = sib_->find(next_);
+ }
+ }
+
+ private:
+ friend class boost::iterator_core_access;
+
+ // valid when iterating along the SAME boundary.
+ bool equal(Simplex_tree_boundary_opposite_vertex_simplex_iterator const& other) const {
+ return (baov_.first == other.baov_.first);
+ }
+
+ std::pair<Simplex_handle, Vertex_handle> const& dereference() const {
+ return baov_;
+ }
+
+ void increment() {
+ if (sib_ == nullptr) {
+ baov_.first = st_->null_simplex();
+ return; // ------>>
+ }
+ Siblings * for_sib = sib_;
+ Siblings * new_sib = sib_->oncles();
+ auto rit = suffix_.rbegin();
+ if (SimplexTree::Options::contiguous_vertices && new_sib == nullptr) {
+ // We reached the root, use a short-cut to find a vertex.
+ if (rit == suffix_.rend()) {
+ baov_.second = baov_.first->first;
+ // The simplex is a segment (edge): this vertex is the last boundary simplex
+ baov_.first = for_sib->members_.begin()+last_;
+ sib_ = nullptr;
+ return;
+ } else {
+ // Dim >= 2, initial step of the descent
+ baov_.first = for_sib->members_.begin()+*rit;
+ for_sib = baov_.first->second.children();
+ ++rit;
+ }
+ }
+ for (; rit != suffix_.rend(); ++rit) {
+ baov_.first = for_sib->find(*rit);
+ for_sib = baov_.first->second.children();
+ }
+ baov_.first = for_sib->find(last_); // baov_.first points to the right simplex now
+ suffix_.push_back(next_);
+ next_ = sib_->parent();
+ sib_ = new_sib;
+ baov_.second = suffix_.back();
+ }
+
+ // Most of the storage should be moved to the range, iterators should be light.
+ Vertex_handle last_; // last vertex of the simplex
+ Vertex_handle next_; // next vertex to push in suffix_
+ // 40 seems a conservative bound on the dimension of a Simplex_tree for now,
+ // as it would not fit on the biggest hard-drive.
+ boost::container::static_vector<Vertex_handle, 40> suffix_;
+ // static_vector still has some overhead compared to a trivial hand-made
+ // version using std::aligned_storage, or compared to making suffix_ static.
+ Siblings * sib_; // where the next search will start from
+ std::pair<Simplex_handle, Vertex_handle> baov_; // a pair containing the current Simplex_handle in the boundary and its opposite vertex
+ SimplexTree * st_; // the simplex tree containing the simplicial complex
+};
+
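A minimal sketch of iterating this new range (not part of the patch; it assumes <gudhi/Simplex_tree.h> and that the accessor added to Simplex_tree at the top of this patch is named boundary_opposite_vertex_simplex_range(), consistent with the range type it returns):

#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1, 2}, 1.0);
  auto triangle = st.find({0, 1, 2});
  // Each element pairs a boundary facet of {0,1,2} with its opposite vertex.
  for (const auto& bv : st.boundary_opposite_vertex_simplex_range(triangle)) {
    for (auto v : st.simplex_vertex_range(bv.first)) std::cout << v << " ";
    std::cout << "| opposite vertex: " << bv.second << std::endl;
  }
  return 0;
}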
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the simplices of a simplicial complex.
+/** \brief Iterator over the simplices of a simplicial complex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
template<class SimplexTree>
@@ -253,7 +364,7 @@ class Simplex_tree_complex_simplex_iterator : public boost::iterator_facade<
SimplexTree * st_;
};
-/* \brief Iterator over the simplices of the skeleton of a given
+/** \brief Iterator over the simplices of the skeleton of a given
* dimension of the simplicial complex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
@@ -336,7 +447,8 @@ class Simplex_tree_skeleton_simplex_iterator : public boost::iterator_facade<
int curr_dim_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_ITERATORS_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
index ae140859..63023daa 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
@@ -15,16 +15,15 @@
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Represents a node of a Simplex_tree.
* @{
*/
-/*
- * \brief Node of a simplex tree with filtration value
+/** \brief Node of a simplex tree with filtration value
* and simplex key.
*
- * It stores explicitely its own filtration value and its own Simplex_key.
+ * It stores explicitly its own filtration value and its own Simplex_key.
*/
template<class SimplexTree>
struct Simplex_tree_node_explicit_storage : SimplexTree::Filtration_simplex_base, SimplexTree::Key_simplex_base {
@@ -54,7 +53,8 @@ struct Simplex_tree_node_explicit_storage : SimplexTree::Filtration_simplex_base
Siblings * children_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_NODE_EXPLICIT_STORAGE_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
index b53bad29..d849eeba 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
@@ -20,12 +20,12 @@
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Represents a set of node of a Simplex_tree that share the same parent.
* @{
*/
-/* \brief Data structure to store a set of nodes in a SimplexTree sharing
+/** \brief Data structure to store a set of nodes in a SimplexTree sharing
* the same parent node.*/
template<class SimplexTree, class MapContainer>
class Simplex_tree_siblings {
@@ -36,6 +36,7 @@ class Simplex_tree_siblings {
template<class T> friend class Simplex_tree_boundary_simplex_iterator;
template<class T> friend class Simplex_tree_complex_simplex_iterator;
template<class T> friend class Simplex_tree_skeleton_simplex_iterator;
+ template<class T> friend class Simplex_tree_boundary_opposite_vertex_simplex_iterator;
typedef typename SimplexTree::Vertex_handle Vertex_handle;
typedef typename SimplexTree::Filtration_value Filtration_value;
@@ -57,7 +58,7 @@ class Simplex_tree_siblings {
members_() {
}
- /* \brief Constructor with initialized set of members.
+ /** \brief Constructor with initialized set of members.
*
* 'members' must be sorted and unique.*/
template<typename RandomAccessVertexRange>
@@ -71,8 +72,7 @@ class Simplex_tree_siblings {
}
}
- /*
- * \brief Inserts a Node in the set of siblings nodes.
+ /** \brief Inserts a Node in the set of siblings nodes.
*
* If already present, assigns the minimal filtration value
* between input filtration_value and the value already
@@ -113,7 +113,8 @@ class Simplex_tree_siblings {
Dictionary members_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_SIBLINGS_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h b/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
index 3e395ae2..29c76e50 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
@@ -20,7 +20,7 @@ namespace Gudhi {
struct linear_indexing_tag {
};
-/* \brief Tag for a zigzag ordering of simplices. */
+/** \brief Tag for a zigzag ordering of simplices. */
// struct zigzag_indexing_tag {};
} // namespace Gudhi
diff --git a/src/Simplex_tree/test/CMakeLists.txt b/src/Simplex_tree/test/CMakeLists.txt
index 8b9163f5..25b562e0 100644
--- a/src/Simplex_tree/test/CMakeLists.txt
+++ b/src/Simplex_tree/test/CMakeLists.txt
@@ -28,3 +28,15 @@ if (TBB_FOUND)
target_link_libraries(Simplex_tree_ctor_and_move_test_unit ${TBB_LIBRARIES})
endif()
gudhi_add_boost_test(Simplex_tree_ctor_and_move_test_unit)
+
+add_executable ( Simplex_tree_make_filtration_non_decreasing_test_unit simplex_tree_make_filtration_non_decreasing_unit_test.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Simplex_tree_make_filtration_non_decreasing_test_unit ${TBB_LIBRARIES})
+endif()
+gudhi_add_boost_test(Simplex_tree_make_filtration_non_decreasing_test_unit)
+
+add_executable ( Simplex_tree_graph_expansion_test_unit simplex_tree_graph_expansion_unit_test.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Simplex_tree_graph_expansion_test_unit ${TBB_LIBRARIES})
+endif()
+gudhi_add_boost_test(Simplex_tree_graph_expansion_test_unit)
diff --git a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
index c0615b12..f6118fe0 100644
--- a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
@@ -30,16 +30,16 @@ void print_simplex_filtration(Simplex_tree& st, const std::string& msg) {
// Required before browsing through filtration values
st.initialize_filtration();
- std::cout << "********************************************************************\n";
- std::cout << "* " << msg << "\n";
- std::cout << "* The complex contains " << st.num_simplices() << " simplices";
- std::cout << " - dimension " << st.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "********************************************************************\n";
+ std::clog << "* " << msg << "\n";
+ std::clog << "* The complex contains " << st.num_simplices() << " simplices";
+ std::clog << " - dimension " << st.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " "
+ std::clog << " "
<< "[" << st.filtration(f_simplex) << "] ";
- for (auto vertex : st.simplex_vertex_range(f_simplex)) std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ for (auto vertex : st.simplex_vertex_range(f_simplex)) std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
}
@@ -70,8 +70,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
print_simplex_filtration(st, "Default Simplex_tree is initialized");
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF COPY CONSTRUCTOR" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF COPY CONSTRUCTOR" << std::endl;
Simplex_tree st1(st);
Simplex_tree st2(st);
@@ -82,8 +82,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
BOOST_CHECK(st == st2);
BOOST_CHECK(st1 == st);
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF COPY ASSIGNMENT" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF COPY ASSIGNMENT" << std::endl;
Simplex_tree st3;
// To check there is no memory leak
st3.insert_simplex_and_subfaces({9, 10, 11}, 200.0);
@@ -98,13 +98,21 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
BOOST_CHECK(st == st4);
BOOST_CHECK(st3 == st);
+#ifdef __clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
+#endif
st = st;
- print_simplex_filtration(st4, "Third self copy assignment from the default Simplex_tree");
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+
+ print_simplex_filtration(st, "Third self copy assignment from the default Simplex_tree");
BOOST_CHECK(st3 == st);
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF MOVE CONSTRUCTOR" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF MOVE CONSTRUCTOR" << std::endl;
Simplex_tree st5(std::move(st1));
print_simplex_filtration(st5, "First move constructor from the default Simplex_tree");
print_simplex_filtration(st1, "First moved Simplex_tree shall be empty");
@@ -122,8 +130,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
BOOST_CHECK(empty_st == st2);
BOOST_CHECK(st1 == empty_st);
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF MOVE ASSIGNMENT" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF MOVE ASSIGNMENT" << std::endl;
Simplex_tree st7;
// To check there is no memory leak
diff --git a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
index fab25eb8..54e23204 100644
--- a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
@@ -9,33 +9,62 @@
*/
#include <iostream>
-#include <fstream>
-#include <string>
-#include <algorithm>
-#include <utility> // std::pair, std::make_pair
-#include <cmath> // float comparison
-#include <limits>
-#include <functional> // greater
+#include <vector>
#define BOOST_TEST_DYN_LINK
-#define BOOST_TEST_MODULE "simplex_tree"
+#define BOOST_TEST_MODULE "simplex_tree_graph_expansion"
#include <boost/test/unit_test.hpp>
#include <boost/mpl/list.hpp>
-// ^
-// /!\ Nothing else from Simplex_tree shall be included to test includes are well defined.
#include "gudhi/Simplex_tree.h"
+#include <gudhi/Unitary_tests_utils.h>
using namespace Gudhi;
typedef boost::mpl::list<Simplex_tree<>, Simplex_tree<Simplex_tree_options_fast_persistence>> list_of_tested_variants;
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_all_is_blocked, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_all_is_blocked\n";
+ std::clog << "********************************************************************\n";
+ using Simplex_handle = typename typeST::Simplex_handle;
+ // Construct the Simplex Tree with a 1-skeleton graph example
+ typeST simplex_tree;
+
+ simplex_tree.insert_simplex({0, 1}, 0.);
+ simplex_tree.insert_simplex({0, 2}, 1.);
+ simplex_tree.insert_simplex({0, 3}, 2.);
+ simplex_tree.insert_simplex({1, 2}, 3.);
+ simplex_tree.insert_simplex({1, 3}, 4.);
+ simplex_tree.insert_simplex({2, 3}, 5.);
+ simplex_tree.insert_simplex({2, 4}, 6.);
+ simplex_tree.insert_simplex({3, 6}, 7.);
+ simplex_tree.insert_simplex({4, 5}, 8.);
+ simplex_tree.insert_simplex({4, 6}, 9.);
+ simplex_tree.insert_simplex({5, 6}, 10.);
+ simplex_tree.insert_simplex({6}, 10.);
+
+ typeST stree_copy = simplex_tree;
+
+ simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){ return true; });
+
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
+ }
-bool AreAlmostTheSame(float a, float b) {
- return std::fabs(a - b) < std::numeric_limits<float>::epsilon();
+ BOOST_CHECK(stree_copy == simplex_tree);
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_blockers_3\n";
+ std::clog << "********************************************************************\n";
using Simplex_handle = typename typeST::Simplex_handle;
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -55,49 +84,54 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, li
simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){
bool result = false;
- std::cout << "Blocker on [";
+ std::clog << "Blocker on [";
// User can loop on the vertices from the given simplex_handle i.e.
for (auto vertex : simplex_tree.simplex_vertex_range(sh)) {
// We block the expansion, if the vertex '6' is in the given list of vertices
if (vertex == 6)
result = true;
- std::cout << vertex << ", ";
+ std::clog << vertex << ", ";
}
- std::cout << "] ( " << simplex_tree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ std::clog << "] ( " << simplex_tree.filtration(sh);
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.);
- std::cout << " + 1. ) = " << result << std::endl;
+ std::clog << " + 1. ) = " << result << std::endl;
return result;
});
- std::cout << "********************************************************************\n";
- std::cout << "simplex_tree_expansion_with_blockers_3\n";
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
- std::cout << " - dimension " << simplex_tree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
- std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
BOOST_CHECK(simplex_tree.num_simplices() == 23);
BOOST_CHECK(simplex_tree.dimension() == 3);
// {4, 5, 6} shall be blocked
BOOST_CHECK(simplex_tree.find({4, 5, 6}) == simplex_tree.null_simplex());
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2,3})), 7.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2,3})),
+ static_cast<typename typeST::Filtration_value>(7.));
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_blockers_2\n";
+ std::clog << "********************************************************************\n";
using Simplex_handle = typename typeST::Simplex_handle;
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -117,48 +151,112 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, li
simplex_tree.expansion_with_blockers(2, [&](Simplex_handle sh){
bool result = false;
- std::cout << "Blocker on [";
+ std::clog << "Blocker on [";
// User can loop on the vertices from the given simplex_handle i.e.
for (auto vertex : simplex_tree.simplex_vertex_range(sh)) {
// We block the expansion, if the vertex '6' is in the given list of vertices
if (vertex == 6)
result = true;
- std::cout << vertex << ", ";
+ std::clog << vertex << ", ";
}
- std::cout << "] ( " << simplex_tree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ std::clog << "] ( " << simplex_tree.filtration(sh);
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.);
- std::cout << " + 1. ) = " << result << std::endl;
+ std::clog << " + 1. ) = " << result << std::endl;
return result;
});
- std::cout << "********************************************************************\n";
- std::cout << "simplex_tree_expansion_with_blockers_2\n";
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
- std::cout << " - dimension " << simplex_tree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
- std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
BOOST_CHECK(simplex_tree.num_simplices() == 22);
BOOST_CHECK(simplex_tree.dimension() == 2);
// {4, 5, 6} shall be blocked
BOOST_CHECK(simplex_tree.find({4, 5, 6}) == simplex_tree.null_simplex());
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
BOOST_CHECK(simplex_tree.find({0,1,2,3}) == simplex_tree.null_simplex());
}
-BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_variants) {
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_find_simplex_blockers, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_find_simplex_blockers\n";
+ std::clog << "********************************************************************\n";
+ using Simplex_handle = typename typeST::Simplex_handle;
+ // Construct the Simplex Tree with a 1-skeleton graph example
+ typeST simplex_tree;
+
+ simplex_tree.insert_simplex({0, 1}, 0.);
+ simplex_tree.insert_simplex({0, 2}, 1.);
+ simplex_tree.insert_simplex({0, 3}, 2.);
+ simplex_tree.insert_simplex({1, 2}, 3.);
+ simplex_tree.insert_simplex({1, 3}, 4.);
+ simplex_tree.insert_simplex({2, 3}, 5.);
+ simplex_tree.insert_simplex({2, 4}, 6.);
+ simplex_tree.insert_simplex({3, 6}, 7.);
+ simplex_tree.insert_simplex({4, 5}, 8.);
+ simplex_tree.insert_simplex({4, 6}, 9.);
+ simplex_tree.insert_simplex({5, 6}, 10.);
+ simplex_tree.insert_simplex({6}, 10.);
+
+ simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){
+ bool result = false;
+ std::clog << "Blocker on [";
+ std::vector<typename typeST::Vertex_handle> simplex;
+ // User can loop on the vertices from the given simplex_handle i.e.
+ for (auto vertex : simplex_tree.simplex_vertex_range(sh)) {
+ // We block the expansion if the vertex '1' is in the given list of vertices
+ if (vertex == 1)
+ result = true;
+ std::clog << vertex << ", ";
+ simplex.push_back(vertex);
+ }
+ std::clog << "] => " << result << std::endl;
+ // Not efficient, but checks that find() works from within a blocker - required by the python interface
+ BOOST_CHECK(simplex_tree.find(simplex) == sh);
+ return result;
+ });
+
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
+ }
+
+ BOOST_CHECK(simplex_tree.num_simplices() == 20);
+ BOOST_CHECK(simplex_tree.dimension() == 2);
+
+ // {1, 2, 3}, {0, 1, 2} and {0, 1, 3} shall be blocked as they contain vertex 1
+ BOOST_CHECK(simplex_tree.find({4, 5, 6}) != simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({1, 2, 3}) == simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 2, 3}) != simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 1, 2}) == simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 1, 3}) == simplex_tree.null_simplex());
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_3, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_3\n";
+ std::clog << "********************************************************************\n";
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -176,32 +274,38 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_var
simplex_tree.insert_simplex({6}, 10.);
simplex_tree.expansion(3);
- std::cout << "********************************************************************\n";
- std::cout << "simplex_tree_expansion_3\n";
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
- std::cout << " - dimension " << simplex_tree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
- std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
BOOST_CHECK(simplex_tree.num_simplices() == 24);
BOOST_CHECK(simplex_tree.dimension() == 3);
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({4,5,6})), 10.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 3.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2,3})), 5.));
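+ // GUDHI_TEST_FLOAT_EQUALITY_CHECK replaces the float-only AreAlmostTheSame
+ // helper with a comparison in the tested variant's own Filtration_value type
+ // (hence the static_casts below).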
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({4,5,6})),
+ static_cast<typename typeST::Filtration_value>(10.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(3.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_2\n";
+ std::clog << "********************************************************************\n";
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -220,26 +324,28 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_v
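+ // Same 1-skeleton, but expanded only up to dimension 2: the tetrahedron
+ // {0, 1, 2, 3} must not appear, as the last check below verifies.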
simplex_tree.expansion(2);
- std::cout << "********************************************************************\n";
- std::cout << "simplex_tree_expansion_2\n";
- std::cout << "********************************************************************\n";
- std::cout << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
- std::cout << " - dimension " << simplex_tree.dimension() << "\n";
- std::cout << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
- std::cout << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
- std::cout << "(" << vertex << ")";
- std::cout << std::endl;
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
}
BOOST_CHECK(simplex_tree.num_simplices() == 23);
BOOST_CHECK(simplex_tree.dimension() == 2);
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({4,5,6})), 10.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 3.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({4,5,6})),
+ static_cast<typename typeST::Filtration_value>(10.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(3.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
BOOST_CHECK(simplex_tree.find({0,1,2,3}) == simplex_tree.null_simplex());
}
diff --git a/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp
index 28c29489..20007488 100644
--- a/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_iostream_operator_unit_test.cpp
@@ -34,8 +34,8 @@ typedef boost::mpl::list<Simplex_tree<>,
> list_of_tested_variants;
BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_variants) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "SIMPLEX TREE IOSTREAM OPERATOR" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "SIMPLEX TREE IOSTREAM OPERATOR" << std::endl;
Stree_type st;
@@ -46,15 +46,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = "
+ std::clog << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = "
<< st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
// st:
@@ -75,15 +75,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari
simplex_tree_istream >> read_st;
// Display the Simplex_tree
- std::cout << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = "
+ std::clog << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = "
<< read_st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : read_st.filtration_simplex_range()) {
- std::cout << " " << "[" << read_st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << read_st.filtration(f_simplex) << "] ";
for (auto vertex : read_st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
BOOST_CHECK(st == read_st);
@@ -91,8 +91,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(iostream_operator, Stree_type, list_of_tested_vari
BOOST_AUTO_TEST_CASE(mini_iostream_operator) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "MINI SIMPLEX TREE IOSTREAM OPERATOR" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "MINI SIMPLEX TREE IOSTREAM OPERATOR" << std::endl;
Simplex_tree<MyOptions> st;
@@ -103,14 +103,14 @@ BOOST_AUTO_TEST_CASE(mini_iostream_operator) {
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = "
+ std::clog << "The ORIGINAL complex contains " << st.num_simplices() << " simplices - dimension = "
<< st.dimension() << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
// st:
@@ -131,15 +131,15 @@ BOOST_AUTO_TEST_CASE(mini_iostream_operator) {
simplex_tree_istream >> read_st;
// Display the Simplex_tree
- std::cout << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = "
+ std::clog << "The READ complex contains " << read_st.num_simplices() << " simplices - dimension = "
<< read_st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : read_st.filtration_simplex_range()) {
- std::cout << " " << "[" << read_st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << read_st.filtration(f_simplex) << "] ";
for (auto vertex : read_st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
BOOST_CHECK(st == read_st);
diff --git a/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp
new file mode 100644
index 00000000..e0e7cadf
--- /dev/null
+++ b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp
@@ -0,0 +1,148 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <iostream>
+#include <limits> // for std::numeric_limits (quiet_NaN)
+#include <cmath> // for std::isnan
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "simplex_tree_make_filtration_non_decreasing"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+// ^
+// /!\ Nothing else from Simplex_tree shall be included, to check that the header is self-contained.
+#include "gudhi/Simplex_tree.h"
+
+using namespace Gudhi;
+
+typedef boost::mpl::list<Simplex_tree<>, Simplex_tree<Simplex_tree_options_fast_persistence>> list_of_tested_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing, typeST, list_of_tested_variants) {
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
+ st.insert_simplex_and_subfaces({3, 0}, 2.0);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+
+ /* Inserted simplex: */
+ /* 1 */
+ /* o */
+ /* /X\ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+
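+ // Reminder: make_filtration_non_decreasing() returns true if and only if at
+ // least one filtration value had to be modified.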
+ std::clog << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+ // Because of the non-decreasing property of the simplex tree, { 0 }, { 1 } and { 0, 1 } are going to be set
+ // from value 2.0 to 1.0
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+ // Inserted simplex:
+ // 1 6
+ // o---o
+ // /X\7/
+ // o---o---o---o
+ // 2 0 3\X/4
+ // o
+ // 5
+
+ std::clog << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+ // Copy original simplex tree
+ typeST st_copy = st;
+
+ // Lower some filtration values of st; make_filtration_non_decreasing shall restore it to st_copy
+ st.assign_filtration(st.find({0,1,6,7}), 0.8);
+ st.assign_filtration(st.find({0,1,6}), 0.9);
+ st.assign_filtration(st.find({0,6}), 0.6);
+ st.assign_filtration(st.find({3,4,5}), 1.2);
+ st.assign_filtration(st.find({3,4}), 1.1);
+ st.assign_filtration(st.find({4,5}), 1.99);
+
+ std::clog << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_copy);
+
+ // Other simplex tree
+ typeST st_other;
+ st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st
+ st_other.insert_simplex_and_subfaces({3, 0}, 2.0);
+ st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+ st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+ // Modify a specific value of st; make_filtration_non_decreasing shall make it equal to st_other
+ st.assign_filtration(st.find({2}), 3.0);
+ // By modifying just the simplex {2}
+ // {0,1,2}, {1,2} and {0,2} will be modified
+
+ std::clog << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_other);
+
+ // Modify specific values of st while keeping the filtration non-decreasing
+ st.assign_filtration(st.find({0,1,2}), 10.0);
+ st.assign_filtration(st.find({0,2}), 9.0);
+ st.assign_filtration(st.find({0,1,6,7}), 50.0);
+ st.assign_filtration(st.find({0,1,6}), 49.0);
+ st.assign_filtration(st.find({0,1,7}), 48.0);
+ // Other copy simplex tree
+ typeST st_other_copy = st;
+
+ std::clog << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_other_copy);
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing_on_nan_values, typeST, list_of_tested_variants) {
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, std::numeric_limits<double>::quiet_NaN());
+ st.insert_simplex_and_subfaces({3, 0}, std::numeric_limits<double>::quiet_NaN());
+ st.insert_simplex_and_subfaces({3, 4, 5}, std::numeric_limits<double>::quiet_NaN());
+
+ /* Inserted simplex: */
+ /* 1 */
+ /* o */
+ /* /X\ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+
+ std::clog << "SPECIFIC CASE:" << std::endl;
+ std::clog << "Insertion with NaN values does not ensure the filtration values are non decreasing" << std::endl;
+ st.make_filtration_non_decreasing();
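+ // NaN compares false with every value, so there is no finite facet value to
+ // propagate: all the filtration values are expected to stay NaN.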
+
+ std::clog << "Check all filtration values are NaN" << std::endl;
+ for (auto f_simplex : st.complex_simplex_range()) {
+ BOOST_CHECK(std::isnan(st.filtration(f_simplex)));
+ }
+
+ st.assign_filtration(st.find({0}), 0.);
+ st.assign_filtration(st.find({1}), 0.);
+ st.assign_filtration(st.find({2}), 0.);
+ st.assign_filtration(st.find({3}), 0.);
+ st.assign_filtration(st.find({4}), 0.);
+ st.assign_filtration(st.find({5}), 0.);
+
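+ // Once the vertices carry finite values, make_filtration_non_decreasing() can
+ // propagate defined values to all their cofaces.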
+ std::clog << "Check make_filtration_non_decreasing is modifying the simplicial complex" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+
+ std::clog << "Check all filtration values are now defined" << std::endl;
+ for (auto f_simplex : st.complex_simplex_range()) {
+ BOOST_CHECK(!std::isnan(st.filtration(f_simplex)));
+ }
+}
diff --git a/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp
index 97347992..36b8b3c6 100644
--- a/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_remove_unit_test.cpp
@@ -32,8 +32,8 @@ using Mini_stree = Simplex_tree<MyOptions>;
using Stree = Simplex_tree<>;
BOOST_AUTO_TEST_CASE(remove_maximal_simplex) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "REMOVE MAXIMAL SIMPLEX" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "REMOVE MAXIMAL SIMPLEX" << std::endl;
Mini_stree st;
@@ -66,21 +66,21 @@ BOOST_AUTO_TEST_CASE(remove_maximal_simplex) {
// 5
#ifdef GUDHI_DEBUG
- std::cout << "Check exception throw in debug mode" << std::endl;
+ std::clog << "Check exception throw in debug mode" << std::endl;
  // throws an exception because sh has children
BOOST_CHECK_THROW (st.remove_maximal_simplex(st.find({0, 1, 6})), std::invalid_argument);
BOOST_CHECK_THROW (st.remove_maximal_simplex(st.find({3})), std::invalid_argument);
BOOST_CHECK(st == st_complete);
#endif
- std::cout << "st.remove_maximal_simplex({0, 2})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 2})" << std::endl;
st.remove_maximal_simplex(st.find({0, 2}));
- std::cout << "st.remove_maximal_simplex({0, 1, 2})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 1, 2})" << std::endl;
st.remove_maximal_simplex(st.find({0, 1, 2}));
- std::cout << "st.remove_maximal_simplex({1, 2})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 2})" << std::endl;
st.remove_maximal_simplex(st.find({1, 2}));
- std::cout << "st.remove_maximal_simplex({2})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({2})" << std::endl;
st.remove_maximal_simplex(st.find({2}));
- std::cout << "st.remove_maximal_simplex({3})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({3})" << std::endl;
st.remove_maximal_simplex(st.find({0, 3}));
BOOST_CHECK(st == st_pruned);
@@ -102,39 +102,39 @@ BOOST_AUTO_TEST_CASE(remove_maximal_simplex) {
// 5
 // Remove all simplices containing vertex 7 to test both remove_maximal_simplex cases (when _members is empty or not)
- std::cout << "st.remove_maximal_simplex({0, 1, 6, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 1, 6, 7})" << std::endl;
st.remove_maximal_simplex(st.find({0, 1, 6, 7}));
- std::cout << "st.remove_maximal_simplex({0, 1, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 1, 7})" << std::endl;
st.remove_maximal_simplex(st.find({0, 1, 7}));
- std::cout << "st.remove_maximal_simplex({0, 6, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 6, 7})" << std::endl;
st.remove_maximal_simplex(st.find({0, 6, 7}));
- std::cout << "st.remove_maximal_simplex({0, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 7})" << std::endl;
st.remove_maximal_simplex(st.find({0, 7}));
- std::cout << "st.remove_maximal_simplex({1, 6, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 6, 7})" << std::endl;
st.remove_maximal_simplex(st.find({1, 6, 7}));
- std::cout << "st.remove_maximal_simplex({1, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 7})" << std::endl;
st.remove_maximal_simplex(st.find({1, 7}));
- std::cout << "st.remove_maximal_simplex({6, 7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({6, 7})" << std::endl;
st.remove_maximal_simplex(st.find({6, 7}));
- std::cout << "st.remove_maximal_simplex({7})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({7})" << std::endl;
st.remove_maximal_simplex(st.find({7}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
// Check dimension calls lower_upper_bound_dimension to recompute dimension
BOOST_CHECK(st.dimension() == 2);
BOOST_CHECK(st.upper_bound_dimension() == 2);
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension()
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension()
<< " | st_wo_seven.upper_bound_dimension()=" << st_wo_seven.upper_bound_dimension() << std::endl;
- std::cout << "st.dimension()=" << st.dimension() << " | st_wo_seven.dimension()=" << st_wo_seven.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << " | st_wo_seven.dimension()=" << st_wo_seven.dimension() << std::endl;
BOOST_CHECK(st == st_wo_seven);
}
BOOST_AUTO_TEST_CASE(auto_dimension_set) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "DIMENSION ON REMOVE MAXIMAL SIMPLEX" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "DIMENSION ON REMOVE MAXIMAL SIMPLEX" << std::endl;
Mini_stree st;
@@ -148,80 +148,80 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) {
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({6, 7, 8, 10})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({6, 7, 8, 10})" << std::endl;
st.remove_maximal_simplex(st.find({6, 7, 8, 10}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({6, 7, 8, 9})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({6, 7, 8, 9})" << std::endl;
st.remove_maximal_simplex(st.find({6, 7, 8, 9}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl;
st.remove_maximal_simplex(st.find({1, 2, 3, 4}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl;
st.remove_maximal_simplex(st.find({1, 2, 3, 5}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 2);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
- std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl;
st.insert_simplex_and_subfaces({1, 2, 3, 5});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl;
st.insert_simplex_and_subfaces({1, 2, 3, 4});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 2, 3, 5})" << std::endl;
st.remove_maximal_simplex(st.find({1, 2, 3, 5}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({1, 2, 3, 4})" << std::endl;
st.remove_maximal_simplex(st.find({1, 2, 3, 4}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 2);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
- std::cout << "st.insert_simplex_and_subfaces({0, 1, 3, 4})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({0, 1, 3, 4})" << std::endl;
st.insert_simplex_and_subfaces({0, 1, 3, 4});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.remove_maximal_simplex({0, 1, 3, 4})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 1, 3, 4})" << std::endl;
st.remove_maximal_simplex(st.find({0, 1, 3, 4}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 2);
- std::cout << "st.dimension()=" << st.dimension() << std::endl;
+ std::clog << "st.dimension()=" << st.dimension() << std::endl;
- std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 5})" << std::endl;
st.insert_simplex_and_subfaces({1, 2, 3, 5});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({1, 2, 3, 4})" << std::endl;
st.insert_simplex_and_subfaces({1, 2, 3, 4});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
@@ -229,7 +229,7 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) {
// Check you can override the dimension
// This is a limit test case - shall not happen
st.set_dimension(1);
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 1);
 // check dimension() and lower_upper_bound_dimension() are not giving the right answer because dimension is too low
BOOST_CHECK(st.dimension() == 1);
@@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) {
// Check you can override the dimension
// This is a limit test case - shall not happen
st.set_dimension(6);
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 6);
 // check dimension() does not launch lower_upper_bound_dimension()
BOOST_CHECK(st.dimension() == 6);
@@ -246,27 +246,27 @@ BOOST_AUTO_TEST_CASE(auto_dimension_set) {
// Reset with the correct value
st.set_dimension(3);
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == 3);
- std::cout << "st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6})" << std::endl;
+ std::clog << "st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6})" << std::endl;
st.insert_simplex_and_subfaces({0, 1, 2, 3, 4, 5, 6});
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 6);
BOOST_CHECK(st.dimension() == 6);
- std::cout << "st.remove_maximal_simplex({0, 1, 2, 3, 4, 5, 6})" << std::endl;
+ std::clog << "st.remove_maximal_simplex({0, 1, 2, 3, 4, 5, 6})" << std::endl;
st.remove_maximal_simplex(st.find({0, 1, 2, 3, 4, 5, 6}));
- std::cout << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "st.upper_bound_dimension()=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 6);
BOOST_CHECK(st.dimension() == 5);
}
BOOST_AUTO_TEST_CASE(prune_above_filtration) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "PRUNE ABOVE FILTRATION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "PRUNE ABOVE FILTRATION" << std::endl;
Stree st;
@@ -321,15 +321,15 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) {
BOOST_CHECK(!simplex_is_changed);
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices";
- std::cout << " - dimension " << st.dimension() << std::endl;
- std::cout << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices";
+ std::clog << " - dimension " << st.dimension() << std::endl;
+ std::clog << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
// Check the pruned cases
@@ -340,15 +340,15 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) {
BOOST_CHECK(simplex_is_changed);
// Display the Simplex_tree
- std::cout << "The complex pruned at 2.5 contains " << st.num_simplices() << " simplices";
- std::cout << " - dimension " << st.dimension() << std::endl;
+ std::clog << "The complex pruned at 2.5 contains " << st.num_simplices() << " simplices";
+ std::clog << " - dimension " << st.dimension() << std::endl;
simplex_is_changed = st.prune_above_filtration(2.0);
if (simplex_is_changed)
st.initialize_filtration();
- std::cout << "The complex pruned at 2.0 contains " << st.num_simplices() << " simplices";
- std::cout << " - dimension " << st.dimension() << std::endl;
+ std::clog << "The complex pruned at 2.0 contains " << st.num_simplices() << " simplices";
+ std::clog << " - dimension " << st.dimension() << std::endl;
BOOST_CHECK(st == st_pruned);
BOOST_CHECK(!simplex_is_changed);
@@ -360,12 +360,12 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) {
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices";
- std::cout << " - upper_bound_dimension " << st.upper_bound_dimension() << std::endl;
+ std::clog << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices";
+ std::clog << " - upper_bound_dimension " << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == 3);
BOOST_CHECK(st.dimension() == -1);
- std::cout << "upper_bound_dimension=" << st.upper_bound_dimension() << std::endl;
+ std::clog << "upper_bound_dimension=" << st.upper_bound_dimension() << std::endl;
BOOST_CHECK(st.upper_bound_dimension() == -1);
BOOST_CHECK(st == st_empty);
@@ -380,8 +380,8 @@ BOOST_AUTO_TEST_CASE(prune_above_filtration) {
}
BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "MINI PRUNE ABOVE FILTRATION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "MINI PRUNE ABOVE FILTRATION" << std::endl;
Mini_stree st;
@@ -402,7 +402,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) {
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
BOOST_CHECK(st.num_simplices() == 27);
 // Limit test case - with these options there is no filtration, which means the filtration value is 0
@@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) {
if (simplex_is_changed)
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The complex pruned at 1.0 contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << "The complex pruned at 1.0 contains " << st.num_simplices() << " simplices" << std::endl;
BOOST_CHECK(!simplex_is_changed);
BOOST_CHECK(st.num_simplices() == 27);
@@ -418,7 +418,7 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) {
if (simplex_is_changed)
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << "The complex pruned at 0.0 contains " << st.num_simplices() << " simplices" << std::endl;
BOOST_CHECK(!simplex_is_changed);
BOOST_CHECK(st.num_simplices() == 27);
@@ -427,11 +427,11 @@ BOOST_AUTO_TEST_CASE(mini_prune_above_filtration) {
if (simplex_is_changed)
st.initialize_filtration();
// Display the Simplex_tree
- std::cout << "The complex pruned at -1.0 contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << "The complex pruned at -1.0 contains " << st.num_simplices() << " simplices" << std::endl;
BOOST_CHECK(simplex_is_changed);
BOOST_CHECK(st.num_simplices() == 0);
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
}
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 58bfa8db..ebcc406c 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -17,6 +17,8 @@
#include <limits>
#include <functional> // greater
#include <tuple> // std::tie
+#include <iterator> // for std::distance
+#include <cstddef> // for std::size_t
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "simplex_tree"
@@ -48,22 +50,22 @@ void test_empty_simplex_tree(typeST& tst) {
template<class typeST>
void test_iterators_on_empty_simplex_tree(typeST& tst) {
- std::cout << "Iterator on vertices: " << std::endl;
+ std::clog << "Iterator on vertices: " << std::endl;
for (auto vertex : tst.complex_vertex_range()) {
- std::cout << "vertice:" << vertex << std::endl;
+ std::clog << "vertice:" << vertex << std::endl;
BOOST_CHECK(false); // shall be empty
}
- std::cout << "Iterator on simplices: " << std::endl;
+ std::clog << "Iterator on simplices: " << std::endl;
for (auto simplex : tst.complex_simplex_range()) {
 BOOST_CHECK(simplex != simplex); // shall be empty - the comparison avoids an unused-variable warning
}
- std::cout
+ std::clog
<< "Iterator on Simplices in the filtration, with [filtration value]:"
<< std::endl;
for (auto f_simplex : tst.filtration_simplex_range()) {
BOOST_CHECK(false); // shall be empty
- std::cout << "test_iterators_on_empty_simplex_tree - filtration="
+ std::clog << "test_iterators_on_empty_simplex_tree - filtration="
<< tst.filtration(f_simplex) << std::endl;
}
}
@@ -72,15 +74,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_when_empty, typeST, list_of_tested_va
typedef std::pair<typename typeST::Simplex_handle, bool> typePairSimplexBool;
typedef std::vector<typename typeST::Vertex_handle> typeVectorVertex;
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF DEFAULT CONSTRUCTOR" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF DEFAULT CONSTRUCTOR" << std::endl;
typeST st;
test_empty_simplex_tree(st);
test_iterators_on_empty_simplex_tree(st);
// TEST OF EMPTY INSERTION
- std::cout << "TEST OF EMPTY INSERTION" << std::endl;
+ std::clog << "TEST OF EMPTY INSERTION" << std::endl;
typeVectorVertex simplexVectorEmpty;
BOOST_CHECK(simplexVectorEmpty.empty() == true);
typePairSimplexBool returnEmptyValue = st.insert_simplex(simplexVectorEmpty, 0.0);
@@ -98,8 +100,8 @@ bool AreAlmostTheSame(float a, float b) {
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_from_file, typeST, list_of_tested_variants) {
// TEST OF INSERTION
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF SIMPLEX TREE FROM A FILE" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF SIMPLEX TREE FROM A FILE" << std::endl;
typeST st;
std::string inputFile("simplex_tree_for_unit_test.txt");
@@ -107,8 +109,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_from_file, typeST, list_of_tested_var
simplex_tree_stream >> st;
// Display the Simplex_tree
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
- std::cout << " - dimension " << st.dimension() << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << " - dimension " << st.dimension() << std::endl;
// Check
BOOST_CHECK(st.num_simplices() == 143353);
@@ -134,13 +136,13 @@ template<class typeST, class typeSimplex>
void test_simplex_tree_contains(typeST& simplexTree, typeSimplex& simplex, int pos) {
auto f_simplex = simplexTree.filtration_simplex_range().begin() + pos;
- std::cout << "test_simplex_tree_contains - filtration=" << simplexTree.filtration(*f_simplex) << "||" << simplex.second << std::endl;
+ std::clog << "test_simplex_tree_contains - filtration=" << simplexTree.filtration(*f_simplex) << "||" << simplex.second << std::endl;
BOOST_CHECK(AreAlmostTheSame(simplexTree.filtration(*f_simplex), simplex.second));
int simplexIndex = simplex.first.size() - 1;
std::sort(simplex.first.begin(), simplex.first.end()); // if the simplex wasn't sorted, the next test could fail
for (auto vertex : simplexTree.simplex_vertex_range(*f_simplex)) {
- std::cout << "test_simplex_tree_contains - vertex=" << vertex << "||" << simplex.first.at(simplexIndex) << std::endl;
+ std::clog << "test_simplex_tree_contains - vertex=" << vertex << "||" << simplex.first.at(simplexIndex) << std::endl;
BOOST_CHECK(vertex == simplex.first.at(simplexIndex));
BOOST_CHECK(simplexIndex >= 0);
simplexIndex--;
@@ -163,7 +165,7 @@ void set_and_test_simplex_tree_dim_fil(typeST& simplexTree, int vectorSize, cons
if (vectorSize > dim_max + 1) {
dim_max = vectorSize - 1;
simplexTree.set_dimension(dim_max);
- std::cout << " set_and_test_simplex_tree_dim_fil - dim_max=" << dim_max
+ std::clog << " set_and_test_simplex_tree_dim_fil - dim_max=" << dim_max
<< std::endl;
}
@@ -193,12 +195,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
dim_max = -1;
// TEST OF INSERTION
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF INSERTION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF INSERTION" << std::endl;
typeST st;
// ++ FIRST
- std::cout << " - INSERT 0" << std::endl;
+ std::clog << " - INSERT 0" << std::endl;
typeVectorVertex firstSimplexVector{0};
BOOST_CHECK(firstSimplexVector.size() == 1);
typeSimplex firstSimplex = std::make_pair(firstSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
@@ -209,7 +211,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 1);
// ++ SECOND
- std::cout << " - INSERT 1" << std::endl;
+ std::clog << " - INSERT 1" << std::endl;
typeVectorVertex secondSimplexVector{1};
BOOST_CHECK(secondSimplexVector.size() == 1);
typeSimplex secondSimplex = std::make_pair(secondSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
@@ -220,7 +222,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 2);
// ++ THIRD
- std::cout << " - INSERT (0,1)" << std::endl;
+ std::clog << " - INSERT (0,1)" << std::endl;
typeVectorVertex thirdSimplexVector{0, 1};
BOOST_CHECK(thirdSimplexVector.size() == 2);
typeSimplex thirdSimplex = std::make_pair(thirdSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
@@ -231,7 +233,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 2); // Not incremented !!
// ++ FOURTH
- std::cout << " - INSERT 2" << std::endl;
+ std::clog << " - INSERT 2" << std::endl;
typeVectorVertex fourthSimplexVector{2};
BOOST_CHECK(fourthSimplexVector.size() == 1);
typeSimplex fourthSimplex = std::make_pair(fourthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
@@ -242,7 +244,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 3);
// ++ FIFTH
- std::cout << " - INSERT (2,0)" << std::endl;
+ std::clog << " - INSERT (2,0)" << std::endl;
typeVectorVertex fifthSimplexVector{2, 0};
BOOST_CHECK(fifthSimplexVector.size() == 2);
typeSimplex fifthSimplex = std::make_pair(fifthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
@@ -253,7 +255,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !!
// ++ SIXTH
- std::cout << " - INSERT (2,1)" << std::endl;
+ std::clog << " - INSERT (2,1)" << std::endl;
typeVectorVertex sixthSimplexVector{2, 1};
BOOST_CHECK(sixthSimplexVector.size() == 2);
typeSimplex sixthSimplex = std::make_pair(sixthSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
@@ -264,7 +266,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !!
// ++ SEVENTH
- std::cout << " - INSERT (2,1,0)" << std::endl;
+ std::clog << " - INSERT (2,1,0)" << std::endl;
typeVectorVertex seventhSimplexVector{2, 1, 0};
BOOST_CHECK(seventhSimplexVector.size() == 3);
typeSimplex seventhSimplex = std::make_pair(seventhSimplexVector, Filtration_value(THIRD_FILTRATION_VALUE));
@@ -275,7 +277,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 3); // Not incremented !!
// ++ EIGHTH
- std::cout << " - INSERT 3" << std::endl;
+ std::clog << " - INSERT 3" << std::endl;
typeVectorVertex eighthSimplexVector{3};
BOOST_CHECK(eighthSimplexVector.size() == 1);
typeSimplex eighthSimplex = std::make_pair(eighthSimplexVector, Filtration_value(FIRST_FILTRATION_VALUE));
@@ -285,8 +287,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
set_and_test_simplex_tree_dim_fil(st, eighthSimplexVector.size(), eighthSimplex.second);
BOOST_CHECK(st.num_vertices() == (size_t) 4);
- // ++ NINETH
- std::cout << " - INSERT (3,0)" << std::endl;
+ // ++ NINTH
+ std::clog << " - INSERT (3,0)" << std::endl;
typeVectorVertex ninethSimplexVector{3, 0};
BOOST_CHECK(ninethSimplexVector.size() == 2);
typeSimplex ninethSimplex = std::make_pair(ninethSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
@@ -297,7 +299,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
BOOST_CHECK(st.num_vertices() == (size_t) 4); // Not incremented !!
// ++ TENTH
- std::cout << " - INSERT 0 (already inserted)" << std::endl;
+ std::clog << " - INSERT 0 (already inserted)" << std::endl;
typeVectorVertex tenthSimplexVector{0};
BOOST_CHECK(tenthSimplexVector.size() == 1);
// With a different filtration value
@@ -308,12 +310,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
// Simplex_handle = boost::container::flat_map< typeST::Vertex_handle, Node >::iterator
typename typeST::Simplex_handle shReturned = returnValue.first;
BOOST_CHECK(shReturned == typename typeST::Simplex_handle(nullptr));
- std::cout << "st.num_vertices()=" << st.num_vertices() << std::endl;
+ std::clog << "st.num_vertices()=" << st.num_vertices() << std::endl;
BOOST_CHECK(st.num_vertices() == (size_t) 4); // Not incremented !!
BOOST_CHECK(st.dimension() == dim_max);
// ++ ELEVENTH
- std::cout << " - INSERT (2,1,0) (already inserted)" << std::endl;
+ std::clog << " - INSERT (2,1,0) (already inserted)" << std::endl;
typeVectorVertex eleventhSimplexVector{2, 1, 0};
BOOST_CHECK(eleventhSimplexVector.size() == 3);
typeSimplex eleventhSimplex = std::make_pair(eleventhSimplexVector, Filtration_value(FOURTH_FILTRATION_VALUE));
@@ -343,35 +345,35 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
// [0.2] 3 0
// [0.3] 2 1 0
 // !! Be careful, simplices are sorted by filtration value on insertion !!
- std::cout << "simplex_tree_insertion - first - 0" << std::endl;
+ std::clog << "simplex_tree_insertion - first - 0" << std::endl;
test_simplex_tree_contains(st, firstSimplex, 0); // (0) -> 0
- std::cout << "simplex_tree_insertion - second - 1" << std::endl;
+ std::clog << "simplex_tree_insertion - second - 1" << std::endl;
test_simplex_tree_contains(st, secondSimplex, 1); // (1) -> 1
- std::cout << "simplex_tree_insertion - third - 4" << std::endl;
+ std::clog << "simplex_tree_insertion - third - 4" << std::endl;
test_simplex_tree_contains(st, thirdSimplex, 4); // (0,1) -> 4
- std::cout << "simplex_tree_insertion - fourth - 2" << std::endl;
+ std::clog << "simplex_tree_insertion - fourth - 2" << std::endl;
test_simplex_tree_contains(st, fourthSimplex, 2); // (2) -> 2
- std::cout << "simplex_tree_insertion - fifth - 5" << std::endl;
+ std::clog << "simplex_tree_insertion - fifth - 5" << std::endl;
test_simplex_tree_contains(st, fifthSimplex, 5); // (2,0) -> 5
- std::cout << "simplex_tree_insertion - sixth - 6" << std::endl;
+ std::clog << "simplex_tree_insertion - sixth - 6" << std::endl;
test_simplex_tree_contains(st, sixthSimplex, 6); //(2,1) -> 6
- std::cout << "simplex_tree_insertion - seventh - 8" << std::endl;
+ std::clog << "simplex_tree_insertion - seventh - 8" << std::endl;
test_simplex_tree_contains(st, seventhSimplex, 8); // (2,1,0) -> 8
- std::cout << "simplex_tree_insertion - eighth - 3" << std::endl;
+ std::clog << "simplex_tree_insertion - eighth - 3" << std::endl;
test_simplex_tree_contains(st, eighthSimplex, 3); // (3) -> 3
- std::cout << "simplex_tree_insertion - nineth - 7" << std::endl;
+ std::clog << "simplex_tree_insertion - ninth - 7" << std::endl;
test_simplex_tree_contains(st, ninethSimplex, 7); // (3,0) -> 7
 // Display the Simplex_tree - Cannot be done in the middle of two inserts
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
- std::cout << " - dimension " << st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << " - dimension " << st.dimension() << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
@@ -380,14 +382,14 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
typedef std::pair<typename typeST::Simplex_handle, bool> typePairSimplexBool;
typedef std::vector<typename typeST::Vertex_handle> typeVectorVertex;
typedef std::pair<typeVectorVertex, typename typeST::Filtration_value> typeSimplex;
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST OF RECURSIVE INSERTION" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF RECURSIVE INSERTION" << std::endl;
typeST st;
typePairSimplexBool returnValue;
int position = 0;
// ++ FIRST
- std::cout << " - INSERT (2,1,0)" << std::endl;
+ std::clog << " - INSERT (2,1,0)" << std::endl;
typeVectorVertex SimplexVector1{2, 1, 0};
BOOST_CHECK(SimplexVector1.size() == 3);
returnValue = st.insert_simplex_and_subfaces(SimplexVector1);
@@ -400,13 +402,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
std::sort(SimplexVector1.begin(), SimplexVector1.end(), std::greater<typename typeST::Vertex_handle>());
for (auto vertex : st.simplex_vertex_range(returnValue.first)) {
// Check returned Simplex_handle
- std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector1[position] << std::endl;
+ std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector1[position] << std::endl;
BOOST_CHECK(vertex == SimplexVector1[position]);
position++;
}
// ++ SECOND
- std::cout << " - INSERT 3" << std::endl;
+ std::clog << " - INSERT 3" << std::endl;
typeVectorVertex SimplexVector2{3};
BOOST_CHECK(SimplexVector2.size() == 1);
returnValue = st.insert_simplex_and_subfaces(SimplexVector2);
@@ -419,13 +421,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
std::sort(SimplexVector2.begin(), SimplexVector2.end(), std::greater<typename typeST::Vertex_handle>());
for (auto vertex : st.simplex_vertex_range(returnValue.first)) {
// Check returned Simplex_handle
- std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector2[position] << std::endl;
+ std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector2[position] << std::endl;
BOOST_CHECK(vertex == SimplexVector2[position]);
position++;
}
// ++ THIRD
- std::cout << " - INSERT (0,3)" << std::endl;
+ std::clog << " - INSERT (0,3)" << std::endl;
typeVectorVertex SimplexVector3{3, 0};
BOOST_CHECK(SimplexVector3.size() == 2);
returnValue = st.insert_simplex_and_subfaces(SimplexVector3);
@@ -438,13 +440,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
std::sort(SimplexVector3.begin(), SimplexVector3.end(), std::greater<typename typeST::Vertex_handle>());
for (auto vertex : st.simplex_vertex_range(returnValue.first)) {
// Check returned Simplex_handle
- std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector3[position] << std::endl;
+ std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector3[position] << std::endl;
BOOST_CHECK(vertex == SimplexVector3[position]);
position++;
}
// ++ FOURTH
- std::cout << " - INSERT (1,0) (already inserted)" << std::endl;
+ std::clog << " - INSERT (1,0) (already inserted)" << std::endl;
typeVectorVertex SimplexVector4{1, 0};
BOOST_CHECK(SimplexVector4.size() == 2);
returnValue = st.insert_simplex_and_subfaces(SimplexVector4);
@@ -455,7 +457,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
BOOST_CHECK(false == returnValue.second);
// ++ FIFTH
- std::cout << " - INSERT (3,4,5)" << std::endl;
+ std::clog << " - INSERT (3,4,5)" << std::endl;
typeVectorVertex SimplexVector5{3, 4, 5};
BOOST_CHECK(SimplexVector5.size() == 3);
returnValue = st.insert_simplex_and_subfaces(SimplexVector5);
@@ -468,13 +470,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
std::sort(SimplexVector5.begin(), SimplexVector5.end(), std::greater<typename typeST::Vertex_handle>());
for (auto vertex : st.simplex_vertex_range(returnValue.first)) {
// Check returned Simplex_handle
- std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector5[position] << std::endl;
+ std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector5[position] << std::endl;
BOOST_CHECK(vertex == SimplexVector5[position]);
position++;
}
// ++ SIXTH
- std::cout << " - INSERT (0,1,6,7)" << std::endl;
+ std::clog << " - INSERT (0,1,6,7)" << std::endl;
typeVectorVertex SimplexVector6{0, 1, 6, 7};
BOOST_CHECK(SimplexVector6.size() == 4);
returnValue = st.insert_simplex_and_subfaces(SimplexVector6);
@@ -487,7 +489,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
std::sort(SimplexVector6.begin(), SimplexVector6.end(), std::greater<typename typeST::Vertex_handle>());
for (auto vertex : st.simplex_vertex_range(returnValue.first)) {
// Check returned Simplex_handle
- std::cout << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector6[position] << std::endl;
+ std::clog << "vertex = " << vertex << " | vector[" << position << "] = " << SimplexVector6[position] << std::endl;
BOOST_CHECK(vertex == SimplexVector6[position]);
position++;
}
@@ -525,63 +527,63 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(NSimplexAndSubfaces_tree_insertion, typeST, list_o
// ------------------------------------------------------------------------------------------------------------------
typeVectorVertex simpleSimplexVector{1};
typename typeST::Simplex_handle simplexFound = st.find(simpleSimplexVector);
- std::cout << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {1} IN THE SIMPLEX TREE ?\n";
if (simplexFound != st.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
// Check it is found
BOOST_CHECK(simplexFound != st.null_simplex());
typeVectorVertex unknownSimplexVector{15};
simplexFound = st.find(unknownSimplexVector);
- std::cout << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {15} IN THE SIMPLEX TREE ?\n";
if (simplexFound != st.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
// Check it is NOT found
BOOST_CHECK(simplexFound == st.null_simplex());
simplexFound = st.find(SimplexVector6);
- std::cout << "**************IS THE SIMPLEX {0,1,6,7} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {0,1,6,7} IN THE SIMPLEX TREE ?\n";
if (simplexFound != st.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
// Check it is found
BOOST_CHECK(simplexFound != st.null_simplex());
typeVectorVertex otherSimplexVector{1, 15};
simplexFound = st.find(otherSimplexVector);
- std::cout << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {15,1} IN THE SIMPLEX TREE ?\n";
if (simplexFound != st.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
// Check it is NOT found
BOOST_CHECK(simplexFound == st.null_simplex());
typeVectorVertex invSimplexVector{1, 2, 0};
simplexFound = st.find(invSimplexVector);
- std::cout << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n";
+ std::clog << "**************IS THE SIMPLEX {1,2,0} IN THE SIMPLEX TREE ?\n";
if (simplexFound != st.null_simplex())
- std::cout << "***+ YES IT IS!\n";
+ std::clog << "***+ YES IT IS!\n";
else
- std::cout << "***- NO IT ISN'T\n";
+ std::clog << "***- NO IT ISN'T\n";
// Check it is found
BOOST_CHECK(simplexFound != st.null_simplex());
// Display the Simplex_tree - Can not be done in the middle of 2 inserts
- std::cout << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
- std::cout << " - dimension " << st.dimension() << std::endl;
- std::cout << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
+ std::clog << "The complex contains " << st.num_simplices() << " simplices" << std::endl;
+ std::clog << " - dimension " << st.dimension() << std::endl;
+ std::clog << std::endl << std::endl << "Iterator on Simplices in the filtration, with [filtration value]:" << std::endl;
for (auto f_simplex : st.filtration_simplex_range()) {
- std::cout << " " << "[" << st.filtration(f_simplex) << "] ";
+ std::clog << " " << "[" << st.filtration(f_simplex) << "] ";
for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::cout << (int) vertex << " ";
+ std::clog << (int) vertex << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
@@ -595,17 +597,17 @@ void test_cofaces(typeST& st, const std::vector<Vertex_handle>& expected, int di
for (auto simplex = cofaces.begin(); simplex != cofaces.end(); ++simplex) {
typename typeST::Simplex_vertex_range rg = st.simplex_vertex_range(*simplex);
for (auto vertex = rg.begin(); vertex != rg.end(); ++vertex) {
- std::cout << "(" << *vertex << ")";
+ std::clog << "(" << *vertex << ")";
}
- std::cout << std::endl;
+ std::clog << std::endl;
BOOST_CHECK(std::find(res.begin(), res.end(), *simplex) != res.end());
}
}
BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_variants) {
typedef std::vector<typename typeST::Vertex_handle> typeVectorVertex;
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST COFACE ALGORITHM" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST COFACE ALGORITHM" << std::endl;
typeST st;
typeVectorVertex SimplexVector{2, 1, 0};
@@ -631,7 +633,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
std::vector<typename typeST::Vertex_handle> simplex_result;
std::vector<typename typeST::Simplex_handle> result;
- std::cout << "First test - Star of (3):" << std::endl;
+ std::clog << "First test - Star of (3):" << std::endl;
simplex_result = {3};
result.push_back(st.find(simplex_result));
@@ -656,7 +658,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
vertex.push_back(1);
vertex.push_back(7);
- std::cout << "Second test - Star of (1,7): " << std::endl;
+ std::clog << "Second test - Star of (1,7): " << std::endl;
simplex_result = {7, 1};
result.push_back(st.find(simplex_result));
@@ -673,7 +675,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
test_cofaces(st, vertex, 0, result);
result.clear();
- std::cout << "Third test - 2-dimension Cofaces of simplex(1,7) : " << std::endl;
+ std::clog << "Third test - 2-dimension Cofaces of simplex(1,7) : " << std::endl;
simplex_result = {7, 1, 0};
result.push_back(st.find(simplex_result));
@@ -684,15 +686,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
test_cofaces(st, vertex, 1, result);
result.clear();
- std::cout << "Cofaces with a codimension too high (codimension + vetices > tree.dimension) :" << std::endl;
+ std::clog << "Cofaces with a codimension too high (codimension + vetices > tree.dimension) :" << std::endl;
test_cofaces(st, vertex, 5, result);
- //std::cout << "Cofaces with an empty codimension" << std::endl;
+ //std::clog << "Cofaces with an empty codimension" << std::endl;
//test_cofaces(st, vertex, -1, result);
- // std::cout << "Cofaces in an empty simplex tree" << std::endl;
+ // std::clog << "Cofaces in an empty simplex tree" << std::endl;
// typeST empty_tree;
// test_cofaces(empty_tree, vertex, 1, result);
- //std::cout << "Cofaces of an empty simplex" << std::endl;
+ //std::clog << "Cofaces of an empty simplex" << std::endl;
//vertex.clear();
// test_cofaces(st, vertex, 1, result);
@@ -700,8 +702,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(coface_on_simplex_tree, typeST, list_of_tested_var
BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_variants) {
typedef std::vector<typename typeST::Vertex_handle> typeVectorVertex;
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST COPY MOVE CONSTRUCTORS" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST COPY MOVE CONSTRUCTORS" << std::endl;
typeST st;
typeVectorVertex SimplexVector{2, 1, 0};
@@ -725,11 +727,11 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
/* o */
/* 5 */
- std::cout << "Printing st - address = " << &st << std::endl;
+ std::clog << "Printing st - address = " << &st << std::endl;
// Copy constructor
typeST st_copy = st;
- std::cout << "Printing a copy of st - address = " << &st_copy << std::endl;
+ std::clog << "Printing a copy of st - address = " << &st_copy << std::endl;
// Check the data are the same
BOOST_CHECK(st == st_copy);
@@ -738,7 +740,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
// Move constructor
typeST st_move = std::move(st);
- std::cout << "Printing a move of st - address = " << &st_move << std::endl;
+ std::clog << "Printing a move of st - address = " << &st_move << std::endl;
// Check the data are the same
BOOST_CHECK(st_move == st_copy);
@@ -753,7 +755,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(copy_move_on_simplex_tree, typeST, list_of_tested_
BOOST_CHECK(st.num_simplices() == 0);
BOOST_CHECK(st.num_vertices() == (size_t)0);
- std::cout << "Printing st once again- address = " << &st << std::endl;
+ std::clog << "Printing st once again- address = " << &st << std::endl;
}
template<class typeST>
@@ -768,22 +770,22 @@ void test_simplex_is_vertex(typeST& st, typename typeST::Simplex_handle sh, type
BOOST_AUTO_TEST_CASE(non_contiguous) {
typedef Simplex_tree<> typeST;
typedef typeST::Simplex_handle Simplex_handle;
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST NON-CONTIGUOUS VERTICES" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST NON-CONTIGUOUS VERTICES" << std::endl;
typeST st;
typeST::Vertex_handle e[] = {3,-7};
- std::cout << "Insert" << std::endl;
+ std::clog << "Insert" << std::endl;
st.insert_simplex_and_subfaces(e);
BOOST_CHECK(st.num_vertices() == 2);
BOOST_CHECK(st.num_simplices() == 3);
- std::cout << "Find" << std::endl;
+ std::clog << "Find" << std::endl;
Simplex_handle sh = st.find(e);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "Endpoints" << std::endl;
+ std::clog << "Endpoints" << std::endl;
auto p = st.endpoints(sh);
test_simplex_is_vertex(st, p.first, 3);
test_simplex_is_vertex(st, p.second, -7);
- std::cout << "Boundary" << std::endl;
+ std::clog << "Boundary" << std::endl;
auto&& b = st.boundary_simplex_range(sh);
auto i = std::begin(b);
test_simplex_is_vertex(st, *i, -7);
@@ -791,90 +793,6 @@ BOOST_AUTO_TEST_CASE(non_contiguous) {
BOOST_CHECK(++i == std::end(b));
}
-BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "MAKE FILTRATION NON DECREASING" << std::endl;
- typedef Simplex_tree<> typeST;
- typeST st;
-
- st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
- st.insert_simplex_and_subfaces({3, 0}, 2.0);
- st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
-
- /* Inserted simplex: */
- /* 1 */
- /* o */
- /* /X\ */
- /* o---o---o---o */
- /* 2 0 3\X/4 */
- /* o */
- /* 5 */
-
- std::cout << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
-
- // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0
- // to 1.0
- st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
-
- // Inserted simplex:
- // 1 6
- // o---o
- // /X\7/
- // o---o---o---o
- // 2 0 3\X/4
- // o
- // 5
-
- std::cout << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
-
- // Copy original simplex tree
- typeST st_copy = st;
-
- // Modify specific values for st to become like st_copy thanks to make_filtration_non_decreasing
- st.assign_filtration(st.find({0,1,6,7}), 0.8);
- st.assign_filtration(st.find({0,1,6}), 0.9);
- st.assign_filtration(st.find({0,6}), 0.6);
- st.assign_filtration(st.find({3,4,5}), 1.2);
- st.assign_filtration(st.find({3,4}), 1.1);
- st.assign_filtration(st.find({4,5}), 1.99);
-
- std::cout << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl;
- BOOST_CHECK(st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_copy);
-
- // Other simplex tree
- typeST st_other;
- st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st
- st_other.insert_simplex_and_subfaces({3, 0}, 2.0);
- st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
- st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
-
- // Modify specific values for st to become like st_other thanks to make_filtration_non_decreasing
- st.assign_filtration(st.find({2}), 3.0);
- // By modifying just the simplex {2}
- // {0,1,2}, {1,2} and {0,2} will be modified
-
- std::cout << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl;
- BOOST_CHECK(st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_other);
-
- // Modify specific values for st still to be non-decreasing
- st.assign_filtration(st.find({0,1,2}), 10.0);
- st.assign_filtration(st.find({0,2}), 9.0);
- st.assign_filtration(st.find({0,1,6,7}), 50.0);
- st.assign_filtration(st.find({0,1,6}), 49.0);
- st.assign_filtration(st.find({0,1,7}), 48.0);
- // Other copy simplex tree
- typeST st_other_copy = st;
-
- std::cout << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_other_copy);
-
-}
-
typedef boost::mpl::list<boost::adjacency_list<boost::setS, boost::vecS, boost::directedS,
boost::property<vertex_filtration_t, double>,
@@ -896,8 +814,8 @@ typedef boost::mpl::list<boost::adjacency_list<boost::setS, boost::vecS, boost::
boost::property<edge_filtration_t, double>>> list_of_graph_variants;
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insert_graph, Graph, list_of_graph_variants) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "INSERT GRAPH" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "INSERT GRAPH" << std::endl;
Graph g(3);
// filtration value 0 everywhere
@@ -924,18 +842,18 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insert_graph, Graph, list_of_graph_va
st2.insert_graph(g);
BOOST_CHECK(st2.num_simplices() == 6);
- std::cout << "st1 is" << std::endl;
- std::cout << st1 << std::endl;
+ std::clog << "st1 is" << std::endl;
+ std::clog << st1 << std::endl;
- std::cout << "st2 is" << std::endl;
- std::cout << st2 << std::endl;
+ std::clog << "st2 is" << std::endl;
+ std::clog << st2 << std::endl;
BOOST_CHECK(st1 == st2);
}
BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested_variants) {
- std::cout << "********************************************************************" << std::endl;
- std::cout << "TEST INSERT DUPLICATED VERTICES" << std::endl;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST INSERT DUPLICATED VERTICES" << std::endl;
typeST st;
typename typeST::Simplex_handle sh;
@@ -943,25 +861,25 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested
std::tie(sh, success) = st.insert_simplex_and_subfaces({1});
BOOST_CHECK(success);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
+ std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
BOOST_CHECK(st.dimension(sh) == 0);
std::tie(sh, success) = st.insert_simplex_and_subfaces({2, 2});
BOOST_CHECK(success);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
+ std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
BOOST_CHECK(st.dimension(sh) == 0);
std::tie(sh, success) = st.insert_simplex_and_subfaces({3, 3, 3});
BOOST_CHECK(success);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
+ std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
BOOST_CHECK(st.dimension(sh) == 0);
std::tie(sh, success) = st.insert_simplex_and_subfaces({4, 4, 4, 4});
BOOST_CHECK(success);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
+ std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
BOOST_CHECK(st.dimension(sh) == 0);
- std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
+ std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
<< " - num_simplices = " << st.num_simplices() << std::endl;
BOOST_CHECK(st.dimension() == 0);
BOOST_CHECK(st.num_simplices() == st.num_vertices());
@@ -969,10 +887,10 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested
std::tie(sh, success) = st.insert_simplex_and_subfaces({2, 1, 1, 2});
BOOST_CHECK(success);
BOOST_CHECK(sh != st.null_simplex());
- std::cout << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
+ std::clog << "st.dimension(sh)= " << st.dimension(sh) << std::endl;
BOOST_CHECK(st.dimension(sh) == 1);
- std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
+ std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
<< " - num_simplices = " << st.num_simplices() << std::endl;
BOOST_CHECK(st.dimension() == 1);
BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1);
@@ -982,9 +900,155 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested
BOOST_CHECK(!success);
BOOST_CHECK(sh == st.null_simplex());
- std::cout << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
+ std::clog << "dimension =" << st.dimension() << " - num_vertices = " << st.num_vertices()
<< " - num_simplices = " << st.num_simplices() << std::endl;
BOOST_CHECK(st.dimension() == 1);
BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1);
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(generators, typeST, list_of_tested_variants) {
+ std::cout << "********************************************************************" << std::endl;
+ std::cout << "TEST FIND GENERATORS" << std::endl;
+ {
+ typeST st;
+ st.insert_simplex_and_subfaces({0,1,2,3,4,5,6},0);
+ st.assign_filtration(st.find({0,2,4}), 10);
+ st.assign_filtration(st.find({1,5}), 20);
+ st.assign_filtration(st.find({1,2,4}), 30);
+ st.assign_filtration(st.find({3}), 5);
+ st.make_filtration_non_decreasing();
+ BOOST_CHECK(st.filtration(st.find({1,2}))==0);
+ BOOST_CHECK(st.filtration(st.find({0,1,2,3,4}))==30);
+ BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,1,2,3,4,5}))==st.find({1,2,4}));
+ BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,2,3}))==st.find({3}));
+ auto s=st.minimal_simplex_with_same_filtration(st.find({0,2,6}));
+ BOOST_CHECK(s==st.find({0})||s==st.find({2})||s==st.find({6}));
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({2}))==2);
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({1,5}))==st.null_vertex());
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({5,6}))>=5);
+ }
+ {
+ typeST st;
+ st.insert_simplex_and_subfaces({0,1}, 8);
+ st.insert_simplex_and_subfaces({0,2}, 10);
+ st.insert_simplex_and_subfaces({3,4}, 6);
+ st.insert_simplex_and_subfaces({1,2}, 5);
+ st.insert_simplex_and_subfaces({1,5}, 4);
+ st.insert_simplex_and_subfaces({0,5}, 3);
+ st.insert_simplex_and_subfaces({2,5}, 2);
+ st.insert_simplex_and_subfaces({1,3}, 9);
+ st.expansion(50);
+ BOOST_CHECK(st.edge_with_same_filtration(st.find({0,1,2,5}))==st.find({0,2}));
+ BOOST_CHECK(st.edge_with_same_filtration(st.find({1,5}))==st.find({1,5}));
+ }
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_reset_filtration, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST RESET FILTRATION" << std::endl;
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 3.);
+ st.insert_simplex_and_subfaces({3, 0}, 2.);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 3.);
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 4.);
+ /* Inserted simplex: */
+ /* 1 6 */
+ /* o---o */
+ /* /X\7/ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+
+ for (auto f_simplex : st.skeleton_simplex_range(3)) {
+ std::clog << "vertex = (";
+ for (auto vertex : st.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << ",";
+ }
+ std::clog << ") - filtration = " << st.filtration(f_simplex);
+ std::clog << " - dimension = " << st.dimension(f_simplex) << std::endl;
+ // Guaranteed by construction
+ BOOST_CHECK(st.filtration(f_simplex) >= 2.);
+ }
+
+ // Loop from dimension 5 down to 0, even though the simplex tree has dimension 3, to test the limits
+ for(int dimension = 5; dimension >= 0; dimension --) {
+ std::clog << "### reset_filtration - dimension = " << dimension << "\n";
+ st.reset_filtration(0., dimension);
+ for (auto f_simplex : st.skeleton_simplex_range(3)) {
+ std::clog << "vertex = (";
+ for (auto vertex : st.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << ",";
+ }
+ std::clog << ") - filtration = " << st.filtration(f_simplex);
+ std::clog << " - dimension = " << st.dimension(f_simplex) << std::endl;
+ if (st.dimension(f_simplex) < dimension)
+ BOOST_CHECK(st.filtration(f_simplex) >= 2.);
+ else
+ BOOST_CHECK(st.filtration(f_simplex) == 0.);
+ }
+ }
+
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_boundaries_and_opposite_vertex_iterator, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF BOUNDARIES AND OPPOSITE VERTEX ITERATORS" << std::endl;
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 3.);
+ st.insert_simplex_and_subfaces({3, 0}, 2.);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 3.);
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 4.);
+
+ /* Inserted simplex: */
+ /* 1 6 */
+ /* o---o */
+ /* /X\7/ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+ using Simplex = std::vector<typename typeST::Vertex_handle>;
+ // Simplices must be kept sorted by vertex number so that std::vector's operator== compares them reliably - cf. last BOOST_CHECK
+ std::vector<Simplex> simplices = {{0, 1, 2}, {0, 3}, {0, 1, 6, 7}, {3, 4, 5}, {3, 5}, {2}};
+ for (auto simplex : simplices) {
+ Simplex opposite_vertices;
+ for(auto boundary_and_opposite_vertex : st.boundary_opposite_vertex_simplex_range(st.find(simplex))) {
+ Simplex output;
+ for (auto vertex : st.simplex_vertex_range(boundary_and_opposite_vertex.first)) {
+ std::clog << vertex << " ";
+ output.emplace_back(vertex);
+ }
+ std::clog << " - opposite vertex = " << boundary_and_opposite_vertex.second << std::endl;
+ // Check that boundary simplex + opposite vertex = simplex given as input
+ output.emplace_back(boundary_and_opposite_vertex.second);
+ std::sort(output.begin(), output.end());
+ BOOST_CHECK(simplex == output);
+ opposite_vertices.emplace_back(boundary_and_opposite_vertex.second);
+ }
+ // Check that the list of all opposite vertices = simplex given as input
+ // No opposite vertices if the simplex given as input is of dimension 0 (a single vertex)
+ std::sort(opposite_vertices.begin(), opposite_vertices.end());
+ if (simplex.size() > 1)
+ BOOST_CHECK(simplex == opposite_vertices);
+ else
+ BOOST_CHECK(opposite_vertices.size() == 0);
+ }
+}
+
+BOOST_AUTO_TEST_CASE(batch_vertices) {
+ typedef Simplex_tree<> typeST;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST BATCH VERTEX INSERTION" << std::endl;
+ typeST st;
+ st.insert_simplex_and_subfaces({3}, 1.5);
+ std::vector verts { 2, 3, 5, 6 };
+ st.insert_batch_vertices(verts);
+ BOOST_CHECK(st.num_vertices() == 4);
+ BOOST_CHECK(st.num_simplices() == 4);
+ BOOST_CHECK(st.filtration(st.find({2})) == 0.);
+ BOOST_CHECK(st.filtration(st.find({3})) == 1.5);
}
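The generators test above exercises lookup methods that are new in this changeset: minimal_simplex_with_same_filtration, vertex_with_same_filtration and edge_with_same_filtration. A minimal sketch of the first two, assuming GUDHI's default Simplex_tree<> template arguments; the filtration values are illustrative only, not the canonical usage:

    #include <gudhi/Simplex_tree.h>
    #include <cassert>

    int main() {
      Gudhi::Simplex_tree<> st;
      st.insert_simplex_and_subfaces({0, 1, 2}, 0.);
      st.assign_filtration(st.find({0, 1}), 10.);
      st.make_filtration_non_decreasing();  // propagates 10. to the coface {0, 1, 2}
      // A minimal face of {0, 1, 2} carrying the same filtration value: the edge {0, 1}
      assert(st.minimal_simplex_with_same_filtration(st.find({0, 1, 2})) == st.find({0, 1}));
      // No vertex of {0, 1} has filtration 10., so null_vertex() is returned
      assert(st.vertex_with_same_filtration(st.find({0, 1})) == st.null_vertex());
      return 0;
    }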
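Similarly, a sketch of reset_filtration and insert_batch_vertices as the two new tests above use them, again with illustrative values:

    #include <gudhi/Simplex_tree.h>
    #include <cassert>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<> st;
      st.insert_simplex_and_subfaces({0, 1, 2}, 3.);
      // Batch insertion of vertices: vertex 2 already exists and keeps its value, new ones get 0.
      std::vector<int> verts{2, 3, 5, 6};
      st.insert_batch_vertices(verts);
      assert(st.filtration(st.find({3})) == 0.);
      // Simplices of dimension >= 1 are reset to 0.; as in the unit test, lower
      // dimensions keep their values, which can leave a non-monotone filtration.
      st.reset_filtration(0., 1);
      assert(st.filtration(st.find({0, 1})) == 0.);
      return 0;
    }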
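And a sketch of the boundary-with-opposite-vertex range checked above: each element pairs a codimension-1 face with the vertex that completes it back to the input simplex:

    #include <gudhi/Simplex_tree.h>
    #include <iostream>

    int main() {
      Gudhi::Simplex_tree<> st;
      st.insert_simplex_and_subfaces({0, 1, 2}, 1.);
      for (auto bov : st.boundary_opposite_vertex_simplex_range(st.find({0, 1, 2}))) {
        // bov.first is an edge of the triangle, bov.second the remaining vertex
        for (auto v : st.simplex_vertex_range(bov.first)) std::clog << v << " ";
        std::clog << "- opposite vertex = " << bov.second << std::endl;
      }
      return 0;
    }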
diff --git a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
index 0c2014bd..23eb3670 100644
--- a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
+++ b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
@@ -29,7 +29,7 @@ struct SkeletonBlockerDS {
/**
* @brief Root_vertex_handle and Vertex_handle are similar to global and local vertex descriptor
- * used in <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>
+ * used in <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>
* and allow to localize a vertex of a subcomplex on its parent root complex.
*
* Roughly speaking, vertices are stored in a vector
diff --git a/src/Skeleton_blocker/example/CMakeLists.txt b/src/Skeleton_blocker/example/CMakeLists.txt
index 0e5d2f11..456612df 100644
--- a/src/Skeleton_blocker/example/CMakeLists.txt
+++ b/src/Skeleton_blocker/example/CMakeLists.txt
@@ -7,7 +7,3 @@ add_executable(Skeleton_blocker_example_link Skeleton_blocker_link.cpp)
add_test(NAME Skeleton_blocker_example_from_simplices COMMAND $<TARGET_FILE:Skeleton_blocker_example_from_simplices>)
add_test(NAME Skeleton_blocker_example_iteration COMMAND $<TARGET_FILE:Skeleton_blocker_example_iteration>)
add_test(NAME Skeleton_blocker_example_link COMMAND $<TARGET_FILE:Skeleton_blocker_example_link>)
-
-install(TARGETS Skeleton_blocker_example_from_simplices DESTINATION bin)
-install(TARGETS Skeleton_blocker_example_iteration DESTINATION bin)
-install(TARGETS Skeleton_blocker_example_link DESTINATION bin)
diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp
index 486827eb..d04ca289 100644
--- a/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp
+++ b/src/Skeleton_blocker/example/Skeleton_blocker_from_simplices.cpp
@@ -35,13 +35,13 @@ int main(int argc, char *argv[]) {
Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces<Complex>(simplices.begin(), simplices.end()));
- std::cout << "Simplices:" << std::endl;
+ std::clog << "Simplices:" << std::endl;
for (const Simplex & s : complex.complex_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
+ std::clog << s << " ";
+ std::clog << std::endl;
// One blocker as simplex 0123 is not in the complex but all its proper faces are.
- std::cout << "Blockers: " << complex.blockers_to_string() << std::endl;
+ std::clog << "Blockers: " << complex.blockers_to_string() << std::endl;
// now build a complex from its full list of simplices
simplices.clear();
@@ -53,13 +53,13 @@ int main(int argc, char *argv[]) {
simplices.push_back(Simplex(Vertex_handle(2), Vertex_handle(0)));
complex = Complex(simplices.begin(), simplices.end());
- std::cout << "Simplices:" << std::endl;
+ std::clog << "Simplices:" << std::endl;
for (const Simplex & s : complex.complex_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
+ std::clog << s << " ";
+ std::clog << std::endl;
// One blocker as simplex 012 is not in the complex but all its proper faces are.
- std::cout << "Blockers: " << complex.blockers_to_string() << std::endl;
+ std::clog << "Blockers: " << complex.blockers_to_string() << std::endl;
return EXIT_SUCCESS;
}
diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp
index 7f301047..62084692 100644
--- a/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp
+++ b/src/Skeleton_blocker/example/Skeleton_blocker_iteration.cpp
@@ -45,7 +45,7 @@ int main(int argc, char *argv[]) {
// more appropriated!
unsigned num_vertices = 0;
for (auto v : complex.vertex_range()) {
- std::cout << "Vertex " << v << std::endl;
+ std::clog << "Vertex " << v << std::endl;
++num_vertices;
}
@@ -65,9 +65,9 @@ int main(int argc, char *argv[]) {
else
euler -= 1;
}
- std::cout << "Saw " << num_vertices << " vertices, " << num_edges << " edges and " << num_simplices << " simplices"
+ std::clog << "Saw " << num_vertices << " vertices, " << num_edges << " edges and " << num_simplices << " simplices"
<< std::endl;
- std::cout << "The Euler Characteristic is " << euler << std::endl;
- std::cout << skbl_chrono;
+ std::clog << "The Euler Characteristic is " << euler << std::endl;
+ std::clog << skbl_chrono;
return EXIT_SUCCESS;
}
diff --git a/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp b/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp
index e634b656..ba7ce43c 100644
--- a/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp
+++ b/src/Skeleton_blocker/example/Skeleton_blocker_link.cpp
@@ -32,25 +32,25 @@ int main(int argc, char *argv[]) {
Simplex tetrahedron(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3));
complex.add_simplex(tetrahedron);
- std::cout << "complex:" << complex.to_string() << std::endl;
+ std::clog << "complex:" << complex.to_string() << std::endl;
// build the link of vertex 1, eg a triangle {0,2,3}
auto link = complex.link(Vertex_handle(1));
- std::cout << "link:" << link.to_string() << std::endl;
+ std::clog << "link:" << link.to_string() << std::endl;
// Internally link is a subcomplex of 'complex' and its vertices are stored in a vector.
// They can be accessed via Vertex_handle(x) where x is an index of the vector.
// In that example, the link has three vertices and thus it contains only
// Vertex_handle(0), Vertex_handle(1) and Vertex_handle(2).
for (int i = 0; i < 5; ++i)
- std::cout << "link.contains_vertex(Vertex_handle(" << i << ")):" << link.contains_vertex(Vertex_handle(i)) <<
+ std::clog << "link.contains_vertex(Vertex_handle(" << i << ")):" << link.contains_vertex(Vertex_handle(i)) <<
std::endl;
- std::cout << std::endl;
+ std::clog << std::endl;
// To access to the initial vertices eg (0,1,2,3,4), Root_vertex_handle must be used.
// For instance, to test if the link contains the vertex that was labeled i:
for (int i = 0; i < 5; ++i)
- std::cout << "link.contains_vertex(Root_vertex_handle(" << i << ")):" <<
+ std::clog << "link.contains_vertex(Root_vertex_handle(" << i << ")):" <<
link.contains_vertex(Root_vertex_handle(i)) << std::endl;
return EXIT_SUCCESS;
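The local/global distinction this example revolves around can be condensed into a short sketch, assuming the Complex, Vertex_handle and Root_vertex_handle typedefs of Skeleton_blocker_link.cpp and <cassert> for the checks:

    // The link of vertex 1 in the tetrahedron is the triangle on the three other vertices.
    auto link = complex.link(Vertex_handle(1));
    // Vertex_handle indexes the link's own vertex vector, so only 0, 1 and 2 exist here...
    assert(!link.contains_vertex(Vertex_handle(3)));
    // ...while Root_vertex_handle refers to the labels of the parent complex.
    assert(link.contains_vertex(Root_vertex_handle(3)));   // vertex 3 lies in the link of 1
    assert(!link.contains_vertex(Root_vertex_handle(1)));  // vertex 1 is not in its own link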
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
index bcca851f..0fd56c67 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
@@ -52,8 +52,7 @@ when \f$ \tau \neq \sigma\f$ we say that \f$ \tau\f$ is a proper-face of \f$ \si
An abstract simplicial complex is a set of simplices that contains all the faces of its simplices.
The 1-skeleton of a simplicial complex (or its graph) consists of its elements of dimension lower than 2.
- *\image html "ds_representation.png" "Skeleton-blocker representation" width=20cm
-
+\image html "ds_representation.png" "Skeleton-blocker representation"
To encode a simplicial complex, one can encode all its simplices.
When this number gets too large,
@@ -73,11 +72,7 @@ For instance, the numbers of blockers is depicted for random 3-dimensional spher
in the next figure. Storing the graph and blockers of such simplicial complexes is in this case much more compact than storing
their simplices.
-
- *\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres" width=10cm
-
-
-
+\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres"
\section API
@@ -154,8 +149,8 @@ of a simplicial complex.
else
euler -= 1;
}
- std::cout << "Saw "<<num_vertices<<" vertices, "<<num_edges<<" edges and "<<num_simplices<<" simplices"<<std::endl;
- std::cout << "The Euler Characteristic is "<<euler<<std::endl;
+ std::clog << "Saw "<<num_vertices<<" vertices, "<<num_edges<<" edges and "<<num_simplices<<" simplices"<<std::endl;
+ std::clog << "The Euler Characteristic is "<<euler<<std::endl;
\endcode
@@ -182,13 +177,13 @@ The Euler Characteristic is 1
//get complex from top faces
make_complex_from_top_faces(complex,simplices.begin(),simplices.end());
- std::cout << "Simplices:"<<std::endl;
+ std::clog << "Simplices:"<<std::endl;
for(const Simplex & s : complex.star_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
+ std::clog << s << " ";
+ std::clog << std::endl;
//One blocker as simplex 0123 is not in the complex but all its proper faces are.
- std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
+ std::clog << "Blockers: "<<complex.blockers_to_string()<<std::endl;
//now build a complex from its full list of simplices
simplices.clear();
@@ -200,13 +195,13 @@ The Euler Characteristic is 1
simplices.push_back(Simplex(Vertex_handle(2),Vertex_handle(0)));
complex = Complex(simplices.begin(),simplices.end());
- std::cout << "Simplices:"<<std::endl;
+ std::clog << "Simplices:"<<std::endl;
for(const Simplex & s : complex.star_simplex_range())
- std::cout << s << " ";
- std::cout << std::endl;
+ std::clog << s << " ";
+ std::clog << std::endl;
//One blocker as simplex 012 is not in the complex but all its proper faces are.
- std::cout << "Blockers: "<<complex.blockers_to_string()<<std::endl;
+ std::clog << "Blockers: "<<complex.blockers_to_string()<<std::endl;
\endcode
\verbatim
./SkeletonBlockerFromSimplices
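To make the blocker definition concrete, a minimal sketch in the spirit of the snippet above (same Complex, Simplex and Vertex_handle typedefs): inserting only the four triangles bounding the tetrahedron {0,1,2,3} yields a single blocker, since {0,1,2,3} is absent while all of its proper faces are present.

    std::vector<Simplex> simplices;
    // the four boundary triangles of the tetrahedron {0,1,2,3}
    simplices.push_back(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2)));
    simplices.push_back(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3)));
    simplices.push_back(Simplex(Vertex_handle(0), Vertex_handle(2), Vertex_handle(3)));
    simplices.push_back(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)));
    Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces<Complex>(simplices.begin(),
                                                                                  simplices.end()));
    // prints the single blocker {0,1,2,3}
    std::clog << "Blockers: " << complex.blockers_to_string() << std::endl;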
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
index 0c0cc624..d091d7dd 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
@@ -28,7 +28,7 @@ namespace skeleton_blocker {
*/
struct Skeleton_blocker_simple_traits {
/**
- * @brief Global and local handle similar to <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>.
+ * @brief Global and local handle similar to <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>.
* Vertices are stored in a vector.
* For the root simplicial complex, the local and global descriptors are the same.
* For a subcomplex L and one of its vertices 'v', the local descriptor of 'v' is its position in
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
index 12fe6469..d83c0ab3 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
@@ -134,7 +134,7 @@ class Skeleton_blocker_simplex {
}
/**
- * Substracts a from the simplex.
+ * Subtracts a from the simplex.
*/
void difference(const Skeleton_blocker_simplex & a) {
std::vector<T> v;
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
index 4c48ff31..4c0c7dad 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
@@ -76,8 +76,8 @@ class Skeleton_blocker_sub_complex : public ComplexType {
public:
/**
* Add a vertex 'global' of K to L. When added to L, this vertex will receive
- * another number, addresses(global), its local adress.
- * return the adress where the vertex lay on L.
+ * another number, addresses(global), its local address.
+ * return the address where the vertex lay on L.
* The vertex corresponding to 'global' must not be already present
* in the complex.
*/
@@ -174,7 +174,7 @@ class Skeleton_blocker_sub_complex : public ComplexType {
// /**
// * Allocates a simplex in L corresponding to the simplex s in K
- // * with its local adresses and returns an AddressSimplex.
+ // * with its local addresses and returns an AddressSimplex.
// */
// boost::optional<Simplex> get_address(const Root_simplex_handle & s) const;
@@ -196,10 +196,8 @@ class Skeleton_blocker_sub_complex : public ComplexType {
};
/**
- * @remark remarque perte de temps a creer un nouveau simplexe a chaque fois
- * alors qu'on pourrait utiliser a la place de 'addresses_sigma_in_link'
- * un simplex avec des valeurs sp�ciales ComplexDS::null_vertex par exemple
- * pour indiquer qu'un vertex n'appartient pas au complex
+ * @remark It is a waste of time to create a new simplex each time; instead of 'addresses_sigma_in_link' we could use
+ * a simplex with special values (e.g. ComplexDS::null_vertex) to indicate that a vertex does not belong to the complex.
*/
template<typename ComplexType>
bool proper_face_in_union(
@@ -226,7 +224,7 @@ bool proper_face_in_union(
}
// Remark: this function should be friend in order to leave get_adresses private
-// however doing so seemes currently not possible due to a visual studio bug c2668
+// however doing so seems currently not possible due to a visual studio bug c2668
// "the compiler does not support partial ordering of template functions as specified in the C++ Standard"
// http://www.serkey.com/error-c2668-ambiguous-call-to-overloaded-function-bb45ft.html
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
index a43fa034..116bc779 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
@@ -107,7 +107,7 @@ struct Trie {
}
/**
- * Goes to the root in the trie to consitute simplex
+ * Goes to the root in the trie to constitute simplex
*/
void add_vertices_up_to_the_root(Simplex& res) const {
res.add_vertex(v);
@@ -150,7 +150,7 @@ struct Trie {
++s_pos;
while (s_pos != s.end() && current != 0) {
bool found = false;
- for (const auto child : current->childs) {
+ for (const auto& child : current->childs) {
if (child->v == *s_pos) {
++s_pos;
current = child.get();
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
index 37c0b4d3..2c49a1b8 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
@@ -21,7 +21,7 @@ namespace skeleton_blocker {
/**
* \brief Iterator over the triangles that are
* adjacent to a vertex of the simplicial complex.
- * \remark Will be removed soon -> dont look
+ * \remark Will be removed soon -> don't look
*/
template<typename Complex, typename LinkType>
class Triangle_around_vertex_iterator : public boost::iterator_facade
@@ -95,7 +95,7 @@ class Triangle_around_vertex_iterator : public boost::iterator_facade
/**
* \brief Iterator over the triangles of the
* simplicial complex.
- * \remark Will be removed soon -> dont look
+ * \remark Will be removed soon -> don't look
*
*/
template<typename SkeletonBlockerComplex>
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
index 125c6387..b4ffc756 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
@@ -438,7 +438,7 @@ class Skeleton_blocker_complex {
}
/**
- * return the id of a vertex of adress local present in the graph
+ * return the id of the vertex whose local address is 'local' in the graph
*/
Root_vertex_handle get_id(Vertex_handle local) const {
assert(0 <= local.vertex && local.vertex < boost::num_vertices(skeleton));
@@ -740,7 +740,7 @@ class Skeleton_blocker_complex {
* complex to the smallest flag complex that contains it.
*/
void remove_blockers() {
- // Desallocate the blockers
+ // Deallocate the blockers
while (!blocker_map_.empty()) {
delete_blocker(blocker_map_.begin()->second);
}
@@ -764,8 +764,8 @@ class Skeleton_blocker_complex {
public:
/**
- * Removes the simplex s from the set of blockers
- * and desallocate s.
+ * Removes the simplex sigma from the set of blockers
+ * and deallocates sigma.
*/
void delete_blocker(Blocker_handle sigma) {
if (visitor)
@@ -960,7 +960,7 @@ class Skeleton_blocker_complex {
}
/*
- * @brief returnrs true iff the complex is empty.
+ * @brief returns true iff the complex is empty.
*/
bool empty() const {
return num_vertices() == 0;
@@ -1043,7 +1043,7 @@ class Skeleton_blocker_complex {
if (num_vertices() == 1)
return true;
for (auto vi : vertex_range()) {
- // xxx todo faire une methode bool is_in_blocker(Vertex_handle)
+ // xxx todo create a method: bool is_in_blocker(Vertex_handle)
if (blocker_map_.find(vi) == blocker_map_.end()) {
// no blocker passes through the vertex, we just need to
// check if the current vertex is linked to all others vertices of the complex
@@ -1071,7 +1071,6 @@ class Skeleton_blocker_complex {
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
void remove_popable_blockers();
@@ -1103,7 +1102,6 @@ class Skeleton_blocker_complex {
public:
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
void remove_star(Vertex_handle a, Vertex_handle b);
@@ -1293,7 +1291,7 @@ class Skeleton_blocker_complex {
typedef boost::iterator_range<Complex_neighbors_vertices_iterator> Complex_neighbors_vertices_range;
/**
- * @brief Returns a Complex_edge_range over all edges of the simplicial complex that passes trough v
+ * @brief Returns a Complex_neighbors_vertices_range over all the vertices connected to v by an edge of the simplicial complex
*/
Complex_neighbors_vertices_range vertex_range(Vertex_handle v) const {
auto begin = Complex_neighbors_vertices_iterator(this, v);
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
index a2637da3..b3bf0382 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
@@ -164,7 +164,7 @@ ComplexType> {
Vertex_handle y_parent = *parent_complex.get_address(
this->get_id(*y_link));
if (parent_complex.contains_edge(x_parent, y_parent)) {
- // we check that there is no blocker subset of alpha passing trough x and y
+ // we check that there is no blocker subset of alpha passing through x and y
bool new_edge = true;
for (auto blocker_parent : parent_complex.const_blocker_range(
x_parent)) {
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
index 404f04f9..e686aaec 100644..100755
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
@@ -39,7 +39,6 @@ bool Skeleton_blocker_complex<SkeletonBlockerDS>::is_popable_blocker(Blocker_han
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_popable_blockers() {
@@ -160,7 +159,6 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::update_blockers_after_remove_s
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_star(Vertex_handle a, Vertex_handle b) {
@@ -269,7 +267,7 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_blocker_include_in_simp
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::tip_blockers(Vertex_handle a, Vertex_handle b,
std::vector<Simplex> & buffer) const {
- for (auto const & blocker : this->const_blocker_range(a)) {
+ for (auto const blocker : this->const_blocker_range(a)) {
Simplex beta = (*blocker);
beta.remove_vertex(a);
buffer.push_back(beta);
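For reference while reading the simplifiable-complex tests later in this diff, a sketch of the remove_star behaviour they check, reusing the build_complete helper defined in those test files; the exact blocker bookkeeping is what the BOOST_CHECKs below verify:

    Complex complex;
    build_complete(4, complex);  // vertices {0,1,2,3} and all edges: an implicit tetrahedron
    Simplex sigma(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3));
    // Removing the open star of {1,2,3} deletes {1,2,3} and {0,1,2,3} but keeps
    // every proper face, so {1,2,3} itself becomes a blocker.
    complex.remove_star(sigma);
    assert(complex.contains_blocker(sigma));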
diff --git a/src/Skeleton_blocker/test/test_skeleton_blocker_complex.cpp b/src/Skeleton_blocker/test/test_skeleton_blocker_complex.cpp
index 4336e33b..96438acf 100644
--- a/src/Skeleton_blocker/test/test_skeleton_blocker_complex.cpp
+++ b/src/Skeleton_blocker/test/test_skeleton_blocker_complex.cpp
@@ -91,10 +91,10 @@ BOOST_AUTO_TEST_CASE(test_skeleton_num_simplices) {
BOOST_AUTO_TEST_CASE(test_skeleton_iterator_vertices1) {
int n = 10;
Complex complex(10);
- std::cout << "complex.num_vertices():" << complex.num_vertices() << std::endl;
+ std::clog << "complex.num_vertices():" << complex.num_vertices() << std::endl;
int num_vertex_seen = 0;
for (auto vi : complex.vertex_range()) {
- std::cout << "vertex:" << vi << std::endl;
+ std::clog << "vertex:" << vi << std::endl;
++num_vertex_seen;
}
BOOST_CHECK(num_vertex_seen == n);
@@ -104,14 +104,14 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_vertices2) {
int n = 10;
Complex complex;
build_complete(10, complex);
- std::cout << "complex.num_vertices():" << complex.num_vertices() << std::endl;
- std::cout << "complex.num_edges():" << complex.num_edges() << std::endl;
+ std::clog << "complex.num_vertices():" << complex.num_vertices() << std::endl;
+ std::clog << "complex.num_edges():" << complex.num_edges() << std::endl;
int num_vertex_seen = 0;
for (auto vi : complex.vertex_range(Vertex_handle(2))) {
- std::cout << "vertex:" << vi << std::endl;
+ std::clog << "vertex:" << vi << std::endl;
++num_vertex_seen;
}
- std::cout << "num_vertex_seen:" << num_vertex_seen << std::endl;
+ std::clog << "num_vertex_seen:" << num_vertex_seen << std::endl;
BOOST_CHECK(num_vertex_seen == (n -1));
}
@@ -123,10 +123,10 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_edge) {
complex.add_edge_without_blockers(Vertex_handle(i), Vertex_handle(j));
complex.remove_edge(Vertex_handle(2), Vertex_handle(3));
complex.remove_edge(Vertex_handle(3), Vertex_handle(5));
- std::cout << "complex.num_edges():" << complex.num_edges() << std::endl;
+ std::clog << "complex.num_edges():" << complex.num_edges() << std::endl;
int num_edges_seen = 0;
for (auto edge : complex.edge_range()) {
- std::cout << "edge :" << complex[edge] << std::endl;
+ std::clog << "edge :" << complex[edge] << std::endl;
++num_edges_seen;
}
@@ -141,10 +141,10 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_edge2) {
complex.add_edge_without_blockers(Vertex_handle(i), Vertex_handle(j));
complex.remove_edge(Vertex_handle(2), Vertex_handle(3));
complex.remove_edge(Vertex_handle(3), Vertex_handle(5));
- std::cout << "complex.num_edges():" << complex.num_edges() << std::endl;
+ std::clog << "complex.num_edges():" << complex.num_edges() << std::endl;
int num_neigbors_seen = 0;
for (auto neighbor : complex.vertex_range(Vertex_handle(2))) {
- std::cout << "neighbor" << neighbor << std::endl;
+ std::clog << "neighbor" << neighbor << std::endl;
++num_neigbors_seen;
}
BOOST_CHECK(num_neigbors_seen == 8);
@@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_triangles) {
complex.add_edge_without_blockers(Vertex_handle(i), Vertex_handle(i + 1));
complex.add_edge_without_blockers(Vertex_handle(1), Vertex_handle(6));
- std::cout << complex.to_string() << std::endl;
+ std::clog << complex.to_string() << std::endl;
int num_triangles_seen = 0;
//for (auto t : complex.triangle_range(5)){
@@ -214,19 +214,19 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_simplices) {
expected_num_simplices[Vertex_handle(5)] = 7;
for (auto pair : expected_num_simplices) {
- std::cout << "found list: ";
+ std::clog << "found list: ";
unsigned num_simplices_around = 0;
for (const auto& simplex : complex.star_simplex_range(pair.first)) {
simplex.dimension();
- std::cout << simplex << " - ";
+ std::clog << simplex << " - ";
++num_simplices_around;
}
BOOST_CHECK(num_simplices_around == pair.second);
- std::cout << std::endl << "current vertex:" << pair.first << " - ";
- std::cout << "expected_num_simplices:" << pair.second << " - ";
- std::cout << "found:" << num_simplices_around << std::endl;
+ std::clog << std::endl << "current vertex:" << pair.first << " - ";
+ std::clog << "expected_num_simplices:" << pair.second << " - ";
+ std::clog << "found:" << num_simplices_around << std::endl;
}
}
@@ -276,19 +276,19 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_simplices3) {
BOOST_AUTO_TEST_CASE(test_skeleton_iterator_simplices4) {
Complex empty_complex;
for (auto v : empty_complex.vertex_range()) {
- std::cout << v;
+ std::clog << v;
BOOST_CHECK(false);
}
for (auto e : empty_complex.edge_range()) {
- std::cout << e;
+ std::clog << e;
BOOST_CHECK(false);
}
for (auto t : empty_complex.triangle_range()) {
- std::cout << t;
+ std::clog << t;
BOOST_CHECK(false);
}
for (auto s : empty_complex.complex_simplex_range()) {
- std::cout << s;
+ std::clog << s;
BOOST_CHECK(false);
}
}
@@ -297,7 +297,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_iterator_coboundary) {
Complex c;
build_complete(4, c);
c.remove_edge(Vertex_handle(1), Vertex_handle(3));
- std::cout << c.to_string();
+ std::clog << c.to_string();
Simplex s02(Vertex_handle(0), Vertex_handle(2));
int n = 0;
std::set<Simplex> expected;
@@ -373,7 +373,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link0) {
auto L2 = complex.link(alpha);
BOOST_CHECK(L == L2);
- std::cout << L.to_string();
+ std::clog << L.to_string();
BOOST_CHECK(L.contains_vertex(*L.get_address(Root_vertex_handle(b))));
BOOST_CHECK(L.contains_vertex(*L.get_address(Root_vertex_handle(d))));
@@ -432,9 +432,9 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link2) {
// Complexes built
// Print result
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
- std::cout << "L= Link_complex(" << alpha << ") : \n" << L.to_string();
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
+ std::clog << "L= Link_complex(" << alpha << ") : \n" << L.to_string();
auto L2 = complex.link(alpha);
BOOST_CHECK(L == L2);
@@ -472,9 +472,9 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link3) {
// Complexes built
// Print result
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
- std::cout << "L= Link_complex(" << alpha << ") : \n" << L.to_string();
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
+ std::clog << "L= Link_complex(" << alpha << ") : \n" << L.to_string();
auto L2 = complex.link(alpha);
BOOST_CHECK(L == L2);
@@ -529,8 +529,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link5) {
// Complexes built
// Print result
- std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl;
- std::cout << "Link: " << L.to_string() << std::endl;
+ std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl;
+ std::clog << "Link: " << L.to_string() << std::endl;
// verification
BOOST_CHECK(L.num_vertices() == 0);
@@ -549,8 +549,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link6) {
build_link_of_blocker(complex, alpha, link_blocker_alpha);
// Print result
- std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl;
- std::cout << "Link: " << link_blocker_alpha.to_string() << std::endl;
+ std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl;
+ std::clog << "Link: " << link_blocker_alpha.to_string() << std::endl;
// verification
BOOST_CHECK(link_blocker_alpha.num_vertices() == 1);
@@ -579,12 +579,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_link7) {
//the result should be the edge {6,7} plus the blocker {0,1,2}
// Print result
- std::cout << "Complex: " << complex.to_string()<< std::endl << std::endl;
- std::cout << "Link: " << link_blocker_alpha.to_string() << std::endl;
+ std::clog << "Complex: " << complex.to_string()<< std::endl << std::endl;
+ std::clog << "Link: " << link_blocker_alpha.to_string() << std::endl;
Skeleton_blocker_link_complex link_blocker_alpha_cpy = link_blocker_alpha;
- std::cout << "Link copy: " << link_blocker_alpha_cpy.to_string() << std::endl;
+ std::clog << "Link copy: " << link_blocker_alpha_cpy.to_string() << std::endl;
BOOST_CHECK(link_blocker_alpha.num_vertices() == link_blocker_alpha_cpy.num_vertices());
BOOST_CHECK(link_blocker_alpha.num_blockers() == link_blocker_alpha_cpy.num_blockers());
@@ -640,7 +640,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 1:\n" << complex.to_string();
+ std::clog << "Constructor 1:\n" << complex.to_string();
BOOST_CHECK(complex.num_vertices() == 6);
BOOST_CHECK(complex.num_edges() == 10);
@@ -677,10 +677,10 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor2) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 2:\n" << complex.to_string();
+ std::clog << "Constructor 2:\n" << complex.to_string();
for (auto b : complex.const_blocker_range()) {
- std::cout << "b:" << b << std::endl;
+ std::clog << "b:" << b << std::endl;
}
BOOST_CHECK(complex.num_vertices() == 5);
@@ -698,7 +698,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor3) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 3:\n" << complex.to_string();
+ std::clog << "Constructor 3:\n" << complex.to_string();
BOOST_CHECK(complex.num_blockers() == 1);
Sh expected_blocker(Vh(0), Vh(1), Vh(2));
@@ -723,7 +723,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor4) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 4:\n" << complex.to_string();
+ std::clog << "Constructor 4:\n" << complex.to_string();
BOOST_CHECK(complex.num_blockers() == 1);
Sh expected_blocker(Vh(0), Vh(1), Vh(4));
for (auto b : complex.const_blocker_range())
@@ -753,7 +753,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor5) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 5:\n" << complex.to_string();
+ std::clog << "Constructor 5:\n" << complex.to_string();
BOOST_CHECK(complex.num_vertices() == 6);
BOOST_CHECK(complex.num_blockers() == 3);
@@ -773,7 +773,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor6) {
Complex complex(simplices.begin(), simplices.end());
- std::cout << "Constructor 6:\n" << complex.to_string();
+ std::clog << "Constructor 6:\n" << complex.to_string();
BOOST_CHECK(complex.num_vertices() == 4);
BOOST_CHECK(complex.num_blockers() == 1);
@@ -795,7 +795,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor7) {
//get complex from top faces
Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces<Complex>(simplices.begin(), simplices.end()));
- std::cout << "Constructor 7:\n" << complex.to_string();
+ std::clog << "Constructor 7:\n" << complex.to_string();
BOOST_CHECK(complex.num_vertices() == 4);
BOOST_CHECK(complex.num_blockers() == 1);
@@ -818,7 +818,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_complex_constructor8) {
//get complex from top faces
Complex complex(Gudhi::skeleton_blocker::make_complex_from_top_faces<Complex>(simplices.begin(), simplices.end()));
- std::cout << "Constructor 8:\n" << complex.to_string();
+ std::clog << "Constructor 8:\n" << complex.to_string();
BOOST_CHECK(complex.num_vertices() == 4);
BOOST_CHECK(complex.num_blockers() == 2);
diff --git a/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp b/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp
index 8cad97a1..9042ddcf 100644
--- a/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp
+++ b/src/Skeleton_blocker/test/test_skeleton_blocker_geometric_complex.cpp
@@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_off_reader_writer) {
Gudhi::skeleton_blocker::Skeleton_blocker_off_reader<Complex> off_reader("test2.off", complex);
BOOST_CHECK(off_reader.is_valid());
- std::cout << "complex has " <<
+ std::clog << "complex has " <<
complex.num_vertices() << " vertices, " <<
complex.num_blockers() << " blockers, " <<
complex.num_edges() << " edges and " <<
@@ -50,8 +50,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_off_reader_writer) {
Complex same;
Gudhi::skeleton_blocker::Skeleton_blocker_off_reader<Complex> off_reader2("tmp.off", same);
- std::cout << "\ncomplex:" << complex.to_string() << std::endl;
- std::cout << "\nsame:" << same.to_string() << std::endl;
+ std::clog << "\ncomplex:" << complex.to_string() << std::endl;
+ std::clog << "\nsame:" << same.to_string() << std::endl;
BOOST_CHECK(complex == same);
}
@@ -61,7 +61,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) {
Gudhi::skeleton_blocker::Skeleton_blocker_off_reader<Complex> off_reader("test2.off", complex);
BOOST_CHECK(off_reader.is_valid());
- std::cout << "complex has " <<
+ std::clog << "complex has " <<
complex.num_vertices() << " vertices, " <<
complex.num_blockers() << " blockers, " <<
complex.num_edges() << " edges and " <<
@@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) {
auto link_0 = complex.abstract_link(Vertex_handle(0));
- std::cout << "\n link(0):" << link_0.to_string() << std::endl;
+ std::clog << "\n link(0):" << link_0.to_string() << std::endl;
BOOST_CHECK(link_0.num_vertices() == 2);
BOOST_CHECK(link_0.num_edges() == 1);
@@ -91,13 +91,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_abstract_link) {
BOOST_CHECK(link_0[*(edge_handle)].second() == Root_vertex_handle(4));
auto link_geometric_0 = complex.link(Vertex_handle(0));
- std::cout << "\n link_geometric(0):" << link_geometric_0.to_string() << std::endl;
+ std::clog << "\n link_geometric(0):" << link_geometric_0.to_string() << std::endl;
BOOST_CHECK(link_0 == link_geometric_0);
auto print_point = [&](Vertex_handle v) {
- for (auto x : link_geometric_0.point(v)) std::cout << x << " ";
- std::cout << std::endl;
+ for (auto x : link_geometric_0.point(v)) std::clog << x << " ";
+ std::clog << std::endl;
};
std::for_each(link_geometric_0.vertex_range().begin(), link_geometric_0.vertex_range().end(), print_point);
diff --git a/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp b/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp
index b714753d..a85d4ff0 100644
--- a/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp
+++ b/src/Skeleton_blocker/test/test_skeleton_blocker_simplifiable.cpp
@@ -49,12 +49,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_contraction1) {
static_cast<Vertex_handle> (y)));
// Print result
- std::cout << "complex before complex" << complex.to_string() << std::endl;
+ std::clog << "complex before complex" << complex.to_string() << std::endl;
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
complex.contract_edge(static_cast<Vertex_handle> (a), static_cast<Vertex_handle> (b));
// Print result
- std::cout << "ContractEdge(0,1)\n";
+ std::clog << "ContractEdge(0,1)\n";
PRINT(complex.to_string());
// verification
@@ -89,13 +89,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_contraction2) {
complex.add_blocker(blocker);
// Print result
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
complex.contract_edge(static_cast<Vertex_handle> (a), static_cast<Vertex_handle> (b));
- std::cout << "complex.ContractEdge(a,b)" << complex.to_string();
+ std::clog << "complex.ContractEdge(a,b)" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
// there should be one blocker (a,c,d,e) in the complex
BOOST_CHECK(complex.contains_blocker(Simplex(static_cast<Vertex_handle> (a), static_cast<Vertex_handle> (x),
@@ -110,8 +110,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_link_condition1) {
complex.add_blocker(Simplex(static_cast<Vertex_handle> (0), static_cast<Vertex_handle> (1), static_cast<Vertex_handle> (2)));
// Print result
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
BOOST_CHECK(complex.link_condition(Vertex_handle(1), Vertex_handle(2), true));
@@ -125,13 +125,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse0) {
complex.add_edge_without_blockers(static_cast<Vertex_handle> (2), static_cast<Vertex_handle> (4));
complex.add_edge_without_blockers(static_cast<Vertex_handle> (3), static_cast<Vertex_handle> (4));
// Print result
- std::cout << "initial complex :\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex :\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
Simplex simplex_123(static_cast<Vertex_handle> (1), static_cast<Vertex_handle> (2), static_cast<Vertex_handle> (3));
complex.remove_star(simplex_123);
- std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
// verification
BOOST_CHECK(complex.contains_blocker(simplex_123));
@@ -142,13 +142,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse1) {
build_complete(4, complex);
complex.add_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2), Vertex_handle(3)));
// Print result
- std::cout << "initial complex :\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex :\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
Simplex simplex_123(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3));
complex.remove_star(simplex_123);
- std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
// verification
BOOST_CHECK(complex.contains_blocker(simplex_123));
@@ -164,13 +164,13 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse2) {
complex.add_edge_without_blockers(Vertex_handle(3), Vertex_handle(4));
complex.add_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3), Vertex_handle(4)));
// Print result
- std::cout << "initial complex :\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex :\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
Simplex sigma(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3));
complex.remove_star(sigma);
- std::cout << "complex.remove_star(1,2,3):\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex.remove_star(1,2,3):\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
// verification
BOOST_CHECK(!complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2),
@@ -187,11 +187,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_collapse3) {
complex.add_edge_without_blockers(Vertex_handle(3), Vertex_handle(4));
complex.add_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3), Vertex_handle(4)));
// Print result
- std::cout << "initial complex:\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex:\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
complex.remove_star(static_cast<Vertex_handle> (2));
- std::cout << "complex after remove star of 2:\n" << complex.to_string();
+ std::clog << "complex after remove star of 2:\n" << complex.to_string();
BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(3), Vertex_handle(4))));
BOOST_CHECK(!complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2),
@@ -202,11 +202,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex) {
Complex complex(4);
build_complete(4, complex);
complex.add_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3)));
- std::cout << "initial complex:\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex:\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
complex.add_simplex(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3)));
- std::cout << "complex after add_simplex:\n" << complex.to_string();
+ std::clog << "complex after add_simplex:\n" << complex.to_string();
BOOST_CHECK(complex.num_blockers() == 1);
BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1),
Vertex_handle(2), Vertex_handle(3))));
@@ -216,8 +216,8 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex2) {
Complex complex;
build_complete(4, complex);
// Print result
- std::cout << "initial complex:\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex:\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
Complex copy(complex.num_vertices());
@@ -232,7 +232,7 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex2) {
copy.add_simplex(simplex);
}
- std::cout << "complex after add_simplex:\n" << copy.to_string();
+ std::clog << "complex after add_simplex:\n" << copy.to_string();
BOOST_CHECK(complex.num_blockers() == copy.num_blockers());
BOOST_CHECK(complex.num_edges() == copy.num_edges());
@@ -246,11 +246,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_simplex3) {
Simplex sigma(Vertex_handle(0), Vertex_handle(1), Vertex_handle(2));
complex.add_blocker(sigma);
// Print result
- std::cout << "initial complex:\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex:\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
complex.add_simplex(sigma);
//should create two blockers 0123 and 0124
- std::cout << "complex after adding simplex 012:\n" << complex.to_string();
+ std::clog << "complex after adding simplex 012:\n" << complex.to_string();
BOOST_CHECK(complex.num_blockers() == 2);
BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1),
Vertex_handle(2), Vertex_handle(3))));
@@ -292,11 +292,11 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_add_edge) {
complex.add_edge(Vertex_handle(i), Vertex_handle((i + 1) % 4));
// Print result
- std::cout << "initial complex:\n" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "initial complex:\n" << complex.to_string();
+ std::clog << std::endl << std::endl;
complex.add_edge(Vertex_handle(1), Vertex_handle(3));
//should create two blockers 013 and 012
- std::cout << "complex after adding edge 13:\n" << complex.to_string();
+ std::clog << "complex after adding edge 13:\n" << complex.to_string();
BOOST_CHECK(complex.num_blockers() == 2);
BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(0), Vertex_handle(1), Vertex_handle(3))));
BOOST_CHECK(complex.contains_blocker(Simplex(Vertex_handle(1), Vertex_handle(2), Vertex_handle(3))));
@@ -313,12 +313,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_remove_popable_blockers)
complex.add_blocker(sigma1);
complex.add_blocker(sigma2);
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
- std::cout << "complex.RemovePopableBlockers();" << std::endl;
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
+ std::clog << "complex.RemovePopableBlockers();" << std::endl;
complex.remove_popable_blockers();
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
BOOST_CHECK(complex.num_blockers() == 1);
@@ -337,12 +337,12 @@ BOOST_AUTO_TEST_CASE(test_skeleton_blocker_simplifiable_remove_popable_blockers)
complex.add_blocker(sigma1);
complex.add_blocker(sigma2);
- std::cout << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
- std::cout << "complex.RemovePopableBlockers();" << std::endl;
+ std::clog << "complex complex" << complex.to_string();
+ std::clog << std::endl << std::endl;
+ std::clog << "complex.RemovePopableBlockers();" << std::endl;
complex.remove_popable_blockers();
- std::cout << "complex complex" << complex.to_string();
+ std::clog << "complex complex" << complex.to_string();
- std::cout << std::endl << std::endl;
+ std::clog << std::endl << std::endl;
BOOST_CHECK(complex.num_blockers() == 0);
}
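
The sweeping std::cout -> std::clog change in these tests routes diagnostic text to the standard log stream instead of standard output. As a minimal sketch (my illustration, not part of the patch), this is what the switch buys: std::clog can be redirected or silenced on its own, leaving std::cout free for the program's real results.

    #include <iostream>
    #include <sstream>

    int main() {
      // Capture everything written to std::clog without touching std::cout.
      std::ostringstream captured;
      std::streambuf* old_buf = std::clog.rdbuf(captured.rdbuf());
      std::clog << "complex has 5 vertices\n";  // diagnostic, lands in 'captured'
      std::clog.rdbuf(old_buf);                 // restore the original stream
      std::cout << "result: 42\n";              // actual program output
    }
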
diff --git a/src/Spatial_searching/doc/Intro_spatial_searching.h b/src/Spatial_searching/doc/Intro_spatial_searching.h
index 30805570..81c5a3aa 100644
--- a/src/Spatial_searching/doc/Intro_spatial_searching.h
+++ b/src/Spatial_searching/doc/Intro_spatial_searching.h
@@ -36,7 +36,7 @@ namespace spatial_searching {
*
* This example generates 500 random points, then performs all-near-neighbors searches, and queries for nearest and furthest neighbors using different methods.
*
- * \include Spatial_searching/example_spatial_searching.cpp
+ * \include example_spatial_searching.cpp
*
*/
/** @} */ // end defgroup spatial_searching
diff --git a/src/Spatial_searching/example/CMakeLists.txt b/src/Spatial_searching/example/CMakeLists.txt
index eeb3e85f..308afa00 100644
--- a/src/Spatial_searching/example/CMakeLists.txt
+++ b/src/Spatial_searching/example/CMakeLists.txt
@@ -5,5 +5,4 @@ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
target_link_libraries(Spatial_searching_example_spatial_searching ${CGAL_LIBRARY})
add_test(NAME Spatial_searching_example_spatial_searching
COMMAND $<TARGET_FILE:Spatial_searching_example_spatial_searching>)
- install(TARGETS Spatial_searching_example_spatial_searching DESTINATION bin)
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Spatial_searching/example/example_spatial_searching.cpp b/src/Spatial_searching/example/example_spatial_searching.cpp
index 034ad24a..09c2dabf 100644
--- a/src/Spatial_searching/example/example_spatial_searching.cpp
+++ b/src/Spatial_searching/example/example_spatial_searching.cpp
@@ -23,38 +23,38 @@ int main(void) {
Points_ds points_ds(points);
// 10-nearest neighbor query
- std::cout << "10 nearest neighbors from points[20]:\n";
+ std::clog << "10 nearest neighbors from points[20]:\n";
auto knn_range = points_ds.k_nearest_neighbors(points[20], 10, true);
- for (auto const& nghb : knn_range)
- std::cout << nghb.first << " (sq. dist. = " << nghb.second << ")\n";
+ for (auto const nghb : knn_range)
+ std::clog << nghb.first << " (sq. dist. = " << nghb.second << ")\n";
// Incremental nearest neighbor query
- std::cout << "Incremental nearest neighbors:\n";
+ std::clog << "Incremental nearest neighbors:\n";
auto inn_range = points_ds.incremental_nearest_neighbors(points[45]);
// Get the neighbors in distance order until we hit the first point
for (auto ins_iterator = inn_range.begin(); ins_iterator->first != 0; ++ins_iterator)
- std::cout << ins_iterator->first << " (sq. dist. = " << ins_iterator->second << ")\n";
+ std::clog << ins_iterator->first << " (sq. dist. = " << ins_iterator->second << ")\n";
// 10-furthest neighbor query
- std::cout << "10 furthest neighbors from points[20]:\n";
+ std::clog << "10 furthest neighbors from points[20]:\n";
auto kfn_range = points_ds.k_furthest_neighbors(points[20], 10, true);
- for (auto const& nghb : kfn_range)
- std::cout << nghb.first << " (sq. dist. = " << nghb.second << ")\n";
+ for (auto const nghb : kfn_range)
+ std::clog << nghb.first << " (sq. dist. = " << nghb.second << ")\n";
// Incremental furthest neighbor query
- std::cout << "Incremental furthest neighbors:\n";
+ std::clog << "Incremental furthest neighbors:\n";
auto ifn_range = points_ds.incremental_furthest_neighbors(points[45]);
// Get the neighbors in distance reverse order until we hit the first point
for (auto ifs_iterator = ifn_range.begin(); ifs_iterator->first != 0; ++ifs_iterator)
- std::cout << ifs_iterator->first << " (sq. dist. = " << ifs_iterator->second << ")\n";
+ std::clog << ifs_iterator->first << " (sq. dist. = " << ifs_iterator->second << ")\n";
// All-near-neighbors search
- std::cout << "All-near-neighbors search:\n";
+ std::clog << "All-near-neighbors search:\n";
std::vector<std::size_t> rs_result;
points_ds.all_near_neighbors(points[45], 0.5, std::back_inserter(rs_result));
K k;
for (auto const& p_idx : rs_result)
- std::cout << p_idx << " (sq. dist. = " << k.squared_distance_d_object()(points[p_idx], points[45]) << ")\n";
+ std::clog << p_idx << " (sq. dist. = " << k.squared_distance_d_object()(points[p_idx], points[45]) << ")\n";
return 0;
}
diff --git a/src/Spatial_searching/include/gudhi/Kd_tree_search.h b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
index 87969dd9..6fb611f2 100644
--- a/src/Spatial_searching/include/gudhi/Kd_tree_search.h
+++ b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
@@ -12,11 +12,12 @@
#ifndef KD_TREE_SEARCH_H_
#define KD_TREE_SEARCH_H_
+#include <gudhi/Debug_utils.h>
+
#include <CGAL/Orthogonal_k_neighbor_search.h>
#include <CGAL/Orthogonal_incremental_neighbor_search.h>
#include <CGAL/Search_traits.h>
#include <CGAL/Search_traits_adapter.h>
-#include <CGAL/Fuzzy_sphere.h>
#include <CGAL/property_map.h>
#include <CGAL/version.h> // for CGAL_VERSION_NR
@@ -40,7 +41,6 @@
namespace Gudhi {
namespace spatial_searching {
-
/**
* \class Kd_tree_search Kd_tree_search.h gudhi/Kd_tree_search.h
* \brief Spatial tree data structure to perform (approximate) nearest and furthest neighbor search.
@@ -83,7 +83,8 @@ class Kd_tree_search {
typedef CGAL::Search_traits<
FT, Point,
typename Traits::Cartesian_const_iterator_d,
- typename Traits::Construct_cartesian_const_iterator_d> Traits_base;
+ typename Traits::Construct_cartesian_const_iterator_d,
+ typename Traits::Dimension> Traits_base;
typedef CGAL::Search_traits_adapter<
std::ptrdiff_t,
@@ -110,7 +111,76 @@ class Kd_tree_search {
/// of a point P and `second` is the squared distance between P and the query point.
typedef Incremental_neighbor_search INS_range;
- typedef CGAL::Fuzzy_sphere<STraits> Fuzzy_sphere;
+ // Because CGAL::Fuzzy_sphere takes the radius and not its square
+ struct Sphere_for_kdtree_search
+ {
+ typedef typename Traits::Point_d Point_d;
+ typedef typename Traits::FT FT;
+ typedef typename Traits::Dimension D;
+ typedef D Dimension;
+
+ private:
+ STraits traits;
+ Point_d c;
+ FT sqradmin, sqradmax;
+ bool use_max;
+
+ public:
+ // `prefer_max` means that we prefer outputting more points at squared distance between r2min and r2max,
+ // while `!prefer_max` means we prefer fewer.
+ Sphere_for_kdtree_search(Point_d const& c_, FT const& r2min, FT const& r2max, bool prefer_max=true, STraits const& traits_ = {})
+ : traits(traits_), c(c_), sqradmin(r2min), sqradmax(r2max), use_max(prefer_max)
+ { GUDHI_CHECK(r2min >= 0 && r2max >= r2min, "0 <= r2min <= r2max"); }
+
+ bool contains(std::ptrdiff_t i) const {
+ const Point_d& p = get(traits.point_property_map(), i);
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ return contains_point_given_as_coordinates(ccci(p), ccci(p, 0));
+ }
+
+ template <typename Coord_iterator>
+ bool contains_point_given_as_coordinates(Coord_iterator pi, Coord_iterator) const {
+ FT distance = 0;
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ FT const& limit = use_max ? sqradmax : sqradmin;
+ while (ci != ce) {
+ distance += CGAL::square(*pi++ - *ci++);
+ // I think Clément advised to check the distance at every step instead of
+ // just at the end, especially when the dimension becomes large. Distance
+ // isn't part of the concept anyway.
+ if (distance > limit) return false;
+ }
+ return true;
+ }
+
+ bool inner_range_intersects(CGAL::Kd_tree_rectangle<FT, D> const& rect) const {
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ FT distance = 0;
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ for (int i = 0; ci != ce; ++i, ++ci) {
+ distance += CGAL::square(CGAL::max<FT>(CGAL::max<FT>(*ci - rect.max_coord(i), rect.min_coord(i) - *ci), 0 ));
+ if (distance > sqradmin) return false;
+ }
+ return true;
+ }
+
+
+ bool outer_range_contains(CGAL::Kd_tree_rectangle<FT, D> const& rect) const {
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ FT distance = 0;
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ for (int i = 0; ci != ce; ++i, ++ci) {
+ distance += CGAL::square(CGAL::max<FT>(*ci - rect.min_coord(i), rect.max_coord(i) - *ci));
+ if (distance > sqradmax) return false;
+ }
+ return true;
+ }
+ };
+
/// \brief Constructor
/// @param[in] points Const reference to the point range. This range
/// is not copied, so it should not be destroyed or modified afterwards.
@@ -266,10 +336,26 @@ class Kd_tree_search {
/// @param[in] eps Approximation factor.
template <typename OutputIterator>
void all_near_neighbors(Point const& p,
- FT radius,
+ FT const& radius,
OutputIterator it,
FT eps = FT(0)) const {
- m_tree.search(it, Fuzzy_sphere(p, radius, eps, m_tree.traits()));
+ all_near_neighbors2(p, CGAL::square(radius - eps), CGAL::square(radius + eps), it);
+ }
+
+ /// \brief Search for all the neighbors in a ball. This is similar to `all_near_neighbors` but takes directly
+ /// the square of the minimum distance below which points must be considered neighbors and the square of the
+ /// maximum distance above which they cannot be.
+ /// @param[in] p The query point.
+ /// @param[in] sq_radius_min The square of the minimum search radius
+ /// @param[in] sq_radius_max The square of the maximum search radius
+ /// @param[out] it The points that lie inside the sphere of center `p`, at squared distance between `sq_radius_min` and `sq_radius_max`.
+ /// Note: `it` is used this way: `*it++ = each_point`.
+ template <typename OutputIterator>
+ void all_near_neighbors2(Point const& p,
+ FT const& sq_radius_min,
+ FT const& sq_radius_max,
+ OutputIterator it) const {
+ m_tree.search(it, Sphere_for_kdtree_search(p, sq_radius_min, sq_radius_max, true, m_tree.traits()));
}
int tree_depth() const {
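
A minimal usage sketch of the new ring query (my illustration, assuming the Epick_d typedefs from example_spatial_searching.cpp): per the documentation added above, points at squared distance below sq_radius_min must be reported, points beyond sq_radius_max never are, and the band in between is approximate.

    #include <gudhi/Kd_tree_search.h>
    #include <CGAL/Epick_d.h>
    #include <iterator>
    #include <vector>

    typedef CGAL::Epick_d<CGAL::Dimension_tag<3>> K;
    typedef K::Point_d Point;
    typedef std::vector<Point> Points;
    typedef Gudhi::spatial_searching::Kd_tree_search<K, Points> Points_ds;

    int main() {
      Points points;
      for (int i = 0; i < 10; ++i) {
        std::vector<double> c{0.1 * i, 0., 0.};
        points.emplace_back(c.begin(), c.end());
      }
      Points_ds points_ds(points);
      std::vector<std::size_t> result;
      // Indices at squared distance <= 0.04 from points[0] are guaranteed,
      // those in (0.04, 0.25] may appear, those beyond 0.25 never do.
      points_ds.all_near_neighbors2(points[0], 0.04, 0.25, std::back_inserter(result));
    }
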
diff --git a/src/Spatial_searching/test/test_Kd_tree_search.cpp b/src/Spatial_searching/test/test_Kd_tree_search.cpp
index d6c6fba3..e9acfaa7 100644
--- a/src/Spatial_searching/test/test_Kd_tree_search.cpp
+++ b/src/Spatial_searching/test/test_Kd_tree_search.cpp
@@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE(test_Kd_tree_search) {
std::vector<std::size_t> knn_result;
FT last_dist = -1.;
- for (auto const& nghb : kns_range) {
+ for (auto const nghb : kns_range) {
BOOST_CHECK(nghb.second > last_dist);
knn_result.push_back(nghb.second);
last_dist = nghb.second;
@@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE(test_Kd_tree_search) {
std::vector<std::size_t> kfn_result;
last_dist = kfn_range.begin()->second;
- for (auto const& nghb : kfn_range) {
+ for (auto const nghb : kfn_range) {
BOOST_CHECK(nghb.second <= last_dist);
kfn_result.push_back(nghb.second);
last_dist = nghb.second;
diff --git a/src/Subsampling/doc/Intro_subsampling.h b/src/Subsampling/doc/Intro_subsampling.h
index 1c84fb2e..1c366fe6 100644
--- a/src/Subsampling/doc/Intro_subsampling.h
+++ b/src/Subsampling/doc/Intro_subsampling.h
@@ -32,20 +32,20 @@ namespace subsampling {
* squared distance between any two points
* is greater than or equal to 0.4.
*
- * \include Subsampling/example_sparsify_point_set.cpp
+ * \include example_sparsify_point_set.cpp
*
* \section farthestpointexamples Example: choose_n_farthest_points
*
* This example outputs a subset of 100 points obtained by Gonz&aacute;lez algorithm,
* starting with a random point.
*
- * \include Subsampling/example_choose_n_farthest_points.cpp
+ * \include example_choose_n_farthest_points.cpp
*
* \section randompointexamples Example: pick_n_random_points
*
* This example outputs a subset of 100 points picked randomly.
*
- * \include Subsampling/example_pick_n_random_points.cpp
+ * \include example_pick_n_random_points.cpp
*/
/** @} */ // end defgroup subsampling
diff --git a/src/Subsampling/example/CMakeLists.txt b/src/Subsampling/example/CMakeLists.txt
index 28aab103..f4a23d22 100644
--- a/src/Subsampling/example/CMakeLists.txt
+++ b/src/Subsampling/example/CMakeLists.txt
@@ -3,7 +3,6 @@ project(Subsampling_examples)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable(Subsampling_example_pick_n_random_points example_pick_n_random_points.cpp)
add_executable(Subsampling_example_choose_n_farthest_points example_choose_n_farthest_points.cpp)
- add_executable(Subsampling_example_custom_kernel example_custom_kernel.cpp)
add_executable(Subsampling_example_sparsify_point_set example_sparsify_point_set.cpp)
target_link_libraries(Subsampling_example_sparsify_point_set ${CGAL_LIBRARY})
@@ -13,10 +12,6 @@ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
COMMAND $<TARGET_FILE:Subsampling_example_choose_n_farthest_points>)
add_test(NAME Subsampling_example_sparsify_point_set
COMMAND $<TARGET_FILE:Subsampling_example_sparsify_point_set>)
-
- install(TARGETS Subsampling_example_pick_n_random_points DESTINATION bin)
- install(TARGETS Subsampling_example_choose_n_farthest_points DESTINATION bin)
- install(TARGETS Subsampling_example_custom_kernel DESTINATION bin)
- install(TARGETS Subsampling_example_sparsify_point_set DESTINATION bin)
-
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+
+add_executable(Subsampling_example_custom_distance example_custom_distance.cpp)
diff --git a/src/Subsampling/example/example_choose_n_farthest_points.cpp b/src/Subsampling/example/example_choose_n_farthest_points.cpp
index 5cfeb4d8..e8b3ce2d 100644
--- a/src/Subsampling/example/example_choose_n_farthest_points.cpp
+++ b/src/Subsampling/example/example_choose_n_farthest_points.cpp
@@ -20,11 +20,11 @@ int main(void) {
K k;
std::vector<Point_d> results;
- Gudhi::subsampling::choose_n_farthest_points(k, points, 100,
+ Gudhi::subsampling::choose_n_farthest_points(k.squared_distance_d_object(), points, 100,
Gudhi::subsampling::random_starting_point,
std::back_inserter(results));
- std::cout << "Before sparsification: " << points.size() << " points.\n";
- std::cout << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
return 0;
}
diff --git a/src/Subsampling/example/example_custom_distance.cpp b/src/Subsampling/example/example_custom_distance.cpp
new file mode 100644
index 00000000..3325b12d
--- /dev/null
+++ b/src/Subsampling/example/example_custom_distance.cpp
@@ -0,0 +1,44 @@
+#include <gudhi/choose_n_farthest_points.h>
+
+#include <iostream>
+#include <vector>
+#include <iterator>
+
+
+typedef unsigned Point;
+
+/* The class Distance contains a distance function defined on the set of points {0, 1, 2, 3}
+ * and computes a distance according to the matrix:
+ * 0 1 2 4
+ * 1 0 4 2
+ * 2 4 0 1
+ * 4 2 1 0
+ */
+class Distance {
+ private:
+ std::vector<std::vector<double>> matrix_;
+
+ public:
+ Distance() {
+ matrix_.push_back({0, 1, 2, 4});
+ matrix_.push_back({1, 0, 4, 2});
+ matrix_.push_back({2, 4, 0, 1});
+ matrix_.push_back({4, 2, 1, 0});
+ }
+
+ double operator()(Point p1, Point p2) const {
+ return matrix_[p1][p2];
+ }
+};
+
+int main(void) {
+ std::vector<Point> points = {0, 1, 2, 3};
+ std::vector<Point> results;
+
+ Gudhi::subsampling::choose_n_farthest_points(Distance(), points, 2,
+ Gudhi::subsampling::random_starting_point,
+ std::back_inserter(results));
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Result table: {" << results[0] << "," << results[1] << "}\n";
+}
diff --git a/src/Subsampling/example/example_custom_kernel.cpp b/src/Subsampling/example/example_custom_kernel.cpp
deleted file mode 100644
index f1eb757b..00000000
--- a/src/Subsampling/example/example_custom_kernel.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-#include <gudhi/choose_n_farthest_points.h>
-
-#include <iostream>
-#include <vector>
-#include <iterator>
-
-
-/* The class Kernel contains a distance function defined on the set of points {0, 1, 2, 3}
- * and computes a distance according to the matrix:
- * 0 1 2 4
- * 1 0 4 2
- * 2 4 0 1
- * 4 2 1 0
- */
-class Kernel {
- public:
- typedef double FT;
- typedef unsigned Point_d;
-
- // Class Squared_distance_d
- class Squared_distance_d {
- private:
- std::vector<std::vector<FT>> matrix_;
-
- public:
- Squared_distance_d() {
- matrix_.push_back(std::vector<FT>({0, 1, 2, 4}));
- matrix_.push_back(std::vector<FT>({1, 0, 4, 2}));
- matrix_.push_back(std::vector<FT>({2, 4, 0, 1}));
- matrix_.push_back(std::vector<FT>({4, 2, 1, 0}));
- }
-
- FT operator()(Point_d p1, Point_d p2) {
- return matrix_[p1][p2];
- }
- };
-
- // Constructor
- Kernel() {}
-
- // Object of type Squared_distance_d
- Squared_distance_d squared_distance_d_object() const {
- return Squared_distance_d();
- }
-};
-
-int main(void) {
- typedef Kernel K;
- typedef typename K::Point_d Point_d;
-
- K k;
- std::vector<Point_d> points = {0, 1, 2, 3};
- std::vector<Point_d> results;
-
- Gudhi::subsampling::choose_n_farthest_points(k, points, 2,
- Gudhi::subsampling::random_starting_point,
- std::back_inserter(results));
- std::cout << "Before sparsification: " << points.size() << " points.\n";
- std::cout << "After sparsification: " << results.size() << " points.\n";
- std::cout << "Result table: {" << results[0] << "," << results[1] << "}\n";
-
- return 0;
-}
diff --git a/src/Subsampling/example/example_pick_n_random_points.cpp b/src/Subsampling/example/example_pick_n_random_points.cpp
index 25266403..316feed1 100644
--- a/src/Subsampling/example/example_pick_n_random_points.cpp
+++ b/src/Subsampling/example/example_pick_n_random_points.cpp
@@ -21,8 +21,8 @@ int main(void) {
K k;
std::vector<Point_d> results;
Gudhi::subsampling::pick_n_random_points(points, 100, std::back_inserter(results));
- std::cout << "Before sparsification: " << points.size() << " points.\n";
- std::cout << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
return 0;
}
diff --git a/src/Subsampling/example/example_sparsify_point_set.cpp b/src/Subsampling/example/example_sparsify_point_set.cpp
index a8caa720..1e2c38c1 100644
--- a/src/Subsampling/example/example_sparsify_point_set.cpp
+++ b/src/Subsampling/example/example_sparsify_point_set.cpp
@@ -21,8 +21,8 @@ int main(void) {
K k;
std::vector<Point_d> results;
Gudhi::subsampling::sparsify_point_set(k, points, 0.4, std::back_inserter(results));
- std::cout << "Before sparsification: " << points.size() << " points.\n";
- std::cout << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
return 0;
}
diff --git a/src/Subsampling/include/gudhi/choose_n_farthest_points.h b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
index 66421a69..44c02df1 100644
--- a/src/Subsampling/include/gudhi/choose_n_farthest_points.h
+++ b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
@@ -38,32 +38,35 @@ enum : std::size_t {
* \ingroup subsampling
* \brief Subsample by a greedy strategy of iteratively adding the farthest point from the
* current chosen point set to the subsampling.
- * The iteration starts with the landmark `starting point` or, if `starting point==random_starting_point`, with a random landmark.
- * \tparam Kernel must provide a type Kernel::Squared_distance_d which is a model of the
- * concept <a target="_blank"
- * href="http://doc.cgal.org/latest/Kernel_d/classKernel__d_1_1Squared__distance__d.html">Kernel_d::Squared_distance_d</a> (despite the name, taken from CGAL, this can be any kind of metric or proximity measure).
- * It must also contain a public member `squared_distance_d_object()` that returns an object of this type.
- * \tparam Point_range Range whose value type is Kernel::Point_d. It must provide random-access
- * via `operator[]` and the points should be stored contiguously in memory.
- * \tparam PointOutputIterator Output iterator whose value type is Kernel::Point_d.
- * \tparam DistanceOutputIterator Output iterator for distances.
- * \details It chooses `final_size` points from a random access range
- * `input_pts` and outputs them in the output iterator `output_it`. It also
+ * \details
+ * The iteration starts with the landmark `starting_point` or, if `starting_point == random_starting_point`,
+ * with a random landmark.
+ * It chooses `final_size` points from a random access range
+ * `input_pts` (or the number of input points if `final_size` is larger)
+ * and outputs them in the output iterator `output_it`. It also
* outputs the distance from each of those points to the set of previous
* points in `dist_it`.
- * @param[in] k A kernel object.
- * @param[in] input_pts Const reference to the input points.
+ * \tparam Distance must provide an operator() that takes 2 points (value type of the range)
+ * and returns their distance (or some more general proximity measure) as a `double`.
+ * \tparam Point_range Random access range of points.
+ * \tparam PointOutputIterator Output iterator whose value type is the point type.
+ * \tparam DistanceOutputIterator Output iterator for distances.
+ * @param[in] dist A distance function.
+ * @param[in] input_pts The input points.
* @param[in] final_size The size of the subsample to compute.
* @param[in] starting_point The seed in the farthest point algorithm.
* @param[out] output_it The output iterator for points.
* @param[out] dist_it The optional output iterator for distances.
+ *
+ * \warning Older versions of this function took a CGAL kernel as argument. Users need to replace `k` with
+ * `k.squared_distance_d_object()` in the first argument of every call to `choose_n_farthest_points`.
*
*/
-template < typename Kernel,
+template < typename Distance,
typename Point_range,
typename PointOutputIterator,
typename DistanceOutputIterator = Null_output_iterator>
-void choose_n_farthest_points(Kernel const &k,
+void choose_n_farthest_points(Distance dist,
Point_range const &input_pts,
std::size_t final_size,
std::size_t starting_point,
@@ -85,32 +88,57 @@ void choose_n_farthest_points(Kernel const &k,
starting_point = dis(gen);
}
- typename Kernel::Squared_distance_d sqdist = k.squared_distance_d_object();
+ // FIXME: don't hard-code the type as double. For Epeck_d, we also want to handle types that do not have an infinity.
+ static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
- std::size_t current_number_of_landmarks = 0; // counter for landmarks
- const double infty = std::numeric_limits<double>::infinity(); // infinity (see next entry)
- std::vector< double > dist_to_L(nb_points, infty); // vector of current distances to L from input_pts
+ *output_it++ = input_pts[starting_point];
+ *dist_it++ = std::numeric_limits<double>::infinity();
+ if (final_size == 1) return;
+
+ std::vector<std::size_t> points(nb_points); // map from remaining points to indexes in input_pts
+ std::vector< double > dist_to_L(nb_points); // vector of current distances to L from points
+ for(std::size_t i = 0; i < nb_points; ++i) {
+ points[i] = i;
+ dist_to_L[i] = dist(input_pts[i], input_pts[starting_point]);
+ }
+ // The indirection through points makes the program a bit slower. Some alternatives:
+ // - the original code never removed points and counted on them not
+ // reappearing because of a self-distance of 0. This causes unnecessary
+ // computations when final_size is large. It also causes trouble if there are
+ // input points at distance 0 from each other.
+ // - copy input_pts and update the local copy when removing points.
std::size_t curr_max_w = starting_point;
- for (current_number_of_landmarks = 0; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
- // curr_max_w at this point is the next landmark
- *output_it++ = input_pts[curr_max_w];
- *dist_it++ = dist_to_L[curr_max_w];
+ for (std::size_t current_number_of_landmarks = 1; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
+ std::size_t latest_landmark = points[curr_max_w];
+ // To remove the latest landmark at index curr_max_w, replace it
+ // with the last point and reduce the length of the vector.
+ std::size_t last = points.size() - 1;
+ if (curr_max_w != last) {
+ points[curr_max_w] = points[last];
+ dist_to_L[curr_max_w] = dist_to_L[last];
+ }
+ points.pop_back();
+
+ // Update distances to L.
std::size_t i = 0;
- for (auto&& p : input_pts) {
- double curr_dist = sqdist(p, *(std::begin(input_pts) + curr_max_w));
+ for (auto p : points) {
+ double curr_dist = dist(input_pts[p], input_pts[latest_landmark]);
if (curr_dist < dist_to_L[i])
dist_to_L[i] = curr_dist;
++i;
}
- // choose the next curr_max_w
- double curr_max_dist = 0; // used for defining the furhest point from L
- for (i = 0; i < dist_to_L.size(); i++)
+ // choose the next landmark
+ curr_max_w = 0;
+ double curr_max_dist = dist_to_L[curr_max_w]; // used for defining the furthest point from L
+ for (i = 1; i < points.size(); i++)
if (dist_to_L[i] > curr_max_dist) {
curr_max_dist = dist_to_L[i];
curr_max_w = i;
}
+ *output_it++ = input_pts[points[curr_max_w]];
+ *dist_it++ = dist_to_L[curr_max_w];
}
}
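
The rewritten loop deletes the chosen landmark from the candidate arrays with the classic swap-and-pop idiom, making each removal O(1) at the cost of reordering; a standalone sketch of the idiom (illustration only):

    #include <cstddef>
    #include <vector>

    // Remove v[i] in O(1) by overwriting it with the last element and
    // shrinking the vector; element order is not preserved.
    template <typename T>
    void swap_and_pop(std::vector<T>& v, std::size_t i) {
      if (i + 1 != v.size()) v[i] = v.back();
      v.pop_back();
    }
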
diff --git a/src/Subsampling/include/gudhi/pick_n_random_points.h b/src/Subsampling/include/gudhi/pick_n_random_points.h
index a67b2b84..e4246c29 100644
--- a/src/Subsampling/include/gudhi/pick_n_random_points.h
+++ b/src/Subsampling/include/gudhi/pick_n_random_points.h
@@ -11,7 +11,9 @@
#ifndef PICK_N_RANDOM_POINTS_H_
#define PICK_N_RANDOM_POINTS_H_
-#include <gudhi/Clock.h>
+#ifdef GUDHI_SUBSAMPLING_PROFILING
+# include <gudhi/Clock.h>
+#endif
#include <boost/range/size.hpp>
@@ -44,6 +46,12 @@ void pick_n_random_points(Point_container const &points,
Gudhi::Clock t;
#endif
+ std::random_device rd;
+ std::mt19937 g(rd());
+
+#if __cplusplus >= 201703L
+ std::sample(std::begin(points), std::end(points), output_it, final_size, g);
+#else
std::size_t nbP = boost::size(points);
if (final_size > nbP)
final_size = nbP;
@@ -51,14 +59,12 @@ void pick_n_random_points(Point_container const &points,
std::vector<int> landmarks(nbP);
std::iota(landmarks.begin(), landmarks.end(), 0);
- std::random_device rd;
- std::mt19937 g(rd());
-
std::shuffle(landmarks.begin(), landmarks.end(), g);
landmarks.resize(final_size);
for (int l : landmarks)
*output_it++ = points[l];
+#endif
#ifdef GUDHI_SUBSAMPLING_PROFILING
t.end();
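
Under C++17 the function now defers to std::sample, as the hunk above shows. One behavioral note, sketched below (my illustration): std::sample draws min(n, size) elements without replacement and, for forward iterators, keeps them in input order, whereas the pre-C++17 fallback returns them in shuffled order.

    #include <algorithm>  // std::sample, C++17
    #include <iterator>
    #include <random>
    #include <vector>

    int main() {
      std::vector<int> points{0, 1, 2, 3, 4, 5, 6, 7};
      std::vector<int> picked;
      std::mt19937 g{std::random_device{}()};
      // Draws min(3, points.size()) distinct elements, preserving input order.
      std::sample(points.begin(), points.end(), std::back_inserter(picked), 3, g);
    }
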
diff --git a/src/Subsampling/include/gudhi/sparsify_point_set.h b/src/Subsampling/include/gudhi/sparsify_point_set.h
index b30cec80..b325fe3c 100644
--- a/src/Subsampling/include/gudhi/sparsify_point_set.h
+++ b/src/Subsampling/include/gudhi/sparsify_point_set.h
@@ -11,6 +11,8 @@
#ifndef SPARSIFY_POINT_SET_H_
#define SPARSIFY_POINT_SET_H_
+# include <boost/iterator/function_output_iterator.hpp>
+
#include <gudhi/Kd_tree_search.h>
#ifdef GUDHI_SUBSAMPLING_PROFILING
#include <gudhi/Clock.h>
@@ -27,7 +29,7 @@ namespace subsampling {
* \ingroup subsampling
* \brief Outputs a subset of the input points so that the
* squared distance between any two points
- * is greater than or equal to `min_squared_dist`.
+ * is greater than `min_squared_dist`.
*
* \tparam Kernel must be a model of the <a target="_blank"
* href="http://doc.cgal.org/latest/Spatial_searching/classSearchTraits.html">SearchTraits</a>
@@ -63,29 +65,15 @@ sparsify_point_set(
// Parse the input points, and add them if they are not too close to
// the other points
std::size_t pt_idx = 0;
- for (typename Point_range::const_iterator it_pt = input_pts.begin();
- it_pt != input_pts.end();
- ++it_pt, ++pt_idx) {
- if (dropped_points[pt_idx])
+ for (auto const& pt : input_pts) {
+ if (dropped_points[pt_idx++])
continue;
- *output_it++ = *it_pt;
-
- auto ins_range = points_ds.incremental_nearest_neighbors(*it_pt);
+ *output_it++ = pt;
// If another point Q is at squared distance below min_squared_dist, mark Q to be dropped
- for (auto const& neighbor : ins_range) {
- std::size_t neighbor_point_idx = neighbor.first;
- // If the neighbor is too close, we drop the neighbor
- if (neighbor.second < min_squared_dist) {
- // N.B.: If neighbor_point_idx < pt_idx,
- // dropped_points[neighbor_point_idx] is already true but adding a
- // test doesn't make things faster, so why bother?
- dropped_points[neighbor_point_idx] = true;
- } else {
- break;
- }
- }
+ auto drop = [&dropped_points] (std::ptrdiff_t neighbor_point_idx) { dropped_points[neighbor_point_idx] = true; };
+ points_ds.all_near_neighbors2(pt, min_squared_dist, min_squared_dist, boost::make_function_output_iterator(std::ref(drop)));
}
#ifdef GUDHI_SUBSAMPLING_PROFILING
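
The rewrite drives the tree search through boost::make_function_output_iterator, which turns every write into a call of the wrapped functor; a self-contained sketch of the mechanism (illustration only):

    #include <boost/iterator/function_output_iterator.hpp>
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<std::size_t> neighbors{2, 5, 7};
      std::vector<bool> dropped(10, false);
      // Every "*it++ = idx" performed by the algorithm invokes the lambda --
      // the same trick sparsify_point_set uses to mark neighbors as dropped.
      auto drop = [&dropped](std::size_t idx) { dropped[idx] = true; };
      std::copy(neighbors.begin(), neighbors.end(),
                boost::make_function_output_iterator(drop));
    }
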
diff --git a/src/Subsampling/test/test_choose_n_farthest_points.cpp b/src/Subsampling/test/test_choose_n_farthest_points.cpp
index 5c4bd4cb..c384c61b 100644
--- a/src/Subsampling/test/test_choose_n_farthest_points.cpp
+++ b/src/Subsampling/test/test_choose_n_farthest_points.cpp
@@ -39,12 +39,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point, Kernel, list_of_tested
for (FT k = 0; k < 5; k += 1.0)
for (FT l = 0; l < 5; l += 1.0) {
std::vector<FT> point({i, j, k, l});
- points.push_back(Point_d(point.begin(), point.end()));
+ points.emplace_back(point.begin(), point.end());
}
landmarks.clear();
Kernel k;
- Gudhi::subsampling::choose_n_farthest_points(k, points, 100, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ auto d = k.squared_distance_d_object();
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 100, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
BOOST_CHECK(landmarks.size() == 100);
for (auto landmark : landmarks)
@@ -61,42 +62,52 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
std::vector< FT > distances;
landmarks.clear();
Kernel k;
+ auto d = k.squared_distance_d_object();
// Choose -1 farthest points in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
// Choose 0 farthest points in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
// Choose 1 farthest points in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
std::vector<FT> point({0.0, 0.0, 0.0, 0.0});
- points.push_back(Point_d(point.begin(), point.end()));
+ points.emplace_back(point.begin(), point.end());
// Choose -1 farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 1 && distances.size() == 1);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
landmarks.clear(); distances.clear();
// Choose 0 farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0 && distances.size() == 0);
landmarks.clear(); distances.clear();
// Choose 1 farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 1 && distances.size() == 1);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
landmarks.clear(); distances.clear();
std::vector<FT> point2({1.0, 0.0, 0.0, 0.0});
- points.push_back(Point_d(point2.begin(), point2.end()));
- // Choose all farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ points.emplace_back(point2.begin(), point2.end());
+ // Choose all farthest points among 2 points
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 2 && distances.size() == 2);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
BOOST_CHECK(distances[1] == 1);
landmarks.clear(); distances.clear();
+
+ // Accept duplicated points
+ points.emplace_back(point.begin(), point.end());
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ BOOST_CHECK(landmarks.size() == 3 && distances.size() == 3);
+ BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
+ BOOST_CHECK(distances[1] == 1);
+ BOOST_CHECK(distances[2] == 0);
+ landmarks.clear(); distances.clear();
}
diff --git a/src/Subsampling/test/test_pick_n_random_points.cpp b/src/Subsampling/test/test_pick_n_random_points.cpp
index 018fb8d2..fafae2af 100644
--- a/src/Subsampling/test/test_pick_n_random_points.cpp
+++ b/src/Subsampling/test/test_pick_n_random_points.cpp
@@ -49,9 +49,9 @@ BOOST_AUTO_TEST_CASE(test_pick_n_random_points)
std::vector<Point_d> results;
Gudhi::subsampling::pick_n_random_points(vect, 5, std::back_inserter(results));
- std::cout << "landmark vector contains: ";
+ std::clog << "landmark vector contains: ";
for (auto l: results)
- std::cout << l << "\n";
+ std::clog << l << "\n";
BOOST_CHECK(results.size() == 5);
}
diff --git a/src/Subsampling/test/test_sparsify_point_set.cpp b/src/Subsampling/test/test_sparsify_point_set.cpp
index 587ab3ad..cdcfbff5 100644
--- a/src/Subsampling/test/test_sparsify_point_set.cpp
+++ b/src/Subsampling/test/test_sparsify_point_set.cpp
@@ -34,10 +34,10 @@ BOOST_AUTO_TEST_CASE(test_sparsify_point_set)
K k;
std::vector<Point_d> results;
Gudhi::subsampling::sparsify_point_set(k, points, 0.5, std::back_inserter(results));
- std::cout << "Before sparsification: " << points.size() << " points.\n";
- std::cout << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
//for (auto p : results)
- // std::cout << p << "\n";
+ // std::clog << p << "\n";
BOOST_CHECK(points.size() > results.size());
}
diff --git a/src/Tangential_complex/benchmark/XML_exporter.h b/src/Tangential_complex/benchmark/XML_exporter.h
index 16b62eb6..38fe049f 100644
--- a/src/Tangential_complex/benchmark/XML_exporter.h
+++ b/src/Tangential_complex/benchmark/XML_exporter.h
@@ -157,7 +157,7 @@ class Streaming_XML_exporter {
m_xml_fstream << " </" << m_element_name << ">" << std::endl;
// Save current pointer position
- std::ofstream::streampos pos = m_xml_fstream.tellp();
+ auto pos = m_xml_fstream.tellp();
// Close the XML file (temporarily) so that the XML file is always correct
m_xml_fstream << "</" << m_list_name << ">" << std::endl;
// Restore the pointer position so that the next "add_element" will overwrite
diff --git a/src/Tangential_complex/benchmark/benchmark_tc.cpp b/src/Tangential_complex/benchmark/benchmark_tc.cpp
index e3b2a04f..8e7c72ff 100644
--- a/src/Tangential_complex/benchmark/benchmark_tc.cpp
+++ b/src/Tangential_complex/benchmark/benchmark_tc.cpp
@@ -33,6 +33,7 @@ const std::size_t ONLY_LOAD_THE_FIRST_N_POINTS = 20000000;
#include <gudhi/sparsify_point_set.h>
#include <gudhi/random_point_generators.h>
#include <gudhi/Tangential_complex/utilities.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/assertions_behaviour.h>
#include <CGAL/Epick_d.h>
@@ -704,7 +705,7 @@ int main() {
points = Gudhi::generate_points_on_torus_d<Kernel>(
num_points,
intrinsic_dim,
- param1 == "Y", // uniform
+ (param1 == "Y") ? "grid" : "random", // grid or random sample type
std::atof(param2.c_str())); // radius_noise_percentage
} else if (input == "generate_klein_bottle_3D") {
points = Gudhi::generate_points_on_klein_bottle_3D<Kernel>(
diff --git a/src/Tangential_complex/doc/Intro_tangential_complex.h b/src/Tangential_complex/doc/Intro_tangential_complex.h
index ce277185..cb8c6122 100644
--- a/src/Tangential_complex/doc/Intro_tangential_complex.h
+++ b/src/Tangential_complex/doc/Intro_tangential_complex.h
@@ -88,7 +88,7 @@ This example builds the Tangential complex of point set.
Note that the dimension of the kernel here is dynamic, which is slower, but more flexible:
the intrinsic and ambient dimensions do not have to be known at compile-time.
-\include Tangential_complex/example_basic.cpp
+\include example_basic.cpp
\section example_with_perturb Example with perturbation
@@ -97,7 +97,7 @@ by perturbing the positions of points involved in inconsistent simplices.
Note that the dimension of the kernel here is static, which is the best choice when the
dimensions are known at compile-time.
-\include Tangential_complex/example_with_perturb.cpp
+\include example_with_perturb.cpp
*/
/** @} */ // end defgroup tangential_complex
diff --git a/src/Tangential_complex/example/CMakeLists.txt b/src/Tangential_complex/example/CMakeLists.txt
index cb1486a4..b66b5f39 100644
--- a/src/Tangential_complex/example/CMakeLists.txt
+++ b/src/Tangential_complex/example/CMakeLists.txt
@@ -15,6 +15,4 @@ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_test(NAME Tangential_complex_example_with_perturb
COMMAND $<TARGET_FILE:Tangential_complex_example_with_perturb>)
- install(TARGETS Tangential_complex_example_basic DESTINATION bin)
- install(TARGETS Tangential_complex_example_with_perturb DESTINATION bin)
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Tangential_complex/example/example_basic.cpp b/src/Tangential_complex/example/example_basic.cpp
index ab35edf0..c50b9b8c 100644
--- a/src/Tangential_complex/example/example_basic.cpp
+++ b/src/Tangential_complex/example/example_basic.cpp
@@ -1,7 +1,6 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
-//#include <gudhi/Fake_simplex_tree.h>
-
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
@@ -39,7 +38,6 @@ int main(void) {
// Export the TC into a Simplex_tree
Gudhi::Simplex_tree<> stree;
- //Gudhi::Fake_simplex_tree stree;
tc.create_complex(stree);
// Display stats about inconsistencies
diff --git a/src/Tangential_complex/example/example_with_perturb.cpp b/src/Tangential_complex/example/example_with_perturb.cpp
index d0d877ea..e70e2980 100644
--- a/src/Tangential_complex/example/example_with_perturb.cpp
+++ b/src/Tangential_complex/example/example_with_perturb.cpp
@@ -1,5 +1,6 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index f007bdd5..ab203ca5 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -36,7 +36,6 @@
#include <Eigen/Eigen>
#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
-#include <boost/optional.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/counting_range.hpp>
@@ -56,6 +55,8 @@
#include <cmath> // for std::sqrt
#include <string>
#include <cstddef> // for std::size_t
+#include <optional>
+#include <numeric> // for std::iota
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_for.h>
@@ -345,10 +346,11 @@ class Tangential_complex {
m_stars.resize(m_points.size());
m_squared_star_spheres_radii_incl_margin.resize(m_points.size(), FT(-1));
#ifdef GUDHI_TC_PERTURB_POSITION
- if (m_points.empty())
+ if (m_points.empty()) {
m_translations.clear();
- else
+ } else {
m_translations.resize(m_points.size(), m_k.construct_vector_d_object()(m_ambient_dim));
+ }
#if defined(GUDHI_USE_TBB)
delete[] m_p_perturb_mutexes;
m_p_perturb_mutexes = new Mutex_for_perturb[m_points.size()];
@@ -623,6 +625,11 @@ class Tangential_complex {
int max_dim = -1;
+ // Ordered vertices to be inserted first by the create_complex method to avoid quadratic complexity.
+ std::vector<typename Simplex_tree_::Vertex_handle> vertices(m_points.size());
+ std::iota(vertices.begin(), vertices.end(), 0);
+ tree.insert_batch_vertices(vertices);
+
// For each triangulation
for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
// For each cell of the star
@@ -954,7 +961,11 @@ class Tangential_complex {
// Triangulation's traits functor & objects
typename Tr_traits::Compute_weight_d point_weight = local_tr_traits.compute_weight_d_object();
+#if CGAL_VERSION_NR < 1050200000
typename Tr_traits::Power_center_d power_center = local_tr_traits.power_center_d_object();
+#else
+ typename Tr_traits::Construct_power_sphere_d power_center = local_tr_traits.construct_power_sphere_d_object();
+#endif
//***************************************************
// Build a minimal triangulation in the tangent space
@@ -990,7 +1001,7 @@ class Tangential_complex {
// circumspheres of the star of "center_vertex"
// If th the m_max_squared_edge_length is set the maximal radius of the "star sphere"
// is at most square root of m_max_squared_edge_length
- boost::optional<FT> squared_star_sphere_radius_plus_margin = m_max_squared_edge_length;
+ std::optional<FT> squared_star_sphere_radius_plus_margin = m_max_squared_edge_length;
// Insert points until we find a point which is outside "star sphere"
for (auto nn_it = ins_range.begin(); nn_it != ins_range.end(); ++nn_it) {
@@ -1032,7 +1043,7 @@ class Tangential_complex {
// Let's recompute squared_star_sphere_radius_plus_margin
if (triangulation.current_dimension() >= tangent_space_dim) {
- squared_star_sphere_radius_plus_margin = boost::none;
+ squared_star_sphere_radius_plus_margin = std::nullopt;
// Get the incident cells and look for the biggest circumsphere
std::vector<Tr_full_cell_handle> incident_cells;
triangulation.incident_full_cells(center_vertex, std::back_inserter(incident_cells));
@@ -1040,7 +1051,7 @@ class Tangential_complex {
cit != incident_cells.end(); ++cit) {
Tr_full_cell_handle cell = *cit;
if (triangulation.is_infinite(cell)) {
- squared_star_sphere_radius_plus_margin = boost::none;
+ squared_star_sphere_radius_plus_margin = std::nullopt;
break;
} else {
// Note that this uses the perturbed point since it uses
@@ -1100,7 +1111,11 @@ class Tangential_complex {
std::size_t closest_pt_index = updated_pts_ds.k_nearest_neighbors(center_point, 1, false).begin()->first;
typename K::Construct_weighted_point_d k_constr_wp = m_k.construct_weighted_point_d_object();
+#if CGAL_VERSION_NR < 1050200000
typename K::Power_distance_d k_power_dist = m_k.power_distance_d_object();
+#else
+ typename K::Compute_power_product_d k_power_dist = m_k.compute_power_product_d_object();
+#endif
// Construct a weighted point equivalent to the star sphere
Weighted_point star_sphere = k_constr_wp(compute_perturbed_point(i), m_squared_star_spheres_radii_incl_margin[i]);
@@ -1144,7 +1159,7 @@ class Tangential_complex {
#ifdef GUDHI_TC_VERY_VERBOSE
std::cerr << "Inserted " << num_inserted_points << " points / " << num_attempts_to_insert_points
- << " attemps to compute the star\n";
+ << " attempts to compute the star\n";
#endif
update_star(i);
@@ -2022,7 +2037,7 @@ class Tangential_complex {
// and their center vertex
Stars_container m_stars;
std::vector<FT> m_squared_star_spheres_radii_incl_margin;
- boost::optional<FT> m_max_squared_edge_length;
+ std::optional<FT> m_max_squared_edge_length;
#ifdef GUDHI_TC_USE_ANOTHER_POINT_SET_FOR_TANGENT_SPACE_ESTIM
Points m_points_for_tse;
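
The boost::optional to std::optional migration above is mechanical; a minimal sketch of the correspondence (illustration only, requires C++17):

    #include <optional>  // replaces <boost/optional.hpp>

    std::optional<double> radius;                     // empty, like boost::none
    void clear_radius() { radius = std::nullopt; }    // was: radius = boost::none;
    bool has_radius() { return radius.has_value(); }  // API is unchanged
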
diff --git a/src/Tangential_complex/test/test_tangential_complex.cpp b/src/Tangential_complex/test/test_tangential_complex.cpp
index 46caec54..a24b9ae2 100644
--- a/src/Tangential_complex/test/test_tangential_complex.cpp
+++ b/src/Tangential_complex/test/test_tangential_complex.cpp
@@ -14,6 +14,7 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
@@ -76,14 +77,14 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) {
points.push_back(Point(point.size(), point.begin(), point.end()));
point = {1.0, 1.0};
points.push_back(Point(point.size(), point.begin(), point.end()));
- std::cout << "points = " << points.size() << std::endl;
+ std::clog << "points = " << points.size() << std::endl;
Kernel k;
// Compute the TC
TC tc(points, INTRINSIC_DIM, k);
tc.compute_tangential_complex();
TC::Num_inconsistencies num_inc = tc.number_of_inconsistent_simplices();
- std::cout << "TC vertices = " << tc.number_of_vertices() << " - simplices = " << num_inc.num_simplices <<
+ std::clog << "TC vertices = " << tc.number_of_vertices() << " - simplices = " << num_inc.num_simplices <<
" - inc simplices = " << num_inc.num_inconsistent_simplices <<
" - inc stars = " << num_inc.num_inconsistent_stars << std::endl;
@@ -95,7 +96,7 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) {
// Export the TC into a Simplex_tree
Gudhi::Simplex_tree<> stree;
tc.create_complex(stree);
- std::cout << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl;
+ std::clog << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl;
BOOST_CHECK(stree.num_vertices() == 4);
BOOST_CHECK(stree.num_simplices() == 6);
@@ -109,7 +110,7 @@ BOOST_AUTO_TEST_CASE(test_mini_tangential) {
// Export the TC into a Simplex_tree
tc.create_complex(stree);
- std::cout << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl;
+ std::clog << "ST vertices = " << stree.num_vertices() << " - simplices = " << stree.num_simplices() << std::endl;
BOOST_CHECK(stree.num_vertices() == 4);
BOOST_CHECK(stree.num_simplices() == 6);
@@ -139,7 +140,7 @@ BOOST_AUTO_TEST_CASE(test_basic_example_throw) {
// Compute the TC
TC tc(points, INTRINSIC_DIM, k);
tc.set_max_squared_edge_length(0.01);
- std::cout << "test_basic_example_throw - set_max_squared_edge_length(0.01) to make GUDHI_CHECK fail" << std::endl;
+ std::clog << "test_basic_example_throw - set_max_squared_edge_length(0.01) to make GUDHI_CHECK fail" << std::endl;
BOOST_CHECK_THROW(tc.compute_tangential_complex(), std::invalid_argument);
}
diff --git a/src/Toplex_map/benchmark/CMakeLists.txt b/src/Toplex_map/benchmark/CMakeLists.txt
index 2d58a156..6703d9d0 100644
--- a/src/Toplex_map/benchmark/CMakeLists.txt
+++ b/src/Toplex_map/benchmark/CMakeLists.txt
@@ -1,3 +1,7 @@
project(Toplex_map_benchmark)
add_executable(Toplex_map_benchmark benchmark_tm.cpp)
+
+if (TBB_FOUND)
+ target_link_libraries(Toplex_map_benchmark ${TBB_LIBRARIES})
+endif()
diff --git a/src/Toplex_map/benchmark/benchmark_tm.cpp b/src/Toplex_map/benchmark/benchmark_tm.cpp
index feb5d01c..d078fcf8 100644
--- a/src/Toplex_map/benchmark/benchmark_tm.cpp
+++ b/src/Toplex_map/benchmark/benchmark_tm.cpp
@@ -25,10 +25,10 @@ typedef std::pair<Simplex_tree<>::Simplex_handle, bool> typePairSimplexBool;
class ST_wrapper {
public:
void insert_simplex(const Simplex& tau) {
- /*std::cout << "insert_simplex - " << simplexTree.num_simplices() << " - ";
+ /*std::clog << "insert_simplex - " << simplexTree.num_simplices() << " - ";
for (auto v : tau)
- std::cout << v << ", ";
- std::cout << std::endl;
+ std::clog << v << ", ";
+ std::clog << std::endl;
*/
simplexTree.insert_simplex_and_subfaces(tau);
}
@@ -104,22 +104,22 @@ void chrono(int n, int d) {
auto c2 = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
if (c3 > 0)
- std::cout << c1 << "\t \t" << c2 << "\t \t" << c3 << "\t \t" << K.num_maximal_simplices() << std::endl;
+ std::clog << c1 << "\t \t" << c2 << "\t \t" << c3 << "\t \t" << K.num_maximal_simplices() << std::endl;
else
- std::cout << c1 << "\t \t" << c2 << "\t \tN/A\t \t" << K.num_maximal_simplices() << std::endl;
+ std::clog << c1 << "\t \t" << c2 << "\t \tN/A\t \t" << K.num_maximal_simplices() << std::endl;
}
int main() {
for (int d = 5; d <= 40; d += 5) {
- std::cout << "d=" << d << " \t Insertions \t Membership \t Contractions \t Size" << std::endl;
- std::cout << "T Map \t \t";
+ std::clog << "d=" << d << " \t Insertions \t Membership \t Contractions \t Size" << std::endl;
+ std::clog << "T Map \t \t";
chrono<Toplex_map>(n, d);
- std::cout << "Lazy \t \t";
+ std::clog << "Lazy \t \t";
chrono<Lazy_toplex_map>(n, d);
if (d <= 15) {
- std::cout << "ST \t \t";
+ std::clog << "ST \t \t";
chrono<ST_wrapper>(n, d);
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
}
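
The benchmark times each container with std::chrono, as in the c1/c2/c3 variables above. A reduced sketch of that timing pattern, independent of the Toplex_map types (illustrative only):

    #include <chrono>
    #include <iostream>

    int main() {
      auto start = std::chrono::steady_clock::now();
      volatile long sum = 0;
      for (long i = 0; i < 10000000; ++i) sum += i;  // the work being measured
      auto end = std::chrono::steady_clock::now();
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
      std::clog << "elapsed = " << ms << " ms" << std::endl;
      return 0;
    }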
diff --git a/src/Toplex_map/example/simple_toplex_map.cpp b/src/Toplex_map/example/simple_toplex_map.cpp
index 7538c989..c432608e 100644
--- a/src/Toplex_map/example/simple_toplex_map.cpp
+++ b/src/Toplex_map/example/simple_toplex_map.cpp
@@ -31,72 +31,72 @@ int main(int argc, char* const argv[]) {
/* o---o */
/* 1 3 */
- std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
+ std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
<< std::endl;
// Browse maximal cofaces
Simplex sigma3 = {2, 3};
- std::cout << "Maximal cofaces of {2, 3} are :" << std::endl;
+ std::clog << "Maximal cofaces of {2, 3} are :" << std::endl;
for (auto simplex_ptr : tm.maximal_cofaces(sigma3, 2)) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
Simplex sigma4 = {1, 3};
assert(tm.membership(sigma4));
Gudhi::Toplex_map::Vertex v = tm.contraction(1, 3);
- std::cout << "After contraction(1, 3) - " << v << std::endl;
+ std::clog << "After contraction(1, 3) - " << v << std::endl;
/* Simplex is: */
/* 2 4 */
/* o---o */
/* \5/ */
/* o */
/* 3 */
- std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
+ std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
<< std::endl;
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
Simplex sigma5 = {3, 4};
assert(tm.membership(sigma5));
v = tm.contraction(3, 4);
- std::cout << "After contraction(3, 4) - " << v << std::endl;
+ std::clog << "After contraction(3, 4) - " << v << std::endl;
/* Simplex is: */
/* 2 4 */
/* o---o */
/* \X/ */
/* o */
/* 5 */
- std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
+ std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
<< std::endl;
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
tm.insert_simplex(sigma1);
@@ -109,44 +109,44 @@ int main(int argc, char* const argv[]) {
/* 1 3 */
tm.remove_simplex(sigma1);
- std::cout << "After remove_simplex(1, 2, 3)" << std::endl;
+ std::clog << "After remove_simplex(1, 2, 3)" << std::endl;
/* Simplex is: */
/* 2 4 */
/* o---o */
/* / \5/ */
/* o---o */
/* 1 3 */
- std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
+ std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
<< std::endl;
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
tm.remove_vertex(1);
- std::cout << "After remove_vertex(1)" << std::endl;
+ std::clog << "After remove_vertex(1)" << std::endl;
/* Simplex is: */
/* 2 4 */
/* o---o */
/* \5/ */
/* o */
/* 3 */
- std::cout << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
+ std::clog << "num max simplices = " << tm.num_maximal_simplices() << " - num vertices = " << tm.num_vertices()
<< std::endl;
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
return 0;
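
For readers skimming the diff, a reduced sketch of the Toplex_map operations this example exercises (assuming the gudhi/Toplex_map.h header; only calls visible above are used):

    #include <gudhi/Toplex_map.h>
    #include <iostream>
    #include <vector>

    int main() {
      Gudhi::Toplex_map tm;
      std::vector<Gudhi::Toplex_map::Vertex> sigma = {1, 2, 3};
      tm.insert_simplex(sigma);  // {1, 2, 3} becomes a maximal simplex (toplex)
      std::clog << "num max simplices = " << tm.num_maximal_simplices()
                << " - num vertices = " << tm.num_vertices() << std::endl;
      for (auto simplex_ptr : tm.maximal_simplices()) {  // browse maximal simplices
        for (auto v : *simplex_ptr) std::clog << v << ", ";
        std::clog << std::endl;
      }
      return 0;
    }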
diff --git a/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp b/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp
index 639bf35a..994cee8e 100644
--- a/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp
+++ b/src/Toplex_map/test/lazy_toplex_map_unit_test.cpp
@@ -20,43 +20,43 @@ BOOST_AUTO_TEST_CASE(toplex_map) {
using Vertex = Gudhi::Lazy_toplex_map::Vertex;
Gudhi::Lazy_toplex_map tm;
- std::cout << "insert_simplex {1, 2, 3, 4}" << std::endl;
+ std::clog << "insert_simplex {1, 2, 3, 4}" << std::endl;
std::vector<Vertex> sigma1 = {1, 2, 3, 4};
tm.insert_simplex(sigma1);
- std::cout << "insert_simplex {5, 2, 3, 6}" << std::endl;
+ std::clog << "insert_simplex {5, 2, 3, 6}" << std::endl;
std::vector<Vertex> sigma2 = {5, 2, 3, 6};
tm.insert_simplex(sigma2);
- std::cout << "insert_simplex {5}" << std::endl;
+ std::clog << "insert_simplex {5}" << std::endl;
std::vector<Vertex> sigma3 = {5};
tm.insert_simplex(sigma3);
- std::cout << "insert_simplex {4, 5, 3}" << std::endl;
+ std::clog << "insert_simplex {4, 5, 3}" << std::endl;
std::vector<Vertex> sigma6 = {4, 5, 3};
tm.insert_simplex(sigma6);
- std::cout << "insert_simplex {4, 5, 9}" << std::endl;
+ std::clog << "insert_simplex {4, 5, 9}" << std::endl;
std::vector<Vertex> sigma7 = {4, 5, 9};
tm.insert_simplex(sigma7);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 5);
std::vector<Vertex> sigma4 = {5, 2, 3};
std::vector<Vertex> sigma5 = {5, 2, 7};
BOOST_CHECK(tm.membership(sigma4));
BOOST_CHECK(!tm.membership(sigma5));
- std::cout << "insert_simplex {5, 2, 7}" << std::endl;
+ std::clog << "insert_simplex {5, 2, 7}" << std::endl;
tm.insert_simplex(sigma5);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 6);
BOOST_CHECK(tm.membership(sigma5));
- std::cout << "contraction(4,5)" << std::endl;
+ std::clog << "contraction(4,5)" << std::endl;
auto r = tm.contraction(4, 5);
- std::cout << "r=" << r << std::endl;
+ std::clog << "r=" << r << std::endl;
BOOST_CHECK(r == 5);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 6);
std::vector<Vertex> sigma8 = {1, 2, 3};
@@ -68,11 +68,11 @@ BOOST_AUTO_TEST_CASE(toplex_map) {
BOOST_CHECK(tm.membership(sigma8));
BOOST_CHECK(tm.membership(sigma9));
- std::cout << "remove_simplex({2, 7, r = 5})" << std::endl;
+ std::clog << "remove_simplex({2, 7, r = 5})" << std::endl;
tm.remove_simplex(sigma9);
BOOST_CHECK(!tm.membership(sigma9));
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 8);
// {2, 7, 5} is removed, but verify its edges are still there
@@ -88,71 +88,71 @@ BOOST_AUTO_TEST_CASE(toplex_map_empty_toplex) {
using Vertex = Gudhi::Lazy_toplex_map::Vertex;
Gudhi::Lazy_toplex_map tm;
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 0);
- std::cout << "num_vertices = " << tm.num_vertices() << std::endl;
+ std::clog << "num_vertices = " << tm.num_vertices() << std::endl;
BOOST_CHECK(tm.num_vertices() == 0);
- std::cout << "Check an empty simplex is a member." << std::endl;
+ std::clog << "Check an empty simplex is a member." << std::endl;
std::vector<Vertex> empty_sigma = {};
BOOST_CHECK(tm.membership(empty_sigma));
- std::cout << "Check the edge 2,7 is not a member." << std::endl;
+ std::clog << "Check the edge 2,7 is not a member." << std::endl;
std::vector<Vertex> edge = {2, 7};
BOOST_CHECK(!tm.membership(edge));
- std::cout << "Insert an empty simplex." << std::endl;
+ std::clog << "Insert an empty simplex." << std::endl;
tm.insert_simplex(empty_sigma);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 0);
- std::cout << "num_vertices = " << tm.num_vertices() << std::endl;
+ std::clog << "num_vertices = " << tm.num_vertices() << std::endl;
BOOST_CHECK(tm.num_vertices() == 0);
- std::cout << "Check an empty simplex is a member." << std::endl;
+ std::clog << "Check an empty simplex is a member." << std::endl;
BOOST_CHECK(tm.membership(empty_sigma));
- std::cout << "Check the edge 2,7 is not a member." << std::endl;
+ std::clog << "Check the edge 2,7 is not a member." << std::endl;
BOOST_CHECK(!tm.membership(edge));
- std::cout << "Insert edge 2,7." << std::endl;
+ std::clog << "Insert edge 2,7." << std::endl;
tm.insert_simplex(edge);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 1);
- std::cout << "num_vertices = " << tm.num_vertices() << std::endl;
+ std::clog << "num_vertices = " << tm.num_vertices() << std::endl;
BOOST_CHECK(tm.num_vertices() == 2);
- std::cout << "Check an empty simplex is a member." << std::endl;
+ std::clog << "Check an empty simplex is a member." << std::endl;
BOOST_CHECK(tm.membership(empty_sigma));
- std::cout << "Check the edge 2,7 is a member." << std::endl;
+ std::clog << "Check the edge 2,7 is a member." << std::endl;
BOOST_CHECK(tm.membership(edge));
- std::cout << "contraction(2,7)" << std::endl;
+ std::clog << "contraction(2,7)" << std::endl;
auto r = tm.contraction(2, 7);
- std::cout << "r=" << r << std::endl;
+ std::clog << "r=" << r << std::endl;
BOOST_CHECK(r == 7);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 1);
- std::cout << "num_vertices = " << tm.num_vertices() << std::endl;
+ std::clog << "num_vertices = " << tm.num_vertices() << std::endl;
BOOST_CHECK(tm.num_vertices() == 1);
- std::cout << "Check an empty simplex is a member." << std::endl;
+ std::clog << "Check an empty simplex is a member." << std::endl;
BOOST_CHECK(tm.membership(empty_sigma));
- std::cout << "Check the edge 2,7 is not a member." << std::endl;
+ std::clog << "Check the edge 2,7 is not a member." << std::endl;
BOOST_CHECK(!tm.membership(edge));
- std::cout << "Remove the vertex 7." << std::endl;
+ std::clog << "Remove the vertex 7." << std::endl;
std::vector<Vertex> vertex = {7};
tm.remove_simplex(vertex);
- std::cout << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 0);
- std::cout << "num_vertices = " << tm.num_vertices() << std::endl;
+ std::clog << "num_vertices = " << tm.num_vertices() << std::endl;
BOOST_CHECK(tm.num_vertices() == 0);
- std::cout << "Check an empty simplex is a member." << std::endl;
+ std::clog << "Check an empty simplex is a member." << std::endl;
BOOST_CHECK(tm.membership(empty_sigma));
- std::cout << "Check the edge 2,7 is not a member." << std::endl;
+ std::clog << "Check the edge 2,7 is not a member." << std::endl;
BOOST_CHECK(!tm.membership(edge));
}
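
The empty-toplex test pins down a convention worth noting: the empty simplex is always a member, and inserting it is a no-op that leaves both counters at zero. A condensed sketch of that behaviour (assuming the gudhi/Lazy_toplex_map.h header):

    #include <gudhi/Lazy_toplex_map.h>
    #include <cassert>
    #include <vector>

    int main() {
      Gudhi::Lazy_toplex_map tm;
      std::vector<Gudhi::Lazy_toplex_map::Vertex> empty_sigma = {};
      assert(tm.membership(empty_sigma));       // the empty simplex is always a member
      tm.insert_simplex(empty_sigma);           // no-op:
      assert(tm.num_maximal_simplices() == 0);  // still no maximal simplex
      assert(tm.num_vertices() == 0);           // and no vertex
      return 0;
    }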
diff --git a/src/Toplex_map/test/toplex_map_unit_test.cpp b/src/Toplex_map/test/toplex_map_unit_test.cpp
index 24ec679b..0d0751ff 100644
--- a/src/Toplex_map/test/toplex_map_unit_test.cpp
+++ b/src/Toplex_map/test/toplex_map_unit_test.cpp
@@ -20,31 +20,31 @@ BOOST_AUTO_TEST_CASE(toplex_map) {
using Vertex = Gudhi::Toplex_map::Vertex;
Gudhi::Toplex_map tm;
- std::cout << "insert_simplex {1, 2, 3, 4}" << std::endl;
+ std::clog << "insert_simplex {1, 2, 3, 4}" << std::endl;
std::vector<Vertex> sigma1 = {1, 2, 3, 4};
tm.insert_simplex(sigma1);
- std::cout << "insert_simplex {5, 2, 3, 6}" << std::endl;
+ std::clog << "insert_simplex {5, 2, 3, 6}" << std::endl;
std::vector<Vertex> sigma2 = {5, 2, 3, 6};
tm.insert_simplex(sigma2);
- std::cout << "insert_simplex {5}" << std::endl;
+ std::clog << "insert_simplex {5}" << std::endl;
std::vector<Vertex> sigma3 = {5};
tm.insert_simplex(sigma3);
- std::cout << "insert_simplex {4, 5, 3}" << std::endl;
+ std::clog << "insert_simplex {4, 5, 3}" << std::endl;
std::vector<Vertex> sigma6 = {4, 5, 3};
tm.insert_simplex(sigma6);
- std::cout << "insert_simplex {4, 5, 9}" << std::endl;
+ std::clog << "insert_simplex {4, 5, 9}" << std::endl;
std::vector<Vertex> sigma7 = {4, 5, 9};
tm.insert_simplex(sigma7);
- std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 4);
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
BOOST_CHECK(tm.maximality(*simplex_ptr));
}
@@ -58,37 +58,37 @@ BOOST_AUTO_TEST_CASE(toplex_map) {
std::vector<Vertex> sigma5 = {5, 2, 7};
BOOST_CHECK(tm.membership(sigma4));
BOOST_CHECK(!tm.membership(sigma5));
- std::cout << "insert_simplex {5, 2, 7}" << std::endl;
+ std::clog << "insert_simplex {5, 2, 7}" << std::endl;
tm.insert_simplex(sigma5);
- std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 5);
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
BOOST_CHECK(tm.maximality(*simplex_ptr));
}
BOOST_CHECK(tm.membership(sigma5));
- std::cout << "contraction(4,5)" << std::endl;
+ std::clog << "contraction(4,5)" << std::endl;
auto r = tm.contraction(4, 5);
- std::cout << "r=" << r << std::endl;
+ std::clog << "r=" << r << std::endl;
BOOST_CHECK(r == 5);
- std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 4);
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
BOOST_CHECK(tm.maximality(*simplex_ptr));
}
@@ -101,19 +101,19 @@ BOOST_AUTO_TEST_CASE(toplex_map) {
BOOST_CHECK(tm.membership(sigma8));
BOOST_CHECK(tm.membership(sigma9));
- std::cout << "remove_simplex({2, 7, r = 5})" << std::endl;
+ std::clog << "remove_simplex({2, 7, r = 5})" << std::endl;
tm.remove_simplex(sigma9);
BOOST_CHECK(!tm.membership(sigma9));
- std::cout << "num_maximal_simplices" << tm.num_maximal_simplices() << std::endl;
+ std::clog << "num_maximal_simplices = " << tm.num_maximal_simplices() << std::endl;
BOOST_CHECK(tm.num_maximal_simplices() == 5);
// Browse maximal simplices
- std::cout << "Maximal simplices are :" << std::endl;
+ std::clog << "Maximal simplices are :" << std::endl;
for (auto simplex_ptr : tm.maximal_simplices()) {
for (auto v : *simplex_ptr) {
- std::cout << v << ", ";
+ std::clog << v << ", ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
BOOST_CHECK(tm.maximality(*simplex_ptr));
}
// {2, 7, 5} is removed, but verify its edges are still there
diff --git a/src/Witness_complex/doc/Witness_complex_doc.h b/src/Witness_complex/doc/Witness_complex_doc.h
index 62203054..c66b106e 100644
--- a/src/Witness_complex/doc/Witness_complex_doc.h
+++ b/src/Witness_complex/doc/Witness_complex_doc.h
@@ -92,11 +92,11 @@ int main(int argc, char * const argv[]) {
// Choose landmarks (one can choose either of the two methods below)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, nbL,
+ Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
// Compute witness complex
- Witness_complex witness_complex(landmarks,
- point_vector);
+ Witness_complex witness_complex(landmarks, point_vector);
witness_complex.create_complex(simplex_tree, alpha2, lim_dim);
}
@@ -108,14 +108,14 @@ int main(int argc, char * const argv[]) {
Here is an example of constructing a strong witness complex filtration and computing persistence on it:
- \include Witness_complex/strong_witness_persistence.cpp
+ \include strong_witness_persistence.cpp
\section witnessexample3 Example3: Computing relaxed witness complex persistence from a distance matrix
In this example we compute the relaxed witness complex persistence from a given matrix of closest landmarks to each witness.
Each landmark is given as the pair (index, distance).
- \include Witness_complex/example_nearest_landmark_table.cpp
+ \include example_nearest_landmark_table.cpp
*/
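
The change to choose_n_farthest_points recurs throughout this series: the first argument is now a squared-distance functor obtained from the kernel rather than the kernel itself. A minimal sketch of the updated call (assuming the gudhi/choose_n_farthest_points.h header), built in the same point style as the examples:

    #include <gudhi/choose_n_farthest_points.h>
    #include <CGAL/Epick_d.h>
    #include <iterator>
    #include <vector>

    using K = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
    using Point_d = K::Point_d;

    int main() {
      std::vector<std::vector<double>> coords = {{0., 0.}, {1., 0.}, {0., 1.}, {1., 1.}};
      std::vector<Point_d> points, landmarks;
      for (const auto& c : coords) points.push_back(Point_d(c.size(), c.begin(), c.end()));
      // The distance functor replaces the former kernel argument.
      Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), points, 2,
                                                   Gudhi::subsampling::random_starting_point,
                                                   std::back_inserter(landmarks));
      return 0;
    }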
diff --git a/src/Witness_complex/example/CMakeLists.txt b/src/Witness_complex/example/CMakeLists.txt
index 2659798e..5e9736ed 100644
--- a/src/Witness_complex/example/CMakeLists.txt
+++ b/src/Witness_complex/example/CMakeLists.txt
@@ -7,8 +7,6 @@ endif()
add_test(NAME Witness_complex_example_nearest_landmark_table
COMMAND $<TARGET_FILE:Witness_complex_example_nearest_landmark_table>)
-install(TARGETS Witness_complex_example_nearest_landmark_table DESTINATION bin)
-
# CGAL and Eigen3 are required for Euclidean version of Witness
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable( Witness_complex_example_off example_witness_complex_off.cpp )
@@ -33,10 +31,5 @@ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_test(NAME Witness_complex_example_strong_off_test_torus
COMMAND $<TARGET_FILE:Witness_complex_example_strong_off>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "20" "1.0" "3")
-
- install(TARGETS Witness_complex_example_off DESTINATION bin)
- install(TARGETS Witness_complex_example_sphere DESTINATION bin)
- install(TARGETS Witness_complex_example_strong_off DESTINATION bin)
-
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Witness_complex/example/example_nearest_landmark_table.cpp b/src/Witness_complex/example/example_nearest_landmark_table.cpp
index 441900c1..14101847 100644
--- a/src/Witness_complex/example/example_nearest_landmark_table.cpp
+++ b/src/Witness_complex/example/example_nearest_landmark_table.cpp
@@ -33,7 +33,7 @@ int main(int argc, char * const argv[]) {
Witness_complex witness_complex(nlt);
witness_complex.create_complex(simplex_tree, .41);
- std::cout << "Number of simplices: " << simplex_tree.num_simplices() << std::endl;
+ std::clog << "Number of simplices: " << simplex_tree.num_simplices() << std::endl;
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
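
This example builds the witness complex directly from a nearest landmark table. For context, a skeletal version of how such a table feeds the complex (assuming the gudhi/Witness_complex.h header; each row lists (landmark id, squared distance) pairs for one witness, sorted by distance):

    #include <gudhi/Witness_complex.h>
    #include <gudhi/Simplex_tree.h>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      using Nearest_landmark_range = std::vector<std::pair<std::size_t, double>>;
      using Nearest_landmark_table = std::vector<Nearest_landmark_range>;
      using Witness_complex = Gudhi::witness_complex::Witness_complex<Nearest_landmark_table>;

      Nearest_landmark_table nlt = {{{0, 0.0}, {1, 0.1}, {2, 0.2}},
                                    {{1, 0.0}, {2, 0.1}, {0, 0.3}},
                                    {{2, 0.0}, {0, 0.1}, {1, 0.2}}};

      Gudhi::Simplex_tree<> simplex_tree;
      Witness_complex witness_complex(nlt);
      witness_complex.create_complex(simplex_tree, .41);  // same relaxation as above
      std::clog << "Number of simplices: " << simplex_tree.num_simplices() << std::endl;
      return 0;
    }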
diff --git a/src/Witness_complex/example/example_strong_witness_complex_off.cpp b/src/Witness_complex/example/example_strong_witness_complex_off.cpp
index 19f73836..2bb135bf 100644
--- a/src/Witness_complex/example/example_strong_witness_complex_off.cpp
+++ b/src/Witness_complex/example/example_strong_witness_complex_off.cpp
@@ -38,12 +38,13 @@ int main(int argc, char* const argv[]) {
}
point_vector = Point_vector(off_reader.get_point_cloud());
- std::cout << "Successfully read " << point_vector.size() << " points.\n";
- std::cout << "Ambient dimension is " << point_vector[0].dimension() << ".\n";
+ std::clog << "Successfully read " << point_vector.size() << " points.\n";
+ std::clog << "Ambient dimension is " << point_vector[0].dimension() << ".\n";
// Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector,
+ nbL, Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
@@ -52,6 +53,6 @@ int main(int argc, char* const argv[]) {
witness_complex.create_complex(simplex_tree, alpha2, lim_dim);
end = clock();
- std::cout << "Strong witness complex took " << static_cast<double>(end - start) / CLOCKS_PER_SEC << " s. \n";
- std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
+ std::clog << "Strong witness complex took " << static_cast<double>(end - start) / CLOCKS_PER_SEC << " s. \n";
+ std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
}
diff --git a/src/Witness_complex/example/example_witness_complex_off.cpp b/src/Witness_complex/example/example_witness_complex_off.cpp
index be11c955..e1384c73 100644
--- a/src/Witness_complex/example/example_witness_complex_off.cpp
+++ b/src/Witness_complex/example/example_witness_complex_off.cpp
@@ -42,12 +42,13 @@ int main(int argc, char * const argv[]) {
}
point_vector = Point_vector(off_reader.get_point_cloud());
- std::cout << "Successfully read " << point_vector.size() << " points.\n";
- std::cout << "Ambient dimension is " << point_vector[0].dimension() << ".\n";
+ std::clog << "Successfully read " << point_vector.size() << " points.\n";
+ std::clog << "Ambient dimension is " << point_vector[0].dimension() << ".\n";
// Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, nbL,
+ Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
// Compute witness complex
start = clock();
@@ -56,7 +57,7 @@ int main(int argc, char * const argv[]) {
witness_complex.create_complex(simplex_tree, alpha2, lim_dim);
end = clock();
- std::cout << "Witness complex took "
+ std::clog << "Witness complex took "
<< static_cast<double>(end - start) / CLOCKS_PER_SEC << " s. \n";
- std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
+ std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
}
diff --git a/src/Witness_complex/example/example_witness_complex_sphere.cpp b/src/Witness_complex/example/example_witness_complex_sphere.cpp
index 9e3c972d..12a56de4 100644
--- a/src/Witness_complex/example/example_witness_complex_sphere.cpp
+++ b/src/Witness_complex/example/example_witness_complex_sphere.cpp
@@ -47,13 +47,13 @@ int main(int argc, char* const argv[]) {
Gudhi::Simplex_tree<> simplex_tree;
Point_Vector point_vector, landmarks;
generate_points_sphere(point_vector, nbP, 4);
- std::cout << "Successfully generated " << point_vector.size() << " points.\n";
- std::cout << "Ambient dimension is " << point_vector[0].size() << ".\n";
+ std::clog << "Successfully generated " << point_vector.size() << " points.\n";
+ std::clog << "Ambient dimension is " << point_vector[0].size() << ".\n";
// Choose landmarks
start = clock();
// Gudhi::subsampling::pick_n_random_points(point_vector, number_of_landmarks, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, number_of_landmarks,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, number_of_landmarks,
Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
@@ -62,8 +62,8 @@ int main(int argc, char* const argv[]) {
witness_complex.create_complex(simplex_tree, 0);
end = clock();
double time = static_cast<double>(end - start) / CLOCKS_PER_SEC;
- std::cout << "Witness complex for " << number_of_landmarks << " landmarks took " << time << " s. \n";
- std::cout << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
+ std::clog << "Witness complex for " << number_of_landmarks << " landmarks took " << time << " s. \n";
+ std::clog << "Number of simplices is: " << simplex_tree.num_simplices() << "\n";
l_time.push_back(std::make_pair(nbP, time));
}
write_data(l_time, "w_time.dat");
diff --git a/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h b/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
index 2ae1d6e0..1aebb045 100644
--- a/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
+++ b/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
@@ -18,7 +18,7 @@ namespace Gudhi {
namespace witness_complex {
- /* \class Active_witness
+ /** \class Active_witness
* \brief Class representing a list of nearest neighbors to a given witness.
* \details Every element is a pair of a landmark identifier and the squared distance to it.
*/
diff --git a/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h b/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
index 4f8fddba..18f19650 100644
--- a/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
+++ b/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
@@ -18,7 +18,7 @@ namespace Gudhi {
namespace witness_complex {
-/* \brief Iterator in the nearest landmark list.
+/** \brief Iterator in the nearest landmark list.
* \details After the iterator reaches the end of the list,
* the list is augmented by a (nearest landmark, distance) pair if possible.
* If all the landmarks are present in the list, the iterator returns the specific end value
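
This and the neighbouring `/*` to `/**` changes are not cosmetic: Doxygen only parses comment blocks that open with `/**` (or `/*!`), so \brief text inside a plain `/*` comment never reached the generated documentation. A minimal illustration (hypothetical class name):

    /* Plain comment: ignored by Doxygen, any \brief inside is lost. */

    /** \brief Documentation block: parsed by Doxygen and attached to
     * the declaration that follows. */
    class Example_iterator {};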
diff --git a/src/Witness_complex/include/gudhi/Strong_witness_complex.h b/src/Witness_complex/include/gudhi/Strong_witness_complex.h
index b3699f77..ddc0da32 100644
--- a/src/Witness_complex/include/gudhi/Strong_witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Strong_witness_complex.h
@@ -125,7 +125,7 @@ class Strong_witness_complex {
//@}
private:
- /* \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
+ /** \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
* The iterator is needed to know how far we can take landmarks to form simplices.
* simplex is the prefix of the simplices to insert.
* The landmark pointed to by aw_it is added to all formed simplices.
diff --git a/src/Witness_complex/include/gudhi/Witness_complex.h b/src/Witness_complex/include/gudhi/Witness_complex.h
index d655c7f6..66ae7af2 100644
--- a/src/Witness_complex/include/gudhi/Witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Witness_complex.h
@@ -127,7 +127,7 @@ class Witness_complex {
//@}
private:
- /* \brief Adds recursively all the faces of a certain dimension dim witnessed by the same witness.
+ /** \brief Adds recursively all the faces of a certain dimension dim witnessed by the same witness.
* The iterator is needed to know how far we can take landmarks to form simplices.
* simplex is the prefix of the simplices to insert.
* The output value indicates whether the witness remains active or not.
diff --git a/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h b/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
index 5845728a..007ab084 100644
--- a/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
+++ b/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
@@ -11,7 +11,7 @@
#ifndef WITNESS_COMPLEX_ALL_FACES_IN_H_
#define WITNESS_COMPLEX_ALL_FACES_IN_H_
-/* \brief Check if the facets of the k-dimensional simplex witnessed
+/** \brief Check if the facets of the k-dimensional simplex witnessed
* by witness witness_id are already in the complex.
* inserted_vertex is the handle of the (k+1)-th vertex witnessed by witness_id
*/
diff --git a/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp b/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp
index 4f718203..9b19f6dc 100644
--- a/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp
+++ b/src/Witness_complex/test/test_euclidean_simple_witness_complex.cpp
@@ -82,12 +82,12 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
witnesses);
eucl_witness_complex.create_complex(complex, 0);
- std::cout << "complex.num_simplices() = " << complex.num_simplices() << std::endl;
+ std::clog << "complex.num_simplices() = " << complex.num_simplices() << std::endl;
BOOST_CHECK(complex.num_simplices() == 24);
eucl_witness_complex.create_complex(relaxed_complex, 8.01);
- std::cout << "relaxed_complex.num_simplices() = " << relaxed_complex.num_simplices() << std::endl;
+ std::clog << "relaxed_complex.num_simplices() = " << relaxed_complex.num_simplices() << std::endl;
BOOST_CHECK(relaxed_complex.num_simplices() == 239);
// The corner simplex {0,2,5,7} and its cofaces are missing.
@@ -95,12 +95,12 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
WitnessComplex witness_complex(nearest_landmark_table);
witness_complex.create_complex(complex_ne, 0);
- std::cout << "complex.num_simplices() = " << complex_ne.num_simplices() << std::endl;
+ std::clog << "complex.num_simplices() = " << complex_ne.num_simplices() << std::endl;
BOOST_CHECK(complex_ne.num_simplices() == 24);
witness_complex.create_complex(relaxed_complex_ne, 8.01);
- std::cout << "relaxed_complex.num_simplices() = " << relaxed_complex_ne.num_simplices() << std::endl;
+ std::clog << "relaxed_complex.num_simplices() = " << relaxed_complex_ne.num_simplices() << std::endl;
BOOST_CHECK(relaxed_complex_ne.num_simplices() == 239);
@@ -111,10 +111,10 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
eucl_strong_witness_complex.create_complex(strong_relaxed_complex, 9.1);
eucl_strong_witness_complex.create_complex(strong_relaxed_complex2, 9.1, 2);
- std::cout << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex.num_simplices() << std::endl;
+ std::clog << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex.num_simplices() << std::endl;
BOOST_CHECK(strong_relaxed_complex.num_simplices() == 239);
- std::cout << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2.num_simplices() << std::endl;
+ std::clog << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2.num_simplices() << std::endl;
BOOST_CHECK(strong_relaxed_complex2.num_simplices() == 92);
@@ -124,10 +124,10 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
strong_witness_complex.create_complex(strong_relaxed_complex_ne, 9.1);
strong_witness_complex.create_complex(strong_relaxed_complex2_ne, 9.1, 2);
- std::cout << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex_ne.num_simplices() << std::endl;
+ std::clog << "strong_relaxed_complex.num_simplices() = " << strong_relaxed_complex_ne.num_simplices() << std::endl;
BOOST_CHECK(strong_relaxed_complex_ne.num_simplices() == 239);
- std::cout << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2_ne.num_simplices() << std::endl;
+ std::clog << "strong_relaxed_complex2.num_simplices() = " << strong_relaxed_complex2_ne.num_simplices() << std::endl;
BOOST_CHECK(strong_relaxed_complex2_ne.num_simplices() == 92);
diff --git a/src/Witness_complex/test/test_simple_witness_complex.cpp b/src/Witness_complex/test/test_simple_witness_complex.cpp
index 9e3509d3..7c48cc54 100644
--- a/src/Witness_complex/test/test_simple_witness_complex.cpp
+++ b/src/Witness_complex/test/test_simple_witness_complex.cpp
@@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
Witness_complex witness_complex(nlt);
BOOST_CHECK(witness_complex.create_complex(stree, 4.1));
- std::cout << "Number of simplices: " << stree.num_simplices() << std::endl;
+ std::clog << "Number of simplices: " << stree.num_simplices() << std::endl;
BOOST_CHECK(stree.num_simplices() == 31);
// Check when complex not empty
@@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(simple_witness_complex) {
BOOST_CHECK(!witness_complex.create_complex(stree2, -0.02));
witness_complex.create_complex(stree2, 4.1, 2);
- std::cout << "Number of simplices: " << stree2.num_simplices() << std::endl;
+ std::clog << "Number of simplices: " << stree2.num_simplices() << std::endl;
BOOST_CHECK(stree2.num_simplices() == 25);
}
diff --git a/src/Witness_complex/utilities/CMakeLists.txt b/src/Witness_complex/utilities/CMakeLists.txt
index 3ee0c2f6..60fea0b4 100644
--- a/src/Witness_complex/utilities/CMakeLists.txt
+++ b/src/Witness_complex/utilities/CMakeLists.txt
@@ -2,26 +2,26 @@ project(Witness_complex_utilities)
# CGAL and Eigen3 are required for Euclidean version of Witness
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
-
- add_executable ( Witness_complex_strong_witness_persistence strong_witness_persistence.cpp )
- target_link_libraries(Witness_complex_strong_witness_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
+ if(TARGET Boost::program_options)
+ add_executable ( Witness_complex_strong_witness_persistence strong_witness_persistence.cpp )
+ target_link_libraries(Witness_complex_strong_witness_persistence Boost::program_options)
- add_executable ( Witness_complex_weak_witness_persistence weak_witness_persistence.cpp )
- target_link_libraries(Witness_complex_weak_witness_persistence ${Boost_PROGRAM_OPTIONS_LIBRARY})
+ add_executable ( Witness_complex_weak_witness_persistence weak_witness_persistence.cpp )
+ target_link_libraries(Witness_complex_weak_witness_persistence Boost::program_options)
- if (TBB_FOUND)
- target_link_libraries(Witness_complex_strong_witness_persistence ${TBB_LIBRARIES})
- target_link_libraries(Witness_complex_weak_witness_persistence ${TBB_LIBRARIES})
- endif()
-
- add_test(NAME Witness_complex_strong_test_torus_persistence
- COMMAND $<TARGET_FILE:Witness_complex_strong_witness_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-l" "20" "-a" "0.5")
- add_test(NAME Witness_complex_weak_test_torus_persistence
- COMMAND $<TARGET_FILE:Witness_complex_weak_witness_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-l" "20" "-a" "0.5")
+ if (TBB_FOUND)
+ target_link_libraries(Witness_complex_strong_witness_persistence ${TBB_LIBRARIES})
+ target_link_libraries(Witness_complex_weak_witness_persistence ${TBB_LIBRARIES})
+ endif()
- install(TARGETS Witness_complex_strong_witness_persistence DESTINATION bin)
- install(TARGETS Witness_complex_weak_witness_persistence DESTINATION bin)
+ add_test(NAME Witness_complex_strong_test_torus_persistence
+ COMMAND $<TARGET_FILE:Witness_complex_strong_witness_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-l" "20" "-a" "0.5")
+ add_test(NAME Witness_complex_weak_test_torus_persistence
+ COMMAND $<TARGET_FILE:Witness_complex_weak_witness_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" "-l" "20" "-a" "0.5")
+ install(TARGETS Witness_complex_strong_witness_persistence DESTINATION bin)
+ install(TARGETS Witness_complex_weak_witness_persistence DESTINATION bin)
+ endif()
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Witness_complex/utilities/strong_witness_persistence.cpp b/src/Witness_complex/utilities/strong_witness_persistence.cpp
index 75ba1f4b..b2ecad82 100644
--- a/src/Witness_complex/utilities/strong_witness_persistence.cpp
+++ b/src/Witness_complex/utilities/strong_witness_persistence.cpp
@@ -56,12 +56,13 @@ int main(int argc, char* argv[]) {
exit(-1); // ----- >>
}
witnesses = Point_vector(off_reader.get_point_cloud());
- std::cout << "Successfully read " << witnesses.size() << " points.\n";
- std::cout << "Ambient dimension is " << witnesses[0].dimension() << ".\n";
+ std::clog << "Successfully read " << witnesses.size() << " points.\n";
+ std::clog << "Ambient dimension is " << witnesses[0].dimension() << ".\n";
// Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), witnesses, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), witnesses, nbL,
+ Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
@@ -69,8 +70,8 @@ int main(int argc, char* argv[]) {
strong_witness_complex.create_complex(simplex_tree, max_squared_alpha, lim_d);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Sort the simplices in the order of the filtration
simplex_tree.initialize_filtration();
@@ -107,7 +108,7 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s
visible.add_options()("help,h", "produce help message")("landmarks,l", po::value<int>(&nbL),
"Number of landmarks to choose from the point cloud.")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. By default, it is printed to the standard output")(
"max-sq-alpha,a", po::value<Filtration_value>(&max_squared_alpha)->default_value(default_alpha),
"Maximal squared relaxation parameter.")(
"field-charac,p", po::value<int>(&p)->default_value(11),
@@ -128,17 +129,17 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Strong witness complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Strong witness complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Witness_complex/utilities/weak_witness_persistence.cpp b/src/Witness_complex/utilities/weak_witness_persistence.cpp
index 0e5b9cc1..c7ead7de 100644
--- a/src/Witness_complex/utilities/weak_witness_persistence.cpp
+++ b/src/Witness_complex/utilities/weak_witness_persistence.cpp
@@ -56,12 +56,13 @@ int main(int argc, char* argv[]) {
exit(-1); // ----- >>
}
witnesses = Point_vector(off_reader.get_point_cloud());
- std::cout << "Successfully read " << witnesses.size() << " points.\n";
- std::cout << "Ambient dimension is " << witnesses[0].dimension() << ".\n";
+ std::clog << "Successfully read " << witnesses.size() << " points.\n";
+ std::clog << "Ambient dimension is " << witnesses[0].dimension() << ".\n";
// Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), witnesses, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), witnesses, nbL,
+ Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
@@ -69,8 +70,8 @@ int main(int argc, char* argv[]) {
witness_complex.create_complex(simplex_tree, max_squared_alpha, lim_d);
- std::cout << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
- std::cout << " and has dimension " << simplex_tree.dimension() << " \n";
+ std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
+ std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
// Sort the simplices in the order of the filtration
simplex_tree.initialize_filtration();
@@ -107,7 +108,7 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s
visible.add_options()("help,h", "produce help message")("landmarks,l", po::value<int>(&nbL),
"Number of landmarks to choose from the point cloud.")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
- "Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "Name of file in which the persistence diagram is written. By default, it is printed to the standard output")(
"max-sq-alpha,a", po::value<Filtration_value>(&max_squared_alpha)->default_value(default_alpha),
"Maximal squared relaxation parameter.")(
"field-charac,p", po::value<int>(&p)->default_value(11),
@@ -128,17 +129,17 @@ void program_options(int argc, char* argv[], int& nbL, std::string& file_name, s
po::notify(vm);
if (vm.count("help") || !vm.count("input-file")) {
- std::cout << std::endl;
- std::cout << "Compute the persistent homology with coefficient field Z/pZ \n";
- std::cout << "of a Weak witness complex defined on a set of input points.\n \n";
- std::cout << "The output diagram contains one bar per line, written with the convention: \n";
- std::cout << " p dim b d \n";
- std::cout << "where dim is the dimension of the homological feature,\n";
- std::cout << "b and d are respectively the birth and death of the feature and \n";
- std::cout << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
-
- std::cout << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::cout << visible << std::endl;
+ std::clog << std::endl;
+ std::clog << "Compute the persistent homology with coefficient field Z/pZ \n";
+ std::clog << "of a Weak witness complex defined on a set of input points.\n \n";
+ std::clog << "The output diagram contains one bar per line, written with the convention: \n";
+ std::clog << " p dim b d \n";
+ std::clog << "where dim is the dimension of the homological feature,\n";
+ std::clog << "b and d are respectively the birth and death of the feature and \n";
+ std::clog << "p is the characteristic of the field Z/pZ used for homology coefficients." << std::endl << std::endl;
+
+ std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
+ std::clog << visible << std::endl;
exit(-1);
}
}
diff --git a/src/Witness_complex/utilities/witnesscomplex.md b/src/Witness_complex/utilities/witnesscomplex.md
index 7ea397b9..e994e0b8 100644
--- a/src/Witness_complex/utilities/witnesscomplex.md
+++ b/src/Witness_complex/utilities/witnesscomplex.md
@@ -29,7 +29,7 @@ and `p` is the characteristic of the field *Z/pZ* used for homology coefficients
* `-h [ --help ]` Produce help message
* `-l [ --landmarks ]` Number of landmarks to choose from the point cloud.
-* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::cout.
+* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, it is printed to the standard output.
* `-a [ --max-sq-alpha ]` (default = inf) Maximal squared relaxation parameter.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
@@ -60,7 +60,7 @@ and `p` is the characteristic of the field *Z/pZ* used for homology coefficients
* `-h [ --help ]` Produce help message
* `-l [ --landmarks ]` Number of landmarks to choose from the point cloud.
-* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, print in std::cout.
+* `-o [ --output-file ]` Name of file in which the persistence diagram is written. By default, it is printed to the standard output.
* `-a [ --max-sq-alpha ]` (default = inf) Maximal squared relaxation parameter.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
diff --git a/src/cmake/modules/FindTBB.cmake b/src/cmake/modules/FindTBB.cmake
index 13f4d929..e6c42dc7 100644
--- a/src/cmake/modules/FindTBB.cmake
+++ b/src/cmake/modules/FindTBB.cmake
@@ -34,7 +34,7 @@
#
# GvdB: Mac OS X distribution places libraries directly in lib directory.
#
-# For backwards compatibility, you may explicitely set the CMake variables TBB_ARCHITECTURE and TBB_COMPILER.
+# For backwards compatibility, you may explicitly set the CMake variables TBB_ARCHITECTURE and TBB_COMPILER.
# TBB_ARCHITECTURE [ ia32 | em64t | itanium ]
# which architecture to use
# TBB_COMPILER e.g. vc9 or cc3.2.3_libc2.3.2_kernel2.4.21 or cc4.0.1_os10.4.9
@@ -54,8 +54,8 @@
# TBB_MALLOC_DEBUG_LIBRARY, the TBB debug malloc library
# TBB_FOUND, If false, don't try to use TBB.
# TBB_INTERFACE_VERSION, as defined in tbb/tbb_stddef.h
-# TBB_MALLOCPROXY_DEBUG_LIBRARY, the TBB debug malloc_proxy library (not included in TBB_LIBRARIES since it's optionnal)
-# TBB_MALLOCPROXY_RELEASE_LIBRARY, the TBB release malloc_proxy library (not included in TBB_LIBRARIES since it's optionnal)
+# TBB_MALLOCPROXY_DEBUG_LIBRARY, the TBB debug malloc_proxy library (not included in TBB_LIBRARIES since it's optional)
+# TBB_MALLOCPROXY_RELEASE_LIBRARY, the TBB release malloc_proxy library (not included in TBB_LIBRARIES since it's optional)
include(CheckCXXSourceCompiles)
diff --git a/src/cmake/modules/GUDHI_boost_test.cmake b/src/cmake/modules/GUDHI_boost_test.cmake
index c3b29883..4a13404b 100644
--- a/src/cmake/modules/GUDHI_boost_test.cmake
+++ b/src/cmake/modules/GUDHI_boost_test.cmake
@@ -9,7 +9,6 @@ if (WITH_GUDHI_BOOST_TEST_COVERAGE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg")
endif()
set(GUDHI_UT_LOG_FORMAT "--log_format=XML")
- set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml")
set(GUDHI_UT_LOG_LEVEL "--log_level=test_suite")
set(GUDHI_UT_REPORT_LEVEL "--report_level=no")
else (WITH_GUDHI_BOOST_TEST_COVERAGE)
@@ -19,7 +18,11 @@ else (WITH_GUDHI_BOOST_TEST_COVERAGE)
endif(WITH_GUDHI_BOOST_TEST_COVERAGE)
function(gudhi_add_boost_test unitary_test)
- target_link_libraries(${unitary_test} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ if (WITH_GUDHI_BOOST_TEST_COVERAGE)
+ set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml")
+ endif(WITH_GUDHI_BOOST_TEST_COVERAGE)
+
+ target_link_libraries(${unitary_test} Boost::unit_test_framework)
add_test(NAME ${unitary_test} COMMAND $<TARGET_FILE:${unitary_test}>
${GUDHI_UT_LOG_FORMAT} ${GUDHI_UT_LOG_SINK}
${GUDHI_UT_LOG_LEVEL} ${GUDHI_UT_REPORT_LEVEL})
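
gudhi_add_boost_test links every unit test against the imported Boost::unit_test_framework target and forwards the log options set above. A minimal self-contained test of the kind this function registers (generic Boost.Test, not a specific GUDHI test):

    #define BOOST_TEST_DYN_LINK
    #define BOOST_TEST_MODULE "minimal_example"
    #include <boost/test/unit_test.hpp>

    BOOST_AUTO_TEST_CASE(minimal_example) {
      int expected = 2;
      BOOST_CHECK(1 + 1 == expected);  // reported via --log_format/--log_sink as configured above
    }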
diff --git a/src/cmake/modules/GUDHI_compilation_flags.cmake b/src/cmake/modules/GUDHI_compilation_flags.cmake
index 34c2e065..b43ccf73 100644
--- a/src/cmake/modules/GUDHI_compilation_flags.cmake
+++ b/src/cmake/modules/GUDHI_compilation_flags.cmake
@@ -1,7 +1,6 @@
# This file manages the compilation flags required by GUDHI
include(TestCXXAcceptsFlag)
-include(CheckCXXSourceCompiles)
# add a compiler flag only if it is accepted
macro(add_cxx_compiler_flag _flag)
@@ -12,33 +11,8 @@ macro(add_cxx_compiler_flag _flag)
endif()
endmacro()
-function(can_cgal_use_cxx11_thread_local)
- # This is because of https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/tss.h
- # CGAL is using boost thread if thread_local is not ready (requires XCode 8 for Mac).
- # The test in https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/config.h
- # #if __has_feature(cxx_thread_local) || \
- # ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \
- # ( _MSC_VER >= 1900 )
- # #define CGAL_CAN_USE_CXX11_THREAD_LOCAL
- # #endif
- set(CGAL_CAN_USE_CXX11_THREAD_LOCAL "
- int main() {
- #ifndef __has_feature
- #define __has_feature(x) 0 // Compatibility with non-clang compilers.
- #endif
- #if __has_feature(cxx_thread_local) || \
- ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \
- ( _MSC_VER >= 1900 )
- bool has_feature_thread_local = true;
- #else
- // Explicit error of compilation for CMake test purpose - has_feature_thread_local is not defined
- #endif
- bool result = has_feature_thread_local;
- } ")
- check_cxx_source_compiles("${CGAL_CAN_USE_CXX11_THREAD_LOCAL}" CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
-endfunction()
-
-set (CMAKE_CXX_STANDARD 14)
+set (CMAKE_CXX_STANDARD 17)
+# This number needs to be changed in python/CMakeLists.txt at the same time
enable_testing()
@@ -58,16 +32,6 @@ if (DEBUG_TRACES)
add_definitions(-DDEBUG_TRACES)
endif()
-set(GUDHI_CAN_USE_CXX11_THREAD_LOCAL "
- int main() {
- thread_local int result = 0;
- return result;
- } ")
-check_cxx_source_compiles("${GUDHI_CAN_USE_CXX11_THREAD_LOCAL}" GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
-if (GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
- add_definitions(-DGUDHI_CAN_USE_CXX11_THREAD_LOCAL)
-endif()
-
if(CMAKE_BUILD_TYPE MATCHES Debug)
message("++ Debug compilation flags are: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}")
else()
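
Both feature probes removed above only checked that the compiler accepts the thread_local keyword. With CMAKE_CXX_STANDARD raised to 17, the keyword is guaranteed by the language (it has been since C++11), so the probes and the GUDHI_CAN_USE_CXX11_THREAD_LOCAL definition are redundant. Code like the following is now assumed to compile everywhere:

    #include <iostream>

    thread_local int per_thread_counter = 0;  // guaranteed since C++11

    int main() {
      ++per_thread_counter;
      std::clog << "counter = " << per_thread_counter << std::endl;
      return 0;
    }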
diff --git a/src/cmake/modules/GUDHI_doxygen_target.cmake b/src/cmake/modules/GUDHI_doxygen_target.cmake
index 7a84c4e0..327513da 100644
--- a/src/cmake/modules/GUDHI_doxygen_target.cmake
+++ b/src/cmake/modules/GUDHI_doxygen_target.cmake
@@ -8,14 +8,61 @@ if(DOXYGEN_FOUND)
get_property(DOXYGEN_EXECUTABLE TARGET Doxygen::doxygen PROPERTY IMPORTED_LOCATION)
endif()
- add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${GUDHI_USER_VERSION_DIR}/Doxyfile
- WORKING_DIRECTORY ${GUDHI_USER_VERSION_DIR}
- COMMENT "Generating API documentation with Doxygen in ${GUDHI_USER_VERSION_DIR}/doc/html/" VERBATIM)
+ message("++ Project = ${CMAKE_PROJECT_NAME}")
+ if (CMAKE_PROJECT_NAME STREQUAL "GUDHIdev")
+ # Set Doxyfile.in variables for the developer version
+ set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}/src")
+ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/")
+ set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/ \\ \n")
+ endif()
+ endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/common/doc")
+ set(GUDHI_DOXYGEN_UTILS_PATH "*/utilities")
+ endif()
+ if (CMAKE_PROJECT_NAME STREQUAL "GUDHI")
+ # Set Doxyfile.in variables for the user version
+ set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}")
+ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/common")
+ set(GUDHI_DOXYGEN_UTILS_PATH "utilities/*")
+ endif()
- if(TARGET user_version)
- # In dev version, doxygen target depends on user_version target. Not existing in user version
- add_dependencies(doxygen user_version)
+ message("++ Doxygen version ${DOXYGEN_VERSION}")
+ if (DOXYGEN_VERSION VERSION_LESS 1.9.3)
+ set(GUDHI_DOXYGEN_CLASS_DIAGRAMS "CLASS_DIAGRAMS = NO")
+ else()
+ set(GUDHI_DOXYGEN_CLASS_DIAGRAMS "")
+ endif()
+ if (DOXYGEN_VERSION VERSION_LESS 1.9.2)
+ set(GUDHI_DOXYGEN_MATHJAX_VERSION "MATHJAX_VERSION = MathJax_2")
+ set(GUDHI_DOXYGEN_MATHJAX_EXTENSIONS "TeX/AMSmath TeX/AMSsymbols")
+ else()
+ set(GUDHI_DOXYGEN_MATHJAX_VERSION "MATHJAX_VERSION = MathJax_3")
+ set(GUDHI_DOXYGEN_MATHJAX_EXTENSIONS "ams")
endif()
+
+ configure_file(${GUDHI_DOXYGEN_SOURCE_PREFIX}/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile" @ONLY)
+
+ add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Generating API documentation with Doxygen in the 'html' directory" VERBATIM)
else()
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "cpp-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
endif()
diff --git a/src/cmake/modules/GUDHI_modules.cmake b/src/cmake/modules/GUDHI_modules.cmake
index aab1dd08..ec1f756b 100644
--- a/src/cmake/modules/GUDHI_modules.cmake
+++ b/src/cmake/modules/GUDHI_modules.cmake
@@ -2,7 +2,7 @@
set(GUDHI_MODULES_FULL_LIST "")
function(add_gudhi_module file_path)
- option("WITH_MODULE_GUDHI_${file_path}" "Activate/desactivate ${file_path} compilation and installation" ON)
+ option("WITH_MODULE_GUDHI_${file_path}" "Activate/deactivate ${file_path} compilation and installation" ON)
if (WITH_MODULE_GUDHI_${file_path})
set(GUDHI_MODULES ${GUDHI_MODULES} ${file_path} CACHE INTERNAL "GUDHI_MODULES")
else()
@@ -10,19 +10,13 @@ function(add_gudhi_module file_path)
endif()
# Required by user_version
set(GUDHI_MODULES_FULL_LIST ${GUDHI_MODULES_FULL_LIST} ${file_path} PARENT_SCOPE)
- # Include module headers is independant - You may ask for no Alpha complex module but Python interface i.e.
+  # Including module headers is independent - e.g. one may disable the Alpha complex module but still build the Python interface
if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/src/${file_path}/include/)
include_directories(src/${file_path}/include/)
endif()
endfunction(add_gudhi_module)
-option(WITH_GUDHI_BENCHMARK "Activate/desactivate benchmark compilation" OFF)
-option(WITH_GUDHI_EXAMPLE "Activate/desactivate examples compilation and installation" OFF)
-option(WITH_GUDHI_PYTHON "Activate/desactivate python module compilation and installation" ON)
-option(WITH_GUDHI_TEST "Activate/desactivate examples compilation and installation" ON)
-option(WITH_GUDHI_UTILITIES "Activate/desactivate utilities compilation and installation" ON)
-
if (WITH_GUDHI_BENCHMARK)
set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};benchmark")
endif()
@@ -30,7 +24,12 @@ if (WITH_GUDHI_EXAMPLE)
set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};example")
endif()
if (WITH_GUDHI_TEST)
- set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};test")
+  # All tests use the Boost unit test framework
+ if(TARGET Boost::unit_test_framework)
+ set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};test")
+ else()
+ message("++ WITH_GUDHI_TEST but no TARGET Boost::unit_test_framework")
+ endif()
endif()
if (WITH_GUDHI_UTILITIES)
set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};utilities")
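For context, add_gudhi_module is invoked once per module from the top-level build; a hedged sketch of the resulting per-module switches (the module name is only an example, the real list is maintained in src/CMakeLists.txt):

    # Registers the module and creates the cached option
    # WITH_MODULE_GUDHI_Alpha_complex, defaulting to ON
    add_gudhi_module(Alpha_complex)
    # A single module can then be excluded at configure time:
    #   cmake -DWITH_MODULE_GUDHI_Alpha_complex=OFF ..
    # Disabled modules are presumably recorded in GUDHI_MISSING_MODULES,
    # as the cpp-documentation case above does.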
diff --git a/src/cmake/modules/GUDHI_options.cmake b/src/cmake/modules/GUDHI_options.cmake
new file mode 100644
index 00000000..8379e3c6
--- /dev/null
+++ b/src/cmake/modules/GUDHI_options.cmake
@@ -0,0 +1,15 @@
+option(WITH_GUDHI_BENCHMARK "Activate/deactivate benchmark compilation" OFF)
+option(WITH_GUDHI_EXAMPLE "Activate/deactivate examples compilation and installation" OFF)
+option(WITH_GUDHI_REMOTE_TEST "Activate/deactivate datasets fetching test which uses the Internet" OFF)
+option(WITH_GUDHI_PYTHON "Activate/deactivate python module compilation and installation" ON)
+option(WITH_GUDHI_TEST "Activate/deactivate tests compilation and installation" ON)
+option(WITH_GUDHI_UTILITIES "Activate/deactivate utilities compilation and installation" ON)
+option(WITH_GUDHI_THIRD_PARTY "Activate/deactivate third party libraries cmake detection. Setting it to OFF is useful for targets like doxygen or user_version" ON)
+
+if (NOT WITH_GUDHI_THIRD_PARTY)
+ set (WITH_GUDHI_BENCHMARK OFF)
+ set (WITH_GUDHI_EXAMPLE OFF)
+ set (WITH_GUDHI_PYTHON OFF)
+ set (WITH_GUDHI_TEST OFF)
+ set (WITH_GUDHI_UTILITIES OFF)
+endif()
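A usage note on the override block: set() without CACHE creates a normal variable that shadows the cached option for the remainder of the configure run, so WITH_GUDHI_THIRD_PARTY=OFF wins even if, say, WITH_GUDHI_TEST=ON was passed explicitly. The typical documentation-only configuration (matching the installation manual further below) is:

    cmake -DWITH_GUDHI_THIRD_PARTY=OFF ..
    make doxygen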
diff --git a/src/cmake/modules/GUDHI_submodules.cmake b/src/cmake/modules/GUDHI_submodules.cmake
new file mode 100644
index 00000000..9ede852d
--- /dev/null
+++ b/src/cmake/modules/GUDHI_submodules.cmake
@@ -0,0 +1,5 @@
+# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
+set(HERA_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/include)
+set(HERA_INCLUDE_DIR ${HERA_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find hera/{wasserstein.h,bottleneck.h}")
+# Since everything is cleanly under include/hera/, there is no harm in always including it
+include_directories(${HERA_INCLUDE_DIR})
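A hedged usage example: with a preinstalled Hera providing hera/wasserstein.h and hera/bottleneck.h under a system prefix (the path below is only illustrative), the bundled submodule can be bypassed at configure time:

    cmake -DHERA_INCLUDE_DIR=/usr/local/include ..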
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 24a34150..2cf6787e 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -1,10 +1,14 @@
 # This file manages third party libraries required by GUDHI
-find_package(Boost 1.56.0 REQUIRED COMPONENTS system filesystem unit_test_framework program_options thread)
+find_package(Boost 1.66.0 QUIET OPTIONAL_COMPONENTS filesystem unit_test_framework program_options)
-if(NOT Boost_FOUND)
+# Boost_FOUND is not reliable
+if(NOT Boost_VERSION)
message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.")
-endif(NOT Boost_FOUND)
+endif(NOT Boost_VERSION)
+include_directories(${Boost_INCLUDE_DIRS})
+message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS})
+message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS})
find_package(GMP)
if(GMP_FOUND)
@@ -15,6 +19,15 @@ if(GMP_FOUND)
endif()
endif()
+# From windows vcpkg eigen 3.4.0#2, the build fails with
+# error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+# cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+# The workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+ message("++ User explicit demand to force EIGEN_DEFAULT_DENSE_INDEX_TYPE to int")
+ add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int)
+endif()
+
# In CMakeLists.txt, when include(${CGAL_USE_FILE}), CMAKE_CXX_FLAGS are overwritten.
# cf. http://doc.cgal.org/latest/Manual/installation.html#title40
# A workaround is to include(${CGAL_USE_FILE}) before adding "-std=c++11".
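The Eigen index-type guard above is opt-in; a hedged invocation for an affected Windows/vcpkg setup (flag name exactly as defined above) would be:

    cmake -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON ..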
@@ -82,70 +95,92 @@ add_definitions( -DBOOST_ALL_DYN_LINK )
# problem on Mac with boost_system and boost_thread
add_definitions( -DBOOST_SYSTEM_NO_DEPRECATED )
-INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS})
-LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
-
-message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS})
-message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS})
-
-# Find the correct Python interpreter.
-# Can be set with -DPYTHON_EXECUTABLE=/usr/bin/python3 or -DPython_ADDITIONAL_VERSIONS=3 for instance.
-find_package( PythonInterp )
-
-# find_python_module tries to import module in Python interpreter and to retrieve its version number
-# returns ${PYTHON_MODULE_NAME_UP}_VERSION and ${PYTHON_MODULE_NAME_UP}_FOUND
-function( find_python_module PYTHON_MODULE_NAME )
- string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}; print(${PYTHON_MODULE_NAME}.__version__)"
- RESULT_VARIABLE PYTHON_MODULE_RESULT
- OUTPUT_VARIABLE PYTHON_MODULE_VERSION
- ERROR_VARIABLE PYTHON_MODULE_ERROR)
- if(PYTHON_MODULE_RESULT EQUAL 0)
- # Remove carriage return
- string(STRIP ${PYTHON_MODULE_VERSION} PYTHON_MODULE_VERSION)
- message ("++ Python module ${PYTHON_MODULE_NAME} - Version ${PYTHON_MODULE_VERSION} found")
-
- set(${PYTHON_MODULE_NAME_UP}_VERSION ${PYTHON_MODULE_VERSION} PARENT_SCOPE)
- set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
- else()
- message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
- - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
- - PYTHON_MODULE_VERSION = ${PYTHON_MODULE_VERSION}
- - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
- unset(${PYTHON_MODULE_NAME_UP}_VERSION PARENT_SCOPE)
- set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+if (WITH_GUDHI_PYTHON)
+ # Find the correct Python interpreter.
+ # Can be set with -DPYTHON_EXECUTABLE=/usr/bin/python3 or -DPython_ADDITIONAL_VERSIONS=3 for instance.
+ find_package( PythonInterp )
+
+ # find_python_module tries to import module in Python interpreter and to retrieve its version number
+ # returns ${PYTHON_MODULE_NAME_UP}_VERSION and ${PYTHON_MODULE_NAME_UP}_FOUND
+ function( find_python_module PYTHON_MODULE_NAME )
+ string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}; print(${PYTHON_MODULE_NAME}.__version__)"
+ RESULT_VARIABLE PYTHON_MODULE_RESULT
+ OUTPUT_VARIABLE PYTHON_MODULE_VERSION
+ ERROR_VARIABLE PYTHON_MODULE_ERROR)
+ if(PYTHON_MODULE_RESULT EQUAL 0)
+      # Replace newlines with spaces, as the version string can be multiline
+ string(REGEX REPLACE "\n" " " PYTHON_MODULE_VERSION "${PYTHON_MODULE_VERSION}")
+ message ("++ Python module ${PYTHON_MODULE_NAME} - Version ${PYTHON_MODULE_VERSION} found")
+
+ set(${PYTHON_MODULE_NAME_UP}_VERSION ${PYTHON_MODULE_VERSION} PARENT_SCOPE)
+ set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
+ else()
+ message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
+ - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
+ - PYTHON_MODULE_VERSION = ${PYTHON_MODULE_VERSION}
+ - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
+ unset(${PYTHON_MODULE_NAME_UP}_VERSION PARENT_SCOPE)
+ set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+ endif()
+ endfunction( find_python_module )
+
+ # For modules that do not define module.__version__
+ function( find_python_module_no_version PYTHON_MODULE_NAME )
+ string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}"
+ RESULT_VARIABLE PYTHON_MODULE_RESULT
+ ERROR_VARIABLE PYTHON_MODULE_ERROR)
+ if(PYTHON_MODULE_RESULT EQUAL 0)
+ message ("++ Python module ${PYTHON_MODULE_NAME} found")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
+ else()
+ message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
+ - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
+ - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+ endif()
+ endfunction( find_python_module_no_version )
+
+ if( PYTHONINTERP_FOUND )
+ find_python_module("cython")
+ find_python_module("pytest")
+ find_python_module("matplotlib")
+ find_python_module("numpy")
+ find_python_module("scipy")
+ find_python_module("sphinx")
+ find_python_module("sklearn")
+ find_python_module("ot")
+ find_python_module("pybind11")
+ find_python_module("torch")
+ find_python_module("pykeops")
+ find_python_module("eagerpy")
+ find_python_module_no_version("hnswlib")
+ find_python_module("tensorflow")
+ find_python_module("sphinx_paramlinks")
+ find_python_module_no_version("python_docs_theme")
endif()
-endfunction( find_python_module )
-
-if( PYTHONINTERP_FOUND )
- find_python_module("cython")
- find_python_module("pytest")
- find_python_module("matplotlib")
- find_python_module("numpy")
- find_python_module("scipy")
- find_python_module("sphinx")
- find_python_module("sklearn")
- find_python_module("ot")
-endif()
-
-if(NOT GUDHI_PYTHON_PATH)
- message(FATAL_ERROR "ERROR: GUDHI_PYTHON_PATH is not valid.")
-endif(NOT GUDHI_PYTHON_PATH)
-
-option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Usefull when setting rpath is not allowed" ON)
-
-if(PYTHONINTERP_FOUND AND CYTHON_FOUND)
- if(SPHINX_FOUND)
- # Documentation generation is available through sphinx
- find_program( SPHINX_PATH sphinx-build )
-
- if(NOT SPHINX_PATH)
- if(PYTHON_VERSION_MAJOR EQUAL 3)
- # In Python3, just hack sphinx-build if it does not exist
- set(SPHINX_PATH "${PYTHON_EXECUTABLE}" "${CMAKE_CURRENT_SOURCE_DIR}/${GUDHI_PYTHON_PATH}/doc/python3-sphinx-build.py")
- endif(PYTHON_VERSION_MAJOR EQUAL 3)
- endif(NOT SPHINX_PATH)
- endif(SPHINX_FOUND)
-endif(PYTHONINTERP_FOUND AND CYTHON_FOUND)
-
+
+ if(NOT GUDHI_PYTHON_PATH)
+ message(FATAL_ERROR "ERROR: GUDHI_PYTHON_PATH is not valid.")
+ endif(NOT GUDHI_PYTHON_PATH)
+
+ option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Useful when setting rpath is not allowed" ON)
+
+ if(PYTHONINTERP_FOUND AND CYTHON_FOUND)
+ if(SPHINX_FOUND)
+ # Documentation generation is available through sphinx
+ find_program( SPHINX_PATH sphinx-build )
+
+ if(NOT SPHINX_PATH)
+ if(PYTHON_VERSION_MAJOR EQUAL 3)
+ # In Python3, just hack sphinx-build if it does not exist
+ set(SPHINX_PATH "${PYTHON_EXECUTABLE}" "-m" "sphinx.cmd.build")
+ endif(PYTHON_VERSION_MAJOR EQUAL 3)
+ endif(NOT SPHINX_PATH)
+ endif(SPHINX_FOUND)
+ endif(PYTHONINTERP_FOUND AND CYTHON_FOUND)
+endif (WITH_GUDHI_PYTHON)
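A hedged sketch of how the two detection helpers are meant to be consumed downstream (variable names follow the TOUPPER convention coded above; the consuming code is illustrative only):

    find_python_module("numpy")               # sets NUMPY_FOUND and NUMPY_VERSION
    find_python_module_no_version("hnswlib")  # sets HNSWLIB_FOUND only
    if(NUMPY_FOUND)
      message("++ building the python module against numpy ${NUMPY_VERSION}")
    endif()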
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 0b361a0f..b9bf1414 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -14,23 +14,21 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
make_directory ${GUDHI_USER_VERSION_DIR}
COMMENT "user_version creation in ${GUDHI_USER_VERSION_DIR}")
-foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
- set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} doc/${GUDHI_MODULE}/ \\ \n")
-endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
-
-# Generate Doxyfile for Doxygen - cf. root CMakeLists.txt for explanation
-configure_file(${CMAKE_SOURCE_DIR}/src/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile" @ONLY)
-add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy ${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile ${GUDHI_USER_VERSION_DIR}/Doxyfile)
-
# Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
-file(COPY "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
-# Copy biblio directory for user version
+file(COPY "${CMAKE_SOURCE_DIR}/biblio/test" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio")
+
+# Append the CGAL citation to the bibliography - sphinx cannot deal with more than one bib file
+file(READ "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" CGAL_CITATION_CONTENT)
+file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib" "${CGAL_CITATION_CONTENT}")
+
+# Copy biblio files for user version
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib ${GUDHI_USER_VERSION_DIR}/biblio/bibliography.bib)
+add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib ${GUDHI_USER_VERSION_DIR}/biblio/how_to_cite_gudhi.bib)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/README.md ${GUDHI_USER_VERSION_DIR}/README.md)
@@ -44,9 +42,20 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/src/GUDHIConfig.cmake.in ${GUDHI_USER_VERSION_DIR}/GUDHIConfig.cmake.in)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/CMakeGUDHIVersion.txt ${GUDHI_USER_VERSION_DIR}/CMakeGUDHIVersion.txt)
-
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH} ${GUDHI_USER_VERSION_DIR}/python)
+ copy ${CMAKE_SOURCE_DIR}/src/Doxyfile.in ${GUDHI_USER_VERSION_DIR}/Doxyfile.in)
+
+# As cython generates .cpp files in-source, copy everything except the .cpp files from the python directory
+file(GLOB_RECURSE PYTHON_FILES ${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH}/*)
+foreach(PYTHON_FILE ${PYTHON_FILES})
+ get_filename_component(PYTHON_FILE_EXT ${PYTHON_FILE} EXT)
+ if (NOT "${PYTHON_FILE_EXT}" STREQUAL ".cpp")
+ string(REPLACE "${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH}/" "" RELATIVE_PYTHON_FILE ${PYTHON_FILE})
+ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy ${PYTHON_FILE} ${GUDHI_USER_VERSION_DIR}/python/${RELATIVE_PYTHON_FILE})
+ endif()
+endforeach()
+
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/data ${GUDHI_USER_VERSION_DIR}/data)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
@@ -54,6 +63,11 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI)
+if(HERA_INCLUDE_DIR STREQUAL HERA_INTERNAL_INCLUDE_DIR)
+ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/include ${GUDHI_USER_VERSION_DIR}/ext/hera/include)
+endif()
+
set(GUDHI_DIRECTORIES "doc;example;concept;utilities")
set(GUDHI_INCLUDE_DIRECTORIES "include/gudhi")
@@ -93,4 +107,4 @@ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
endforeach()
endforeach(GUDHI_INCLUDE_DIRECTORY ${GUDHI_INCLUDE_DIRECTORIES})
-endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) \ No newline at end of file
+endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
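For reference, the user_version tree assembled above is produced from the build directory; assuming the USER_VERSION_DIR cache variable from the installation notes still applies, a typical invocation is:

    cmake -DUSER_VERSION_DIR=/my/custom/folder ..
    make user_version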
diff --git a/src/common/benchmark/CMakeLists.txt b/src/common/benchmark/CMakeLists.txt
index a3787d6e..26e4e6af 100644
--- a/src/common/benchmark/CMakeLists.txt
+++ b/src/common/benchmark/CMakeLists.txt
@@ -1,3 +1,7 @@
project(common_benchmark)
add_executable(Graph_simplicial_complex_benchmark Graph_simplicial_complex_benchmark.cpp)
+
+if (TBB_FOUND)
+ target_link_libraries(Graph_simplicial_complex_benchmark ${TBB_LIBRARIES})
+endif()
diff --git a/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp b/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp
index 0fc145fd..6fe7a887 100644
--- a/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp
+++ b/src/common/benchmark/Graph_simplicial_complex_benchmark.cpp
@@ -66,7 +66,7 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi
Gudhi::Points_off_reader<std::vector<double>> off_reader(off_file_name);
assert(off_reader.is_valid());
- std::cout << "+ " << msg << std::endl;
+ std::clog << "+ " << msg << std::endl;
results_csv << "\"nb_points\";"
<< "\"nb_simplices\";"
@@ -82,7 +82,7 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi
Gudhi::Euclidean_distance());
// benchmark end
pg_compute_proximity_graph.end();
- std::cout << pg_compute_proximity_graph;
+ std::clog << pg_compute_proximity_graph;
Gudhi::Simplex_tree<> complex;
Gudhi::Clock st_create_clock(" benchmark_proximity_graph - complex creation");
@@ -91,13 +91,13 @@ void benchmark_proximity_graph(const std::string& msg, const std::string& off_fi
complex.insert_graph(proximity_graph);
// benchmark end
st_create_clock.end();
- std::cout << st_create_clock;
+ std::clog << st_create_clock;
results_csv << off_reader.get_point_cloud().size() << ";" << complex.num_simplices() << ";"
<< pg_compute_proximity_graph.num_seconds() << ";"
<< st_create_clock.num_seconds() << ";" << std::endl;
- std::cout << " benchmark_proximity_graph - nb simplices = " << complex.num_simplices() << std::endl;
+ std::clog << " benchmark_proximity_graph - nb simplices = " << complex.num_simplices() << std::endl;
}
int main(int argc, char * const argv[]) {
diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h
index c19b3444..1634b19e 100644
--- a/src/common/doc/examples.h
+++ b/src/common/doc/examples.h
@@ -1,96 +1,133 @@
-// List of GUDHI examples - Doxygen needs at least a file tag to analyse comments
-// In user_version, `find . -name "*.cpp"` in example and utilities folders
-/*! @file Examples
- * @example Alpha_complex/Alpha_complex_from_off.cpp
- * @example Alpha_complex/Alpha_complex_from_points.cpp
- * @example Bottleneck_distance/bottleneck_basic_example.cpp
- * @example Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp
- * @example Witness_complex/example_nearest_landmark_table.cpp
- * @example Witness_complex/example_witness_complex_off.cpp
- * @example Witness_complex/example_witness_complex_sphere.cpp
- * @example Witness_complex/example_strong_witness_complex_off.cpp
- * @example Simplex_tree/mini_simplex_tree.cpp
- * @example Simplex_tree/graph_expansion_with_blocker.cpp
- * @example Simplex_tree/simple_simplex_tree.cpp
- * @example Simplex_tree/simplex_tree_from_cliques_of_graph.cpp
- * @example Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp
- * @example Simplex_tree/cech_complex_cgal_mini_sphere_3d.cpp
- * @example Persistent_cohomology/plain_homology.cpp
- * @example Persistent_cohomology/persistence_from_file.cpp
- * @example Persistent_cohomology/rips_persistence_step_by_step.cpp
- * @example Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp
- * @example Persistent_cohomology/custom_persistence_sort.cpp
- * @example Persistent_cohomology/persistence_from_simple_simplex_tree.cpp
- * @example Persistent_cohomology/rips_multifield_persistence.cpp
- * @example Skeleton_blocker/Skeleton_blocker_from_simplices.cpp
- * @example Skeleton_blocker/Skeleton_blocker_iteration.cpp
- * @example Skeleton_blocker/Skeleton_blocker_link.cpp
- * @example Contraction/Garland_heckbert.cpp
- * @example Contraction/Rips_contraction.cpp
- * @example Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp
- * @example common/example_CGAL_3D_points_off_reader.cpp
- * @example common/example_vector_double_points_off_reader.cpp
- * @example common/example_CGAL_points_off_reader.cpp
- * @example Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp
- * @example Rips_complex/example_one_skeleton_rips_from_points.cpp
- * @example Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp
- * @example Rips_complex/example_rips_complex_from_off_file.cpp
- * @example Persistence_representations/persistence_intervals.cpp
- * @example Persistence_representations/persistence_vectors.cpp
- * @example Persistence_representations/persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_landscape_on_grid.cpp
- * @example Persistence_representations/persistence_landscape.cpp
- * @example Tangential_complex/example_basic.cpp
- * @example Tangential_complex/example_with_perturb.cpp
- * @example Subsampling/example_custom_kernel.cpp
- * @example Subsampling/example_choose_n_farthest_points.cpp
- * @example Subsampling/example_sparsify_point_set.cpp
- * @example Subsampling/example_pick_n_random_points.cpp
- * @example Nerve_GIC/CoordGIC.cpp
- * @example Nerve_GIC/Nerve.cpp
- * @example Nerve_GIC/FuncGIC.cpp
- * @example Nerve_GIC/VoronoiGIC.cpp
- * @example Spatial_searching/example_spatial_searching.cpp
- * @example Alpha_complex/alpha_complex_3d_persistence.cpp
- * @example Alpha_complex/alpha_complex_persistence.cpp
- * @example Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp
- * @example Bottleneck_distance/bottleneck_distance.cpp
- * @example Witness_complex/weak_witness_persistence.cpp
- * @example Witness_complex/strong_witness_persistence.cpp
- * @example Bitmap_cubical_complex/cubical_complex_persistence.cpp
- * @example Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp
- * @example common/off_file_from_shape_generator.cpp
- * @example Rips_complex/rips_distance_matrix_persistence.cpp
- * @example Rips_complex/rips_persistence.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
- * @example Persistence_representations/persistence_intervals/compute_number_of_dominant_intervals.cpp
- * @example Persistence_representations/persistence_intervals/plot_persistence_Betti_numbers.cpp
- * @example Persistence_representations/persistence_intervals/plot_persistence_intervals.cpp
- * @example Persistence_representations/persistence_intervals/plot_histogram_of_intervals_lengths.cpp
- * @example Persistence_representations/persistence_intervals/compute_bottleneck_distance.cpp
- * @example Persistence_representations/persistence_heat_maps/create_pssk.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
- * @example Persistence_representations/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
- * @example Persistence_representations/persistence_heat_maps/average_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/plot_persistence_heat_map.cpp
- * @example Persistence_representations/persistence_heat_maps/create_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_vectors/plot_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/compute_distance_of_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/average_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/create_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
- * @example Persistence_representations/persistence_landscapes/average_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/compute_scalar_product_of_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/create_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/compute_distance_of_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/plot_landscapes.cpp
+// List of GUDHI examples and utils - Doxygen needs at least a file tag to analyse comments
+// Generated from scripts/cpp_examples_for_doxygen.py
+/*! @file
+ * \section Witness_complex_example_section Witness_complex
+ * @example strong_witness_persistence.cpp
+ * @example weak_witness_persistence.cpp
+ * @example example_witness_complex_off.cpp
+ * @example example_strong_witness_complex_off.cpp
+ * @example example_nearest_landmark_table.cpp
+ * @example example_witness_complex_sphere.cpp
+ * \section Contraction_example_section Contraction
+ * @example Rips_contraction.cpp
+ * @example Garland_heckbert.cpp
+ * \section Simplex_tree_example_section Simplex_tree
+ * @example mini_simplex_tree.cpp
+ * @example cech_complex_cgal_mini_sphere_3d.cpp
+ * @example graph_expansion_with_blocker.cpp
+ * @example simple_simplex_tree.cpp
+ * @example simplex_tree_from_cliques_of_graph.cpp
+ * @example example_alpha_shapes_3_simplex_tree_from_off_file.cpp
+ * \section Persistent_cohomology_example_section Persistent_cohomology
+ * @example custom_persistence_sort.cpp
+ * @example rips_persistence_step_by_step.cpp
+ * @example persistence_from_file.cpp
+ * @example rips_persistence_via_boundary_matrix.cpp
+ * @example plain_homology.cpp
+ * @example rips_multifield_persistence.cpp
+ * @example persistence_from_simple_simplex_tree.cpp
+ * \section Subsampling_example_section Subsampling
+ * @example example_sparsify_point_set.cpp
+ * @example example_choose_n_farthest_points.cpp
+ * @example example_custom_distance.cpp
+ * @example example_pick_n_random_points.cpp
+ * \section Toplex_map_example_section Toplex_map
+ * @example simple_toplex_map.cpp
+ * \section Collapse_example_section Collapse
+ * @example distance_matrix_edge_collapse_rips_persistence.cpp
+ * @example point_cloud_edge_collapse_rips_persistence.cpp
+ * @example edge_collapse_conserve_persistence.cpp
+ * @example edge_collapse_basic_example.cpp
+ * \section Cech_complex_example_section Cech_complex
+ * @example cech_persistence.cpp
+ * @example cech_complex_example_from_points.cpp
+ * \section Bitmap_cubical_complex_example_section Bitmap_cubical_complex
+ * @example periodic_cubical_complex_persistence.cpp
+ * @example cubical_complex_persistence.cpp
+ * @example Random_bitmap_cubical_complex.cpp
+ * \section Coxeter_triangulation_example_section Coxeter_triangulation
+ * @example cell_complex_from_basic_circle_manifold.cpp
+ * @example manifold_tracing_flat_torus_with_boundary.cpp
+ * @example manifold_tracing_custom_function.cpp
+ * \section Nerve_GIC_example_section Nerve_GIC
+ * @example VoronoiGIC.cpp
+ * @example Nerve.cpp
+ * @example CoordGIC.cpp
+ * @example FuncGIC.cpp
+ * \section Tangential_complex_example_section Tangential_complex
+ * @example example_basic.cpp
+ * @example example_with_perturb.cpp
+ * \section Persistence_representations_example_section Persistence_representations
+ * @example persistence_vectors/create_persistence_vectors.cpp
+ * @example persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
+ * @example persistence_vectors/plot_persistence_vectors.cpp
+ * @example persistence_vectors/average_persistence_vectors.cpp
+ * @example persistence_vectors/compute_distance_of_persistence_vectors.cpp
+ * @example persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
+ * @example persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
+ * @example persistence_intervals/plot_persistence_Betti_numbers.cpp
+ * @example persistence_intervals/compute_bottleneck_distance.cpp
+ * @example persistence_intervals/compute_number_of_dominant_intervals.cpp
+ * @example persistence_intervals/plot_histogram_of_intervals_lengths.cpp
+ * @example persistence_intervals/plot_persistence_intervals.cpp
+ * @example persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/create_pssk.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
+ * @example persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/plot_persistence_heat_map.cpp
+ * @example persistence_heat_maps/create_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/average_persistence_heat_maps.cpp
+ * @example persistence_landscapes/compute_distance_of_landscapes.cpp
+ * @example persistence_landscapes/compute_scalar_product_of_landscapes.cpp
+ * @example persistence_landscapes/average_landscapes.cpp
+ * @example persistence_landscapes/plot_landscapes.cpp
+ * @example persistence_landscapes/create_landscapes.cpp
+ * @example persistence_landscape_on_grid.cpp
+ * @example persistence_intervals.cpp
+ * @example persistence_landscape.cpp
+ * @example persistence_vectors.cpp
+ * @example sliced_wasserstein.cpp
+ * @example persistence_heat_maps.cpp
+ * \section Spatial_searching_example_section Spatial_searching
+ * @example example_spatial_searching.cpp
+ * \section Bottleneck_distance_example_section Bottleneck_distance
+ * @example bottleneck_distance.cpp
+ * @example bottleneck_basic_example.cpp
+ * @example alpha_rips_persistence_bottleneck_distance.cpp
+ * \section common_example_section common
+ * @example off_file_from_shape_generator.cpp
+ * @example example_vector_double_points_off_reader.cpp
+ * @example example_CGAL_points_off_reader.cpp
+ * @example example_CGAL_3D_points_off_reader.cpp
+ * \section Alpha_complex_example_section Alpha_complex
+ * @example alpha_complex_3d_persistence.cpp
+ * @example alpha_complex_persistence.cpp
+ * @example Fast_alpha_complex_from_off.cpp
+ * @example Alpha_complex_3d_from_points.cpp
+ * @example Alpha_complex_from_off.cpp
+ * @example Weighted_alpha_complex_3d_from_points.cpp
+ * @example Weighted_alpha_complex_from_points.cpp
+ * @example Alpha_complex_from_points.cpp
+ * \section Skeleton_blocker_example_section Skeleton_blocker
+ * @example Skeleton_blocker_from_simplices.cpp
+ * @example Skeleton_blocker_link.cpp
+ * @example Skeleton_blocker_iteration.cpp
+ * \section Rips_complex_example_section Rips_complex
+ * @example rips_persistence.cpp
+ * @example rips_correlation_matrix_persistence.cpp
+ * @example sparse_rips_persistence.cpp
+ * @example rips_distance_matrix_persistence.cpp
+ * @example example_sparse_rips.cpp
+ * @example example_rips_complex_from_csv_distance_matrix_file.cpp
+ * @example example_one_skeleton_rips_from_correlation_matrix.cpp
+ * @example example_one_skeleton_rips_from_distance_matrix.cpp
+ * @example example_one_skeleton_rips_from_points.cpp
+ * @example example_rips_complex_from_off_file.cpp
*/
diff --git a/src/common/doc/footer.html b/src/common/doc/footer.html
index 4168c6bc..08a2cbd0 100644
--- a/src/common/doc/footer.html
+++ b/src/common/doc/footer.html
@@ -1,5 +1,9 @@
-<!-- HTML footer for doxygen 1.8.6-->
+<!-- HTML footer for doxygen 1.9.4-->
<!-- start footer part -->
+<!--BEGIN GENERATE_TREEVIEW-->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+<!--END GENERATE_TREEVIEW-->
+<ul>
<table style="width:100%">
<tr class="no-bullet shadow-black">
<td class="network-entypo">
@@ -10,14 +14,15 @@
<!--END PROJECT_NAME-->
</td>
<td class="network-entypo">
-<!--BEGIN GENERATE_TREEVIEW-->
$generatedby
<a href="http://www.doxygen.org/index.html">
Doxygen</a> $doxygenversion
-<!--END GENERATE_TREEVIEW-->
</td>
</tr>
</table>
-
+</ul>
+<!--BEGIN GENERATE_TREEVIEW-->
+</div>
+<!--END GENERATE_TREEVIEW-->
</body>
</html>
diff --git a/src/common/doc/header.html b/src/common/doc/header.html
index 9fdb2321..a97e1b2f 100644
--- a/src/common/doc/header.html
+++ b/src/common/doc/header.html
@@ -8,9 +8,6 @@
<meta name="generator" content="Doxygen $doxygenversion"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
-<!-- GUDHI website css for header BEGIN -->
-<link rel="stylesheet" type="text/css" href="https://gudhi.inria.fr/assets/css/styles_feeling_responsive.css" />
-<!-- GUDHI website css for header END -->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
@@ -18,72 +15,77 @@ $treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+<!-- GUDHI website css for header BEGIN -->
+<link rel="stylesheet" type="text/css" href="https://gudhi.inria.fr/assets/css/styles_feeling_responsive.css" />
+<!-- GUDHI website css for header END -->
$extrastylesheet
</head>
<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!-- GUDHI website header BEGIN -->
<div id="navigation" class="sticky">
- <nav class="top-bar" role="navigation" data-topbar>
- <ul class="title-area">
- <li class="name">
- <h1 class="show-for-small-only"><a href="" class="icon-tree"> GUDHI library</a></h1>
- </li>
- <!-- Remove the class "menu-icon" to get rid of menu icon. Take out "Menu" to just have icon alone -->
- <li class="toggle-topbar menu-icon"><a href="#"><span>Navigation</span></a></li>
+ <nav class="top-bar" role="navigation" data-topbar="true">
+ <ul class="title-area">
+ <li class="name">
+ <h1 class="show-for-small-only"><a href="" class="icon-tree"> GUDHI library</a></h1>
+ </li>
+ <!-- Remove the class "menu-icon" to get rid of menu icon. Take out "Menu" to just have icon alone -->
+ <li class="toggle-topbar menu-icon"><a href="#"><span>Nav</span></a></li>
+ </ul>
+ <section class="top-bar-section">
+ <ul class="right">
+ <li class="divider"></li>
+ <li><a href="/contact/">Contact</a></li>
</ul>
- <section class="top-bar-section">
- <ul class="right">
- <li class="divider"></li>
- <li><a href="/contact/">Contact</a></li>
- </ul>
- <ul class="left">
- <li><a href="/"> <img src="/assets/img/home.png" alt="&nbsp;&nbsp;GUDHI">&nbsp;&nbsp;GUDHI </a></li>
- <li class="divider"></li>
- <li class="has-dropdown">
- <a href="#">Project</a>
- <ul class="dropdown">
- <li><a href="/people/">People</a></li>
- <li><a href="/keepintouch/">Keep in touch</a></li>
- <li><a href="/partners/">Partners and Funding</a></li>
- <li><a href="/relatedprojects/">Related projects</a></li>
- <li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
- <li><a href="/inaction/">GUDHI in action</a></li>
- </ul>
- </li>
- <li class="divider"></li>
- <li class="has-dropdown">
- <a href="#">Download</a>
- <ul class="dropdown">
- <li><a href="/licensing/">Licensing</a></li>
- <li><a href="https://gforge.inria.fr/frs/download.php/latestzip/5253/library-latest.zip" target="_blank">Get the latest sources</a></li>
- <li><a href="/conda/">Conda package</a></li>
- <li><a href="/dockerfile/">Dockerfile</a></li>
- </ul>
- </li>
- <li class="divider"></li>
- <li class="has-dropdown">
- <a href="#">Documentation</a>
- <ul class="dropdown">
- <li><a href="/introduction/">Introduction</a></li>
- <li><a href="https://gudhi.inria.fr/doc/latest/installation.html">C++ installation manual</a></li>
- <li><a href="https://gudhi.inria.fr/doc/latest/">C++ documentation</a></li>
- <li><a href="https://gudhi.inria.fr/python/latest/installation.html">Python installation manual</a></li>
- <li><a href="https://gudhi.inria.fr/python/latest/">Python documentation</a></li>
- <li><a href="/utils/">Utilities</a></li>
- <li><a href="/tutorials/">Tutorials</a></li>
- </ul>
- </li>
- <li class="divider"></li>
- <li><a href="/interfaces/">Interfaces</a></li>
- <li class="divider"></li>
- </ul>
- </section>
- </nav>
- </div><!-- /#navigation -->
- <!-- GUDHI website header BEGIN -->
+ <ul class="left">
+ <li><a href="/"> <img src="/assets/img/home.png" alt=" GUDHI"/> GUDHI </a></li>
+ <li class="divider"></li>
+ <li class="has-dropdown">
+ <a href="#">Project</a>
+ <ul class="dropdown">
+ <li><a href="/people/">People</a></li>
+ <li><a href="/keepintouch/">Keep in touch</a></li>
+ <li><a href="/partners/">Partners and Funding</a></li>
+ <li><a href="/relatedprojects/">Related projects</a></li>
+ <li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
+ <li><a href="/inaction/">GUDHI in action</a></li>
+ <li><a href="/etymology/">Etymology</a></li>
+ </ul>
+ </li>
+ <li class="divider"></li>
+ <li class="has-dropdown">
+ <a href="#">Download</a>
+ <ul class="dropdown">
+ <li><a href="/licensing/">Licensing</a></li>
+ <li><a href="https://github.com/GUDHI/gudhi-devel/releases/latest" target="_blank">Get the latest sources</a></li>
+ <li><a href="/conda/">Conda package</a></li>
+ <li><a href="https://pypi.org/project/gudhi/" target="_blank">Pip package</a></li>
+ <li><a href="/dockerfile/">Dockerfile</a></li>
+ </ul>
+ </li>
+ <li class="divider"></li>
+ <li class="has-dropdown">
+ <a href="#">Documentation</a>
+ <ul class="dropdown">
+ <li><a href="/introduction/">Introduction</a></li>
+ <li><a href="/doc/latest/installation.html">C++ installation manual</a></li>
+ <li><a href="/doc/latest/">C++ documentation</a></li>
+ <li><a href="/python/latest/installation.html">Python installation manual</a></li>
+ <li><a href="/python/latest/">Python documentation</a></li>
+ <li><a href="/utils/">Utilities</a></li>
+ <li><a href="/tutorials/">Tutorials</a></li>
+ </ul>
+ </li>
+ <li class="divider"></li>
+ <li><a href="/interfaces/">Interfaces</a></li>
+ <li class="divider"></li>
+ </ul>
+ </section>
+ </nav>
+</div><!-- /#navigation -->
+<!-- GUDHI website header END -->
-<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index ce2c5448..f2f8a476 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -5,22 +5,22 @@
* Examples of GUDHI headers inclusion can be found in \ref utilities.
*
* \section compiling Compiling
- * The library uses c++14 and requires <a target="_blank" href="http://www.boost.org/">Boost</a> &ge; 1.56.0
- * and <a target="_blank" href="https://www.cmake.org/">CMake</a> &ge; 3.1.
- * It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2015.
+ * The library uses c++17 and requires <a target="_blank" href="https://www.boost.org/">Boost</a> &ge; 1.66.0
+ * and <a target="_blank" href="https://cmake.org/">CMake</a> &ge; 3.5.
+ * It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2017.
*
* \subsection utilities Utilities and examples
* To build the utilities, run the following commands in a terminal:
\verbatim cd /path-to-gudhi/
mkdir build
cd build/
-cmake ..
+cmake -DCMAKE_BUILD_TYPE=Release ..
make \endverbatim
* By default, examples are disabled. You can activate their compilation with
- * <a href="https://cmake.org/cmake/help/v3.0/manual/ccmake.1.html">ccmake</a> (on Linux and Mac OSX),
- * <a href="https://cmake.org/cmake/help/v3.0/manual/cmake-gui.1.html">cmake-gui</a> (on Windows) or by modifying the
+ * <a href="https://cmake.org/cmake/help/latest/manual/ccmake.1.html">ccmake</a> (on Linux and Mac OSX),
+ * <a href="https://cmake.org/cmake/help/latest/manual/cmake-gui.1.html">cmake-gui</a> (on Windows) or by modifying the
* cmake command as follows :
-\verbatim cmake -DWITH_GUDHI_EXAMPLE=ON ..
+\verbatim cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=ON ..
make \endverbatim
* A list of utilities and examples is available <a href="examples.html">here</a>.
*
@@ -28,23 +28,31 @@ make \endverbatim
* To install the library (headers and activated utilities), run the following command in a terminal:
* \verbatim make install \endverbatim
 * This action may require sudoer or administrator privileges on the machine, depending on the operating system and
- * of <a href="https://cmake.org/cmake/help/v3.0/variable/CMAKE_INSTALL_PREFIX.html">CMAKE_INSTALL_PREFIX</a>.
+ * on <a href="https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html">CMAKE_INSTALL_PREFIX</a>.
*
+ * \note The Python module will be compiled by the `make` command, but `make install` will not install it. Please refer to
+ * the <a href="https://gudhi.inria.fr/python/latest/installation.html#gudhi-python-module-installation">Python
+ * module installation documentation</a>.
+ *
* \subsection testsuites Test suites
* To test your build, run the following command in a terminal:
* \verbatim make test \endverbatim
* `make test` is using <a href="https://cmake.org/cmake/help/latest/manual/ctest.1.html">Ctest</a> (CMake test driver
* program). If some of the tests are failing, please send us the result of the following command:
* \verbatim ctest --output-on-failure \endverbatim
+ * Testing the datasets fetching feature requires internet access and is disabled by default. If you want to include this test, set WITH_GUDHI_REMOTE_TEST to ON when building in the previous step (note that this test is part of the python module):
+ * \verbatim cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_REMOTE_TEST=ON -DWITH_GUDHI_PYTHON=ON .. \endverbatim
*
- * \subsection documentationgeneration Documentation
- * To generate the documentation, <a target="_blank" href="http://www.doxygen.org/">Doxygen</a> is required.
- * Run the following command in a terminal:
-\verbatim
-make doxygen
-# Documentation will be generated in the folder YYYY-MM-DD-hh-mm-ss_GUDHI_X.Y.Z/doc/html/
-# You can customize the directory name by calling `cmake -DUSER_VERSION_DIR=/my/custom/folder`
-\endverbatim
+ * \subsection documentationgeneration C++ documentation
+ * To generate the C++ documentation, the <a target="_blank" href="http://www.doxygen.org/">doxygen</a> program
+ * is required (version &ge; 1.9.5 is advised). Run the following command in a terminal:
+ * \verbatim make doxygen \endverbatim
+ * Documentation will be generated in a folder named <code>html</code>.
+ *
+ * In case no full setup is present and only the documentation should be built, the following command sequence
+ * can be used:
+\verbatim cmake -DWITH_GUDHI_THIRD_PARTY=OFF ..
+make doxygen\endverbatim
*
* \subsection helloworld Hello world !
* The <a target="_blank" href="https://github.com/GUDHI/hello-gudhi-world">Hello world for GUDHI</a>
@@ -55,10 +63,9 @@ make doxygen
* The multi-field persistent homology algorithm requires GMP which is a free library for arbitrary-precision
* arithmetic, operating on signed integers, rational numbers, and floating point numbers.
*
- * The following example requires the <a target="_blank" href="http://gmplib.org/">GNU Multiple Precision Arithmetic
+ * The following example requires the <a target="_blank" href="https://gmplib.org/">GNU Multiple Precision Arithmetic
* Library</a> (GMP) and will not be built if GMP is not installed:
- * \li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
*
* Having GMP version 4.2 or higher installed is recommended.
*
@@ -66,190 +73,118 @@ make doxygen
 * Some GUDHI modules (cf. \ref main_page "modules list"), and a few examples require CGAL, a C++ library that provides
* easy access to efficient and reliable geometric algorithms.
*
- * \note There is no need to install CGAL, you can just <CODE>cmake . && make</CODE> CGAL (or even
- * <CODE>cmake -DCGAL_HEADER_ONLY=ON .</CODE>), thereafter you will be able to compile
- * GUDHI by calling <CODE>cmake -DCGAL_DIR=/your/path/to/CGAL-X.Y .. && make</CODE>
+ * \note There is no need to install CGAL, you can just <CODE>cmake -DCMAKE_BUILD_TYPE=Release . && make</CODE> CGAL
+ * (or even <CODE>cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_HEADER_ONLY=ON .</CODE>), thereafter you will be able to
+ * compile GUDHI by calling <CODE>cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL-X.Y .. && make</CODE>
*
* The procedure to install this library according to
* your operating system is detailed here http://doc.cgal.org/latest/Manual/installation.html
*
* The following examples/utilities require the <a target="_blank" href="http://www.cgal.org/">Computational Geometry Algorithms
* Library</a> (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed:
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_bottleneck_distance_2alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="_bottleneck_distance_2bottleneck_basic_example_8cpp-example.html">
- * Bottleneck_distance/bottleneck_basic_example.cpp</a>
- * \li <a href="_bottleneck_distance_2bottleneck_read_file_8cpp-example.html">
- * Bottleneck_distance/bottleneck_distance.cpp</a>
- * \li <a href="_nerve__g_i_c_2_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_nerve__g_i_c_2_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="_spatial_searching_2example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_custom_kernel_8cpp-example.html">
- * Subsampling/example_custom_kernel.cpp</a>
- * \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="_tangential_complex_2example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="_tangential_complex_2example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_alpha_complex_2_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_basic_example.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection eigen Eigen
 * Some GUDHI modules (cf. \ref main_page "modules list"), and a few examples require
- * <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> is a C++ template library for linear algebra:
+ * <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a> is a C++ template library for linear algebra:
* matrices, vectors, numerical solvers, and related algorithms.
*
- * The following examples/utilities require the <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> and will not be
+ * The following examples/utilities require <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a> and will not be
* built if Eigen is not installed:
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_alpha_complex_2_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="_bottleneck_distance_2alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_spatial_searching_2example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_custom_kernel_8cpp-example.html">
- * Subsampling/example_custom_kernel.cpp</a>
- * \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="_tangential_complex_2example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="_tangential_complex_2example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,cell_complex_from_basic_circle_manifold.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_custom_function.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection tbb Threading Building Blocks
- * <a target="_blank" href="https://www.threadingbuildingblocks.org/">Intel&reg; TBB</a> lets you easily write parallel
+ * <a target="_blank" href="https://github.com/oneapi-src/oneTBB">Intel&reg; TBB</a> lets you easily write parallel
* C++ programs that take full advantage of multicore performance, that are portable and composable, and that have
* future-proof scalability.
*
* Having Intel&reg; TBB installed is recommended to parallelize and accelerate some GUDHI computations.
*
 * The following examples/utilities use Intel&reg; TBB if it is installed:
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_bitmap_cubical_complex_8cpp-example.html">
- * Bitmap_cubical_complex/cubical_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_bitmap_cubical_complex_periodic_boundary_conditions_8cpp-example.html">
- * Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_random_bitmap_cubical_complex_8cpp-example.html">
- * Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp</a>
- * \li <a href="_nerve__g_i_c_2_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_nerve__g_i_c_2_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="_simplex_tree_2simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a>
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="_simplex_tree_2simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a>
- * \li <a href="_simplex_tree_2graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a>
- * \li <a href="_persistent_cohomology_2alpha_complex_3d_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2alpha_complex_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_persistence_via_boundary_matrix_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp</a>
- * \li <a href="_persistent_cohomology_2persistence_from_file_8cpp-example.html">
- * Persistent_cohomology/persistence_from_file.cpp</a>
- * \li <a href="_persistent_cohomology_2persistence_from_simple_simplex_tree_8cpp-example.html">
- * Persistent_cohomology/persistence_from_simple_simplex_tree.cpp</a>
- * \li <a href="_persistent_cohomology_2plain_homology_8cpp-example.html">
- * Persistent_cohomology/plain_homology.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_persistence_step_by_step_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_step_by_step.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_rips_complex_2example_one_skeleton_rips_from_points_8cpp-example.html">
- * Rips_complex/example_one_skeleton_rips_from_points.cpp</a>
- * \li <a href="_rips_complex_2example_rips_complex_from_off_file_8cpp-example.html">
- * Rips_complex/example_rips_complex_from_off_file.cpp</a>
- * \li <a href="_rips_complex_2rips_distance_matrix_persistence_8cpp-example.html">
- * Rips_complex/rips_distance_matrix_persistence.cpp</a>
- * \li <a href="_rips_complex_2rips_persistence_8cpp-example.html">
- * Rips_complex/rips_persistence.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_nearest_landmark_table_8cpp-example.html">
- * Witness_complex/example_nearest_landmark_table.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,periodic_cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,Random_bitmap_cubical_complex.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp}
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_via_boundary_matrix.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_file.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_one_skeleton_rips_from_points.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_rips_complex_from_off_file.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_nearest_landmark_table.cpp}
*
* \section Contributions Bug reports and contributions
- * Please help us improving the quality of the GUDHI library. You may report bugs or suggestions to:
- * \verbatim Contact: gudhi-users@lists.gforge.inria.fr \endverbatim
+ * Please help us improve the quality of the GUDHI library.
+ * You may <a href="https://github.com/GUDHI/gudhi-devel/issues">report bugs</a> or
+ * <a href="https://gudhi.inria.fr/contact/">contact us</a> for any suggestions.
*
- * GUDHI is open to external contributions. If you want to join our development team, please contact us.
+ * GUDHI is open to external contributions. If you want to join our development team, please take some time to read our
+ * <a href="https://github.com/GUDHI/gudhi-devel/blob/master/.github/CONTRIBUTING.md">contributing guide</a>.
*
*/
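Note on the TBB-aware examples listed above: when CMake finds a TBB installation, the GUDHI build defines the `GUDHI_USE_TBB` preprocessor symbol, and code switches between serial and parallel paths on that symbol. A minimal sketch of the pattern; the macro name is taken from GUDHI's CMake setup and should be verified against your own build configuration:

```cpp
#include <algorithm>
#include <vector>
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_sort.h>
#endif

// Sort filtration values in parallel when TBB is available, serially otherwise.
void sort_filtration_values(std::vector<double>& values) {
#ifdef GUDHI_USE_TBB
  tbb::parallel_sort(values.begin(), values.end());
#else
  std::sort(values.begin(), values.end());
#endif
}
```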
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index 6ea10b88..9b7c2853 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -135,7 +135,7 @@
</tr>
</table>
-## Filtrations and reconstructions {#FiltrationsReconstructions}
+## Filtrations
### Alpha complex
<table>
@@ -178,10 +178,10 @@
The set of all simplices is filtered by the radius of their minimal enclosing ball.
</td>
<td width="15%">
- <b>Author:</b> Vincent Rouvreau<br>
+ <b>Author:</b> Vincent Rouvreau, Hind Montassif<br>
<b>Introduced in:</b> GUDHI 2.2.0<br>
- <b>Copyright:</b> MIT [(GPL v3)](../../licensing/)<br>
- <b>Includes:</b> [Miniball](https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html)<br>
+ <b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
+ <b>Requires:</b> \ref cgal
</td>
</tr>
<tr>
@@ -217,6 +217,35 @@
</tr>
</table>
+### Edge collapse
+
+<table>
+ <tr>
+ <td width="35%" rowspan=2>
+ \image html "dominated_edge.png"
+ </td>
+ <td width="50%">
+    Edge collapse reduces any flag filtration to a smaller flag filtration with the same persistence, using
+    only the 1-skeleton of a simplicial complex.
+    The reduction is exact: the persistent homology of the reduced sequence is identical to the persistent
+    homology of the input sequence. The resulting method is simple and extremely efficient.
+
+ Computation of edge collapse and persistent homology of a filtered flag complex via edge collapse as described in
+ \cite edgecollapsearxiv.
+ </td>
+ <td width="15%">
+ <b>Author:</b> Siddharth Pritam, Marc Glisse<br>
+ <b>Introduced in:</b> GUDHI 3.3.0<br>
+ <b>Copyright:</b> MIT
+ </td>
+ </tr>
+ <tr>
+ <td colspan=2 height="25">
+ <b>User manual:</b> \ref edge_collapse
+ </td>
+ </tr>
+</table>
+
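Since a flag filtration is determined by its 1-skeleton, the collapse operates purely on filtered edges. A minimal sketch, assuming the `flag_complex_collapse_edges` entry point of `<gudhi/Flag_complex_edge_collapser.h>`; check the edge collapse user manual for the exact interface of your GUDHI version:

```cpp
#include <gudhi/Flag_complex_edge_collapser.h>

#include <tuple>
#include <vector>

int main() {
  // A filtered 1-skeleton given as (u, v, filtration value) triples.
  std::vector<std::tuple<int, int, double>> edges{
      {0, 1, 0.5}, {1, 2, 0.5}, {0, 2, 0.5},
      {0, 3, 1.0}, {1, 3, 1.0}, {2, 3, 1.0}};
  // Remove dominated edges; the reduced edge set defines a smaller flag
  // filtration with the same persistent homology as the input.
  auto reduced_edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
  return 0;
}
```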
### Witness complex
<table>
@@ -268,6 +297,32 @@
</tr>
</table>
+## Manifold reconstructions
+### Coxeter triangulation
+
+<table>
+ <tr>
+ <td width="35%" rowspan=2>
+ \image html "manifold_tracing_on_custom_function_example.png"
+ </td>
+ <td width="50%">
+    The Coxeter triangulation module provides tools for constructing a piecewise-linear approximation of an
+    \f$m\f$-dimensional smooth manifold embedded in \f$ \mathbb{R}^d \f$ using an ambient triangulation.
+ </td>
+ <td width="15%">
+ <b>Author:</b> Siargey Kachanovich<br>
+ <b>Introduced in:</b> GUDHI 3.4.0<br>
+ <b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
+ <b>Requires:</b> \ref eigen &ge; 3.1.0
+ </td>
+ </tr>
+ <tr>
+ <td colspan=2 height="25">
+ <b>User manual:</b> \ref coxeter_triangulation
+ </td>
+ </tr>
+</table>
+
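A rough sketch of the workflow, loosely following the cell_complex_from_basic_circle_manifold.cpp example listed earlier: define an implicit function, wrap it in an intersection oracle, and trace the manifold through an ambient Coxeter triangulation. Header paths and signatures below are assumptions and should be verified against the Coxeter triangulation user manual:

```cpp
#include <gudhi/Coxeter_triangulation.h>
#include <gudhi/Functions/Function_Sm_in_Rd.h>
#include <gudhi/Implicit_manifold_intersection_oracle.h>
#include <gudhi/Manifold_tracing.h>

#include <Eigen/Dense>
#include <vector>

int main() {
  using namespace Gudhi::coxeter_triangulation;
  Function_Sm_in_Rd circle(1.0, 1);     // the circle S^1 of radius 1 in R^2
  auto oracle = make_oracle(circle);    // simplex/manifold intersection tests
  Coxeter_triangulation<> cox_tr(oracle.amb_d());  // ambient triangulation of R^2
  using MT = Manifold_tracing<Coxeter_triangulation<> >;
  typename MT::Out_simplex_map interior_simplex_map;
  std::vector<Eigen::VectorXd> seed_points(1, circle.seed());
  manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map);
  return 0;
}
```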
### Tangential complex
<table>
@@ -312,7 +367,7 @@
theory is essentially composed of three elements: topological spaces, their homology groups and an evolution
scheme.
Computation of persistent cohomology using the algorithm of \cite DBLP:journals/dcg/SilvaMV11 and
- \cite DBLP:journals/corr/abs-1208-5018 and the Compressed Annotation Matrix implementation of
+ \cite DBLP:conf/compgeom/DeyFW14 and the Compressed Annotation Matrix implementation of
\cite DBLP:conf/esa/BoissonnatDM13 .
</td>
<td width="15%">
diff --git a/src/common/doc/stylesheet.css b/src/common/doc/stylesheet.css
index 1df177a4..fb030e1f 100644..100755
--- a/src/common/doc/stylesheet.css
+++ b/src/common/doc/stylesheet.css
@@ -1,1367 +1,28 @@
-/* The standard CSS for doxygen 1.8.6 */
-
-body, table, div, p, dl {
- font: 400 14px/22px Roboto,sans-serif;
-}
-
-/* @group Heading Levels */
-
-h1.groupheader {
- font-size: 150%;
-}
-
-.title {
- font: 400 14px/28px Roboto,sans-serif;
- font-size: 150%;
- font-weight: bold;
- margin: 10px 2px;
-}
-
-h2.groupheader {
- border-bottom: 1px solid #879ECB;
- color: #354C7B;
- font-size: 150%;
- font-weight: normal;
- margin-top: 1.75em;
- padding-top: 8px;
- padding-bottom: 4px;
- width: 100%;
-}
-
-h3.groupheader {
- font-size: 100%;
-}
-
-h1, h2, h3, h4, h5, h6 {
- -webkit-transition: text-shadow 0.5s linear;
- -moz-transition: text-shadow 0.5s linear;
- -ms-transition: text-shadow 0.5s linear;
- -o-transition: text-shadow 0.5s linear;
- transition: text-shadow 0.5s linear;
- margin-right: 15px;
-}
-
-h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
- text-shadow: 0 0 15px cyan;
-}
-
-dt {
- font-weight: bold;
-}
-
-div.multicol {
- -moz-column-gap: 1em;
- -webkit-column-gap: 1em;
- -moz-column-count: 3;
- -webkit-column-count: 3;
-}
-
-p.startli, p.startdd {
- margin-top: 2px;
-}
-
-p.starttd {
- margin-top: 0px;
-}
-
-p.endli {
- margin-bottom: 0px;
-}
-
-p.enddd {
- margin-bottom: 4px;
-}
-
-p.endtd {
- margin-bottom: 2px;
-}
-
-/* @end */
-
-caption {
- font-weight: bold;
-}
-
-span.legend {
- font-size: 70%;
- text-align: center;
-}
-
-h3.version {
- font-size: 90%;
- text-align: center;
-}
-
-div.qindex, div.navtab{
- background-color: #EBEFF6;
- border: 1px solid #A3B4D7;
- text-align: center;
-}
-
-div.qindex, div.navpath {
- width: 100%;
- line-height: 140%;
-}
-
-div.navtab {
- margin-right: 15px;
-}
-
-/* @group Link Styling */
-
-a {
- color: #3D578C;
- font-weight: normal;
- text-decoration: none;
-}
-
-.contents a:visited {
- color: #4665A2;
-}
-
-a:hover {
- text-decoration: underline;
-}
-
-a.qindex {
- font-weight: bold;
-}
-
-a.qindexHL {
- font-weight: bold;
- background-color: #9CAFD4;
- color: #ffffff;
- border: 1px double #869DCA;
-}
-
-.contents a.qindexHL:visited {
- color: #ffffff;
-}
-
-a.el {
- font-weight: bold;
-}
-
-a.elRef {
-}
-
-a.code, a.code:visited, a.line, a.line:visited {
- color: #4665A2;
-}
-
-a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited {
- color: #4665A2;
-}
-
-/* @end */
-
-dl.el {
- margin-left: -1cm;
-}
-
-pre.fragment {
- border: 1px solid #C4CFE5;
- background-color: #FBFCFD;
- padding: 4px 6px;
- margin: 4px 8px 4px 2px;
- overflow: auto;
- word-wrap: break-word;
- font-size: 9pt;
- line-height: 125%;
- font-family: monospace, fixed;
- font-size: 105%;
-}
-
-div.fragment {
- padding: 4px 6px;
- margin: 4px 8px 4px 2px;
- background-color: #FBFCFD;
- border: 1px solid #C4CFE5;
-}
-
-div.line {
- font-family: monospace, fixed;
- font-size: 13px;
- min-height: 13px;
- line-height: 1.0;
- text-wrap: unrestricted;
- white-space: -moz-pre-wrap; /* Moz */
- white-space: -pre-wrap; /* Opera 4-6 */
- white-space: -o-pre-wrap; /* Opera 7 */
- white-space: pre-wrap; /* CSS3 */
- word-wrap: break-word; /* IE 5.5+ */
- text-indent: -53px;
- padding-left: 53px;
- padding-bottom: 0px;
- margin: 0px;
- -webkit-transition-property: background-color, box-shadow;
- -webkit-transition-duration: 0.5s;
- -moz-transition-property: background-color, box-shadow;
- -moz-transition-duration: 0.5s;
- -ms-transition-property: background-color, box-shadow;
- -ms-transition-duration: 0.5s;
- -o-transition-property: background-color, box-shadow;
- -o-transition-duration: 0.5s;
- transition-property: background-color, box-shadow;
- transition-duration: 0.5s;
-}
-
-div.line.glow {
- background-color: cyan;
- box-shadow: 0 0 10px cyan;
-}
-
-
-span.lineno {
- padding-right: 4px;
- text-align: right;
- border-right: 2px solid #0F0;
- background-color: #E8E8E8;
- white-space: pre;
-}
-span.lineno a {
- background-color: #D8D8D8;
-}
-
-span.lineno a:hover {
- background-color: #C8C8C8;
-}
-
-div.ah {
- background-color: black;
- font-weight: bold;
- color: #ffffff;
- margin-bottom: 3px;
- margin-top: 3px;
- padding: 0.2em;
- border: solid thin #333;
- border-radius: 0.5em;
- -webkit-border-radius: .5em;
- -moz-border-radius: .5em;
- box-shadow: 2px 2px 3px #999;
- -webkit-box-shadow: 2px 2px 3px #999;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
- background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444));
- background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000);
-}
-
-div.groupHeader {
- margin-left: 16px;
- margin-top: 12px;
- font-weight: bold;
-}
-
-div.groupText {
- margin-left: 16px;
- font-style: italic;
-}
-
-body {
- background-color: white;
- color: black;
- margin: 0;
-}
-
-div.contents {
- margin-top: 10px;
- margin-left: 12px;
- margin-right: 8px;
-}
-
-td.indexkey {
- background-color: #EBEFF6;
- font-weight: bold;
- border: 1px solid #C4CFE5;
- margin: 2px 0px 2px 0;
- padding: 2px 10px;
- white-space: nowrap;
- vertical-align: top;
-}
-
-td.indexvalue {
- background-color: #EBEFF6;
- border: 1px solid #C4CFE5;
- padding: 2px 10px;
- margin: 2px 0px;
-}
-
-tr.memlist {
- background-color: #EEF1F7;
-}
-
-p.formulaDsp {
- text-align: center;
-}
-
-img.formulaDsp {
-
-}
-
-img.formulaInl {
- vertical-align: middle;
-}
-
-div.center {
- text-align: center;
- margin-top: 0px;
- margin-bottom: 0px;
- padding: 0px;
-}
-
-div.center img {
- border: 0px;
-}
-
-address.footer {
- text-align: right;
- padding-right: 12px;
-}
-
-img.footer {
- border: 0px;
- vertical-align: middle;
-}
-
-/* @group Code Colorization */
-
-span.keyword {
- color: #008000
-}
-
-span.keywordtype {
- color: #604020
-}
-
-span.keywordflow {
- color: #e08000
-}
-
-span.comment {
- color: #800000
-}
-
-span.preprocessor {
- color: #806020
-}
-
-span.stringliteral {
- color: #002080
-}
-
-span.charliteral {
- color: #008080
-}
-
-span.vhdldigit {
- color: #ff00ff
-}
-
-span.vhdlchar {
- color: #000000
-}
-
-span.vhdlkeyword {
- color: #700070
-}
-
-span.vhdllogic {
- color: #ff0000
-}
-
-blockquote {
- background-color: #F7F8FB;
- border-left: 2px solid #9CAFD4;
- margin: 0 24px 0 4px;
- padding: 0 12px 0 16px;
-}
-
-/* @end */
-
-/*
-.search {
- color: #003399;
- font-weight: bold;
-}
-
-form.search {
- margin-bottom: 0px;
- margin-top: 0px;
-}
-
-input.search {
- font-size: 75%;
- color: #000080;
- font-weight: normal;
- background-color: #e8eef2;
-}
-*/
-
-td.tiny {
- font-size: 75%;
-}
-
-.dirtab {
- padding: 4px;
- border-collapse: collapse;
- border: 1px solid #A3B4D7;
-}
-
-th.dirtab {
- background: #EBEFF6;
- font-weight: bold;
-}
-
-hr {
- height: 0px;
- border: none;
- border-top: 1px solid #4A6AAA;
-}
-
-hr.footer {
- height: 1px;
-}
-
-/* @group Member Descriptions */
-
-table.memberdecls {
- border-spacing: 0px;
- padding: 0px;
-}
-
-.memberdecls td, .fieldtable tr {
- -webkit-transition-property: background-color, box-shadow;
- -webkit-transition-duration: 0.5s;
- -moz-transition-property: background-color, box-shadow;
- -moz-transition-duration: 0.5s;
- -ms-transition-property: background-color, box-shadow;
- -ms-transition-duration: 0.5s;
- -o-transition-property: background-color, box-shadow;
- -o-transition-duration: 0.5s;
- transition-property: background-color, box-shadow;
- transition-duration: 0.5s;
-}
-
-.memberdecls td.glow, .fieldtable tr.glow {
- background-color: cyan;
- box-shadow: 0 0 15px cyan;
-}
-
-.mdescLeft, .mdescRight,
-.memItemLeft, .memItemRight,
-.memTemplItemLeft, .memTemplItemRight, .memTemplParams {
- background-color: #F9FAFC;
- border: none;
- margin: 4px;
- padding: 1px 0 0 8px;
-}
-
-.mdescLeft, .mdescRight {
- padding: 0px 8px 4px 8px;
- color: #555;
-}
-
-.memSeparator {
- border-bottom: 1px solid #DEE4F0;
- line-height: 1px;
- margin: 0px;
- padding: 0px;
-}
-
-.memItemLeft, .memTemplItemLeft {
- white-space: nowrap;
-}
-
-.memItemRight {
- width: 100%;
-}
-
-.memTemplParams {
- color: #4665A2;
- white-space: nowrap;
- font-size: 80%;
-}
-
-/* @end */
-
-/* @group Member Details */
-
-/* Styles for detailed member documentation */
-
-.memtemplate {
- font-size: 80%;
- color: #4665A2;
- font-weight: normal;
- margin-left: 9px;
-}
-
-.memnav {
- background-color: #EBEFF6;
- border: 1px solid #A3B4D7;
- text-align: center;
- margin: 2px;
- margin-right: 15px;
- padding: 2px;
-}
-
-.mempage {
- width: 100%;
-}
-
-.memitem {
- padding: 0;
- margin-bottom: 10px;
- margin-right: 5px;
- -webkit-transition: box-shadow 0.5s linear;
- -moz-transition: box-shadow 0.5s linear;
- -ms-transition: box-shadow 0.5s linear;
- -o-transition: box-shadow 0.5s linear;
- transition: box-shadow 0.5s linear;
- display: table !important;
- width: 100%;
-}
-
-.memitem.glow {
- box-shadow: 0 0 15px cyan;
-}
-
-.memname {
- font-weight: bold;
- margin-left: 6px;
-}
-
-.memname td {
- vertical-align: bottom;
-}
-
-.memproto, dl.reflist dt {
- border-top: 1px solid #A8B8D9;
- border-left: 1px solid #A8B8D9;
- border-right: 1px solid #A8B8D9;
- padding: 6px 0px 6px 0px;
- color: #253555;
- font-weight: bold;
- text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
- background-image:url('nav_f.png');
- background-repeat:repeat-x;
- background-color: #E2E8F2;
- /* opera specific markup */
- box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- border-top-right-radius: 4px;
- border-top-left-radius: 4px;
- /* firefox specific markup */
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
- -moz-border-radius-topright: 4px;
- -moz-border-radius-topleft: 4px;
- /* webkit specific markup */
- -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- -webkit-border-top-right-radius: 4px;
- -webkit-border-top-left-radius: 4px;
-
-}
-
-.memdoc, dl.reflist dd {
- border-bottom: 1px solid #A8B8D9;
- border-left: 1px solid #A8B8D9;
- border-right: 1px solid #A8B8D9;
- padding: 6px 10px 2px 10px;
- background-color: #FBFCFD;
- border-top-width: 0;
- background-image:url('nav_g.png');
- background-repeat:repeat-x;
- background-color: #FFFFFF;
- /* opera specific markup */
- border-bottom-left-radius: 4px;
- border-bottom-right-radius: 4px;
- box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- /* firefox specific markup */
- -moz-border-radius-bottomleft: 4px;
- -moz-border-radius-bottomright: 4px;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
- /* webkit specific markup */
- -webkit-border-bottom-left-radius: 4px;
- -webkit-border-bottom-right-radius: 4px;
- -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
-}
-
-dl.reflist dt {
- padding: 5px;
-}
-
-dl.reflist dd {
- margin: 0px 0px 10px 0px;
- padding: 5px;
-}
-
-.paramkey {
- text-align: right;
-}
-
-.paramtype {
- white-space: nowrap;
-}
-
-.paramname {
- color: #602020;
- white-space: nowrap;
-}
-.paramname em {
- font-style: normal;
-}
-.paramname code {
- line-height: 14px;
-}
-
-.params, .retval, .exception, .tparams {
- margin-left: 0px;
- padding-left: 0px;
-}
-
-.params .paramname, .retval .paramname {
- font-weight: bold;
- vertical-align: top;
-}
-
-.params .paramtype {
- font-style: italic;
- vertical-align: top;
-}
-
-.params .paramdir {
- font-family: "courier new",courier,monospace;
- vertical-align: top;
-}
-
-table.mlabels {
- border-spacing: 0px;
-}
-
-td.mlabels-left {
- width: 100%;
- padding: 0px;
-}
-
-td.mlabels-right {
- vertical-align: bottom;
- padding: 0px;
- white-space: nowrap;
-}
-
-span.mlabels {
- margin-left: 8px;
-}
-
-span.mlabel {
- background-color: #728DC1;
- border-top:1px solid #5373B4;
- border-left:1px solid #5373B4;
- border-right:1px solid #C4CFE5;
- border-bottom:1px solid #C4CFE5;
- text-shadow: none;
- color: white;
- margin-right: 4px;
- padding: 2px 3px;
- border-radius: 3px;
- font-size: 7pt;
- white-space: nowrap;
- vertical-align: middle;
-}
-
-
-
-/* @end */
-
-/* these are for tree view when not used as main index */
-
-div.directory {
- margin: 10px 0px;
- border-top: 1px solid #A8B8D9;
- border-bottom: 1px solid #A8B8D9;
- width: 100%;
-}
-
-.directory table {
- border-collapse:collapse;
-}
-
-.directory td {
- margin: 0px;
- padding: 0px;
- vertical-align: top;
-}
-
-.directory td.entry {
- white-space: nowrap;
- padding-right: 6px;
- padding-top: 3px;
-}
-
-.directory td.entry a {
- outline:none;
-}
-
-.directory td.entry a img {
- border: none;
-}
-
-.directory td.desc {
- width: 100%;
- padding-left: 6px;
- padding-right: 6px;
- padding-top: 3px;
- border-left: 1px solid rgba(0,0,0,0.05);
-}
-
-.directory tr.even {
- padding-left: 6px;
- background-color: #F7F8FB;
-}
-
-.directory img {
- vertical-align: -30%;
-}
-
-.directory .levels {
- white-space: nowrap;
- width: 100%;
- text-align: right;
- font-size: 9pt;
-}
-
-.directory .levels span {
- cursor: pointer;
- padding-left: 2px;
- padding-right: 2px;
- color: #3D578C;
-}
-
-div.dynheader {
- margin-top: 8px;
- -webkit-touch-callout: none;
- -webkit-user-select: none;
- -khtml-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
-}
-
-address {
- font-style: normal;
- color: #2A3D61;
-}
-
-table.doxtable {
- border-collapse:collapse;
- margin-top: 4px;
- margin-bottom: 4px;
-}
-
-table.doxtable td, table.doxtable th {
- border: 1px solid #2D4068;
- padding: 3px 7px 2px;
-}
-
-table.doxtable th {
- background-color: #374F7F;
- color: #FFFFFF;
- font-size: 110%;
- padding-bottom: 4px;
- padding-top: 5px;
-}
-
-table.fieldtable {
- /*width: 100%;*/
- margin-bottom: 10px;
- border: 1px solid #A8B8D9;
- border-spacing: 0px;
- -moz-border-radius: 4px;
- -webkit-border-radius: 4px;
- border-radius: 4px;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
- -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
- box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
-}
-
-.fieldtable td, .fieldtable th {
- padding: 3px 7px 2px;
-}
-
-.fieldtable td.fieldtype, .fieldtable td.fieldname {
- white-space: nowrap;
- border-right: 1px solid #A8B8D9;
- border-bottom: 1px solid #A8B8D9;
- vertical-align: top;
-}
-
-.fieldtable td.fieldname {
- padding-top: 3px;
-}
-
-.fieldtable td.fielddoc {
- border-bottom: 1px solid #A8B8D9;
- /*width: 100%;*/
-}
-
-.fieldtable td.fielddoc p:first-child {
- margin-top: 0px;
-}
-
-.fieldtable td.fielddoc p:last-child {
- margin-bottom: 2px;
-}
-
-.fieldtable tr:last-child td {
- border-bottom: none;
-}
-
-.fieldtable th {
- background-image:url('nav_f.png');
- background-repeat:repeat-x;
- background-color: #E2E8F2;
- font-size: 90%;
- color: #253555;
- padding-bottom: 4px;
- padding-top: 5px;
- text-align:left;
- -moz-border-radius-topleft: 4px;
- -moz-border-radius-topright: 4px;
- -webkit-border-top-left-radius: 4px;
- -webkit-border-top-right-radius: 4px;
- border-top-left-radius: 4px;
- border-top-right-radius: 4px;
- border-bottom: 1px solid #A8B8D9;
-}
-
-
-.tabsearch {
- top: 0px;
- left: 10px;
- height: 36px;
- background-image: url('tab_b.png');
- z-index: 101;
- overflow: hidden;
- font-size: 13px;
-}
-
-.navpath ul
-{
- font-size: 11px;
- background-image:url('tab_b.png');
- background-repeat:repeat-x;
- background-position: 0 -5px;
- height:30px;
- line-height:30px;
- color:#8AA0CC;
- border:solid 1px #C2CDE4;
- overflow:hidden;
- margin:0px;
- padding:0px;
-}
-
-.navpath li
-{
- list-style-type:none;
- float:left;
- padding-left:10px;
- padding-right:15px;
- background-image:url('bc_s.png');
- background-repeat:no-repeat;
- background-position:right;
- color:#364D7C;
-}
-
-.navpath li.navelem a
-{
- height:32px;
- display:block;
- text-decoration: none;
- outline: none;
- color: #283A5D;
- font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
- text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
- text-decoration: none;
-}
-
-.navpath li.navelem a:hover
-{
- color:#6884BD;
-}
-
-.navpath li.footer
-{
- list-style-type:none;
- float:right;
- padding-left:10px;
- padding-right:15px;
- background-image:none;
- background-repeat:no-repeat;
- background-position:right;
- color:#364D7C;
- font-size: 8pt;
-}
-
-
-div.summary
-{
- float: right;
- font-size: 8pt;
- padding-right: 5px;
- width: 50%;
- text-align: right;
-}
-
-div.summary a
-{
- white-space: nowrap;
-}
-
-div.ingroups
-{
- font-size: 8pt;
- width: 50%;
- text-align: left;
-}
-
-div.ingroups a
-{
- white-space: nowrap;
-}
-
-div.header
-{
- background-image:url('nav_h.png');
- background-repeat:repeat-x;
- background-color: #F9FAFC;
- margin: 0px;
- border-bottom: 1px solid #C4CFE5;
-}
-
-div.headertitle
-{
- padding: 5px 5px 5px 10px;
-}
-
-dl
-{
- padding: 0 0 0 10px;
-}
-
-/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */
-dl.section
-{
- margin-left: 0px;
- padding-left: 0px;
-}
-
-dl.note
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #D0C000;
-}
-
-dl.warning, dl.attention
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #FF0000;
-}
-
-dl.pre, dl.post, dl.invariant
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #00D000;
-}
-
-dl.deprecated
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #505050;
-}
-
-dl.todo
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #00C0E0;
-}
-
-dl.test
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #3030E0;
-}
-
-dl.bug
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #C08050;
-}
-
-dl.section dd {
- margin-bottom: 6px;
-}
-
-
-#projectlogo
-{
- text-align: center;
- vertical-align: bottom;
- border-collapse: separate;
-}
-
-#projectlogo img
-{
- border: 0px none;
-}
-
#projectname
{
- border: 0px none;
- font: 300% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 2px 0px;
+ border: 0px none;
}
-
#projectbrief
{
- font: 60% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 0px;
+ font: 60% Tahoma, Arial,sans-serif;
}
-
#projectnumber
{
- font: 80% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 0px;
-}
-
-#titlearea
-{
- padding: 0px;
- margin: 0px;
- width: 100%;
- border-bottom: 1px solid #5373B4;
-}
-
-.image
-{
- text-align: center;
-}
-
-.dotgraph
-{
- text-align: center;
+ font: 80% Tahoma, Arial,sans-serif;
}
-
-.mscgraph
+.arrow
{
- text-align: center;
-}
-
-.diagraph
-{
- text-align: center;
-}
-
-.caption
-{
- font-weight: bold;
-}
-
-div.zoom
-{
- border: 1px solid #90A5CE;
-}
-
-dl.citelist {
- margin-bottom:50px;
-}
-
-dl.citelist dt {
- color:#334975;
- float:left;
- font-weight:bold;
- margin-right:10px;
- padding:5px;
-}
-
-dl.citelist dd {
- margin:2px 0;
- padding:5px 0;
-}
-
-div.toc {
- padding: 14px 25px;
- background-color: #F4F6FA;
- border: 1px solid #D8DFEE;
- border-radius: 7px 7px 7px 7px;
- float: right;
- height: auto;
- margin: 0 20px 10px 10px;
- width: 200px;
-}
-
-div.toc li {
- background: url("bdwn.png") no-repeat scroll 0 5px transparent;
- font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif;
- margin-top: 5px;
- padding-left: 10px;
- padding-top: 2px;
-}
-
-div.toc h3 {
- font: bold 12px/1.2 Arial,FreeSans,sans-serif;
- color: #4665A2;
- border-bottom: 0 none;
- margin: 0;
-}
-
-div.toc ul {
- list-style: none outside none;
- border: medium none;
- padding: 0px;
-}
-
-div.toc li.level1 {
- margin-left: 0px;
-}
-
-div.toc li.level2 {
- margin-left: 15px;
-}
-
-div.toc li.level3 {
- margin-left: 30px;
-}
-
-div.toc li.level4 {
- margin-left: 45px;
-}
-
-.inherit_header {
- font-weight: bold;
- color: gray;
- cursor: pointer;
- -webkit-touch-callout: none;
- -webkit-user-select: none;
- -khtml-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
-}
-
-.inherit_header td {
- padding: 6px 0px 2px 5px;
-}
-
-.inherit {
- display: none;
-}
-
-tr.heading h2 {
- margin-top: 12px;
- margin-bottom: 4px;
-}
-
-/* tooltip related style info */
-
-.ttc {
- position: absolute;
- display: none;
-}
-
-#powerTip {
- cursor: default;
- white-space: nowrap;
- background-color: white;
- border: 1px solid gray;
- border-radius: 4px 4px 4px 4px;
- box-shadow: 1px 1px 7px gray;
- display: none;
- font-size: smaller;
- max-width: 80%;
- opacity: 0.9;
- padding: 1ex 1em 1em;
- position: absolute;
- z-index: 2147483647;
+ width: auto;
+ height: auto;
+ padding-left: 16px;
}
-
-#powerTip div.ttdoc {
- color: grey;
- font-style: italic;
-}
-
-#powerTip div.ttname a {
- font-weight: bold;
-}
-
-#powerTip div.ttname {
- font-weight: bold;
-}
-
-#powerTip div.ttdeci {
- color: #006318;
-}
-
-#powerTip div {
- margin: 0px;
- padding: 0px;
- font: 12px/16px Roboto,sans-serif;
-}
-
-#powerTip:before, #powerTip:after {
- content: "";
- position: absolute;
- margin: 0px;
-}
-
-#powerTip.n:after, #powerTip.n:before,
-#powerTip.s:after, #powerTip.s:before,
-#powerTip.w:after, #powerTip.w:before,
-#powerTip.e:after, #powerTip.e:before,
-#powerTip.ne:after, #powerTip.ne:before,
-#powerTip.se:after, #powerTip.se:before,
-#powerTip.nw:after, #powerTip.nw:before,
-#powerTip.sw:after, #powerTip.sw:before {
- border: solid transparent;
- content: " ";
- height: 0;
- width: 0;
- position: absolute;
-}
-
-#powerTip.n:after, #powerTip.s:after,
-#powerTip.w:after, #powerTip.e:after,
-#powerTip.nw:after, #powerTip.ne:after,
-#powerTip.sw:after, #powerTip.se:after {
- border-color: rgba(255, 255, 255, 0);
-}
-
-#powerTip.n:before, #powerTip.s:before,
-#powerTip.w:before, #powerTip.e:before,
-#powerTip.nw:before, #powerTip.ne:before,
-#powerTip.sw:before, #powerTip.se:before {
- border-color: rgba(128, 128, 128, 0);
-}
-
-#powerTip.n:after, #powerTip.n:before,
-#powerTip.ne:after, #powerTip.ne:before,
-#powerTip.nw:after, #powerTip.nw:before {
- top: 100%;
-}
-
-#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after {
- border-top-color: #ffffff;
- border-width: 10px;
- margin: 0px -10px;
-}
-#powerTip.n:before {
- border-top-color: #808080;
- border-width: 11px;
- margin: 0px -11px;
-}
-#powerTip.n:after, #powerTip.n:before {
- left: 50%;
-}
-
-#powerTip.nw:after, #powerTip.nw:before {
- right: 14px;
-}
-
-#powerTip.ne:after, #powerTip.ne:before {
- left: 14px;
-}
-
-#powerTip.s:after, #powerTip.s:before,
-#powerTip.se:after, #powerTip.se:before,
-#powerTip.sw:after, #powerTip.sw:before {
- bottom: 100%;
-}
-
-#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after {
- border-bottom-color: #ffffff;
- border-width: 10px;
- margin: 0px -10px;
-}
-
-#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before {
- border-bottom-color: #808080;
- border-width: 11px;
- margin: 0px -11px;
-}
-
-#powerTip.s:after, #powerTip.s:before {
- left: 50%;
-}
-
-#powerTip.sw:after, #powerTip.sw:before {
- right: 14px;
-}
-
-#powerTip.se:after, #powerTip.se:before {
- left: 14px;
-}
-
-#powerTip.e:after, #powerTip.e:before {
- left: 100%;
-}
-#powerTip.e:after {
- border-left-color: #ffffff;
- border-width: 10px;
- top: 50%;
- margin-top: -10px;
-}
-#powerTip.e:before {
- border-left-color: #808080;
- border-width: 11px;
- top: 50%;
- margin-top: -11px;
-}
-
-#powerTip.w:after, #powerTip.w:before {
- right: 100%;
-}
-#powerTip.w:after {
- border-right-color: #ffffff;
- border-width: 10px;
- top: 50%;
- margin-top: -10px;
-}
-#powerTip.w:before {
- border-right-color: #808080;
- border-width: 11px;
- top: 50%;
- margin-top: -11px;
-}
-
-@media print
-{
- #top { display: none; }
- #side-nav { display: none; }
- #nav-path { display: none; }
- body { overflow:visible; }
- h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
- .summary { display: none; }
- .memitem { page-break-inside: avoid; }
- #doc-content
- {
- margin-left:0 !important;
- height:auto !important;
- width:auto !important;
- overflow:inherit;
- display:inline;
- }
+/* With doxygen versions <= 1.9.2, the default setting 'overflow: hidden;' causes problems.
+   Commit 590198b416cd53313d150428d2f912586065ea0d [590198b] (Wednesday, December 1, 2021),
+   "issue #8924 Horizontal scroll bar missing in HTML for wide class="dotgraph" objects",
+   already fixes this in doxygen 1.9.3, but the setting below is still required to render
+   properly with versions <= 1.9.2. */
+ul {
+ overflow: visible;
}
-
diff --git a/src/common/example/CMakeLists.txt b/src/common/example/CMakeLists.txt
index 583a0027..fa8eb98c 100644
--- a/src/common/example/CMakeLists.txt
+++ b/src/common/example/CMakeLists.txt
@@ -12,6 +12,7 @@ if (DIFF_PATH)
add_test(Common_example_vector_double_off_reader_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/vectordoubleoffreader_result.txt ${CMAKE_CURRENT_BINARY_DIR}/alphacomplexdoc.off.txt)
+ set_tests_properties(Common_example_vector_double_off_reader_diff_files PROPERTIES DEPENDS Common_example_vector_double_off_reader)
endif()
if(NOT CGAL_VERSION VERSION_LESS 4.11.0)
diff --git a/src/common/example/example_CGAL_3D_points_off_reader.cpp b/src/common/example/example_CGAL_3D_points_off_reader.cpp
index 4658d8d5..7f4343f0 100644
--- a/src/common/example/example_CGAL_3D_points_off_reader.cpp
+++ b/src/common/example/example_CGAL_3D_points_off_reader.cpp
@@ -35,7 +35,7 @@ int main(int argc, char **argv) {
int n {};
for (auto point : point_cloud) {
++n;
- std::cout << "Point[" << n << "] = (" << point[0] << ", " << point[1] << ", " << point[2] << ")\n";
+ std::clog << "Point[" << n << "] = (" << point[0] << ", " << point[1] << ", " << point[2] << ")\n";
}
return 0;
}
diff --git a/src/common/example/example_CGAL_points_off_reader.cpp b/src/common/example/example_CGAL_points_off_reader.cpp
index f45683a5..b2bcdbcf 100644
--- a/src/common/example/example_CGAL_points_off_reader.cpp
+++ b/src/common/example/example_CGAL_points_off_reader.cpp
@@ -36,10 +36,10 @@ int main(int argc, char **argv) {
int n {};
for (auto point : point_cloud) {
- std::cout << "Point[" << n << "] = ";
+ std::clog << "Point[" << n << "] = ";
for (std::size_t i {0}; i < point.size(); i++)
- std::cout << point[i] << " ";
- std::cout << "\n";
+ std::clog << point[i] << " ";
+ std::clog << "\n";
++n;
}
return 0;
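These cout-to-clog changes recur throughout this patch. The practical effect: std::clog is the buffered stream attached to standard error, so example diagnostics no longer pollute standard output, and the two can be redirected independently. A small illustration:

```cpp
#include <iostream>

int main() {
  std::cout << "42\n";                 // program result: stays on stdout
  std::clog << "processed 1 value\n";  // diagnostic: buffered, goes to stderr
  // Run as `./a.out 2> diagnostics.log` to keep stdout clean for piping/diffing.
  return 0;
}
```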
diff --git a/src/common/include/gudhi/Clock.h b/src/common/include/gudhi/Clock.h
index 00ab2f27..6966aaaa 100644
--- a/src/common/include/gudhi/Clock.h
+++ b/src/common/include/gudhi/Clock.h
@@ -41,9 +41,9 @@ class Clock {
return msg;
}
- // Print current value to std::cout
+ // Print current value to std::clog
void print() const {
- std::cout << *this << std::endl;
+ std::clog << *this << std::endl;
}
friend std::ostream& operator<<(std::ostream& stream, const Clock& clock) {
diff --git a/src/common/include/gudhi/Debug_utils.h b/src/common/include/gudhi/Debug_utils.h
index 38abc06d..f8375b00 100644
--- a/src/common/include/gudhi/Debug_utils.h
+++ b/src/common/include/gudhi/Debug_utils.h
@@ -27,14 +27,14 @@
#define GUDHI_CHECK_code(CODE)
#endif
-#define PRINT(a) std::cerr << #a << ": " << (a) << " (DISP)" << std::endl
+#define PRINT(a) std::clog << #a << ": " << (a) << " (DISP)" << std::endl
// #define DBG_VERBOSE
#ifdef DBG_VERBOSE
- #define DBG(a) std::cout << "DBG: " << (a) << std::endl
- #define DBGMSG(a, b) std::cout << "DBG: " << a << b << std::endl
- #define DBGVALUE(a) std::cout << "DBG: " << #a << ": " << a << std::endl
- #define DBGCONT(a) std::cout << "DBG: container " << #a << " -> "; for (auto x : a) std::cout << x << ","; std::cout << std::endl
+ #define DBG(a) std::clog << "DBG: " << (a) << std::endl
+ #define DBGMSG(a, b) std::clog << "DBG: " << a << b << std::endl
+ #define DBGVALUE(a) std::clog << "DBG: " << #a << ": " << a << std::endl
+ #define DBGCONT(a) std::clog << "DBG: container " << #a << " -> "; for (auto x : a) std::clog << x << ","; std::clog << std::endl
#else
#define DBG(a) (void) 0
#define DBGMSG(a, b) (void) 0
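For reference, usage of these trace macros: the DBG* family expands to no-ops unless DBG_VERBOSE is defined (see the commented `#define DBG_VERBOSE` above), while PRINT is always active.

```cpp
#include <gudhi/Debug_utils.h>

#include <vector>

int main() {
  std::vector<int> v{3, 1, 2};
  PRINT(v.size());     // always prints: v.size(): 3 (DISP)
  DBGVALUE(v.size());  // with DBG_VERBOSE: DBG: v.size(): 3
  DBGCONT(v);          // with DBG_VERBOSE: DBG: container v -> 3,1,2,
  return 0;
}
```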
diff --git a/src/common/include/gudhi/Points_3D_off_io.h b/src/common/include/gudhi/Points_3D_off_io.h
index 2d110af3..4f74fd4b 100644
--- a/src/common/include/gudhi/Points_3D_off_io.h
+++ b/src/common/include/gudhi/Points_3D_off_io.h
@@ -41,7 +41,7 @@ class Points_3D_off_visitor_reader {
*/
void init(int dim, int num_vertices, int num_faces, int num_edges) {
#ifdef DEBUG_TRACES
- std::cout << "Points_3D_off_visitor_reader::init - dim=" << dim << " - num_vertices=" <<
+ std::clog << "Points_3D_off_visitor_reader::init - dim=" << dim << " - num_vertices=" <<
num_vertices << " - num_faces=" << num_faces << " - num_edges=" << num_edges << std::endl;
#endif // DEBUG_TRACES
if (dim == 3) {
@@ -74,11 +74,11 @@ class Points_3D_off_visitor_reader {
void point(const std::vector<double>& point) {
if (valid_) {
#ifdef DEBUG_TRACES
- std::cout << "Points_3D_off_visitor_reader::point ";
+ std::clog << "Points_3D_off_visitor_reader::point ";
for (auto coordinate : point) {
- std::cout << coordinate << " | ";
+ std::clog << coordinate << " | ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
#endif // DEBUG_TRACES
// Fill the point cloud
point_cloud_.push_back(Point_3(point[0], point[1], point[2]));
@@ -125,7 +125,7 @@ class Points_3D_off_visitor_reader {
* This example loads points from an OFF file and builds a vector of CGAL points in dimension 3.
* Then, it is asked to display the points.
*
- * @include common/example_CGAL_3D_points_off_reader.cpp
+ * @include example_CGAL_3D_points_off_reader.cpp
*
* When launching:
*
@@ -134,7 +134,7 @@ class Points_3D_off_visitor_reader {
*
* the program output is:
*
- * @include common/cgal3Doffreader_result.txt
+ * @include cgal3Doffreader_result.txt
*/
template<typename Point_3>
class Points_3D_off_reader {
diff --git a/src/common/include/gudhi/Points_off_io.h b/src/common/include/gudhi/Points_off_io.h
index 99371d56..3aa8afd8 100644
--- a/src/common/include/gudhi/Points_off_io.h
+++ b/src/common/include/gudhi/Points_off_io.h
@@ -40,7 +40,7 @@ class Points_off_visitor_reader {
*/
void init(int dim, int num_vertices, int num_faces, int num_edges) {
#ifdef DEBUG_TRACES
- std::cout << "Points_off_visitor_reader::init - dim=" << dim << " - num_vertices=" <<
+ std::clog << "Points_off_visitor_reader::init - dim=" << dim << " - num_vertices=" <<
num_vertices << " - num_faces=" << num_faces << " - num_edges=" << num_edges << std::endl;
#endif // DEBUG_TRACES
if (num_faces > 0) {
@@ -66,11 +66,11 @@ class Points_off_visitor_reader {
*/
void point(const std::vector<double>& point) {
#ifdef DEBUG_TRACES
- std::cout << "Points_off_visitor_reader::point ";
+ std::clog << "Points_off_visitor_reader::point ";
for (auto coordinate : point) {
- std::cout << coordinate << " | ";
+ std::clog << coordinate << " | ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
#endif // DEBUG_TRACES
// Fill the point cloud
point_cloud.push_back(Point_d(point.begin(), point.end()));
@@ -107,7 +107,7 @@ class Points_off_visitor_reader {
* This example loads points from an OFF file and builds a vector of points (vector of double).
* Then, it is asked to display the points.
*
- * \include common/example_vector_double_points_off_reader.cpp
+ * \include example_vector_double_points_off_reader.cpp
*
* When launching:
*
@@ -116,7 +116,7 @@ class Points_off_visitor_reader {
*
* the program outputs a file ../../data/points/alphacomplexdoc.off.txt:
*
- * \include common/vectordoubleoffreader_result.txt
+ * \include vectordoubleoffreader_result.txt
*/
template<typename Point_d>
class Points_off_reader {
diff --git a/src/common/include/gudhi/Unitary_tests_utils.h b/src/common/include/gudhi/Unitary_tests_utils.h
index 9b86460a..9f995d01 100644
--- a/src/common/include/gudhi/Unitary_tests_utils.h
+++ b/src/common/include/gudhi/Unitary_tests_utils.h
@@ -20,7 +20,7 @@ template<typename FloatingType >
void GUDHI_TEST_FLOAT_EQUALITY_CHECK(FloatingType a, FloatingType b,
FloatingType epsilon = std::numeric_limits<FloatingType>::epsilon()) {
#ifdef DEBUG_TRACES
- std::cout << "GUDHI_TEST_FLOAT_EQUALITY_CHECK - " << a << " versus " << b
+ std::clog << "GUDHI_TEST_FLOAT_EQUALITY_CHECK - " << a << " versus " << b
<< " | diff = " << std::fabs(a - b) << " - epsilon = " << epsilon << std::endl;
#endif
BOOST_CHECK(std::fabs(a - b) <= epsilon);
@@ -32,7 +32,7 @@ template<typename FloatingType >
FloatingType GUDHI_PROTECT_FLOAT(FloatingType value) {
volatile FloatingType protected_value = value;
#ifdef DEBUG_TRACES
- std::cout << "GUDHI_PROTECT_FLOAT - " << protected_value << std::endl;
+ std::clog << "GUDHI_PROTECT_FLOAT - " << protected_value << std::endl;
#endif
return protected_value;
}
diff --git a/src/common/include/gudhi/distance_functions.h b/src/common/include/gudhi/distance_functions.h
index 94cf9ccc..5e5a1e31 100644
--- a/src/common/include/gudhi/distance_functions.h
+++ b/src/common/include/gudhi/distance_functions.h
@@ -13,8 +13,6 @@
#include <gudhi/Debug_utils.h>
-#include <gudhi/Miniball.hpp>
-
#include <boost/range/metafunctions.hpp>
#include <boost/range/size.hpp>
@@ -59,53 +57,6 @@ class Euclidean_distance {
}
};
-/** @brief Compute the radius of the minimal enclosing ball between Points given by a range of coordinates.
- * The points are assumed to have the same dimension. */
-class Minimal_enclosing_ball_radius {
- public:
- /** \brief Minimal_enclosing_ball_radius from two points.
- *
- * @param[in] point_1 First point.
- * @param[in] point_2 second point.
- * @return The minimal enclosing ball radius for the two points (aka. Euclidean distance / 2.).
- *
- * \tparam Point must be a range of Cartesian coordinates.
- *
- */
- template< typename Point >
- typename std::iterator_traits<typename boost::range_iterator<Point>::type>::value_type
- operator()(const Point& point_1, const Point& point_2) const {
- return Euclidean_distance()(point_1, point_2) / 2.;
- }
- /** \brief Minimal_enclosing_ball_radius from a point cloud.
- *
- * @param[in] point_cloud The points.
- * @return The minimal enclosing ball radius for the points.
- *
- * \tparam Point_cloud must be a range of points with Cartesian coordinates.
- * Point_cloud is a range over a range of Coordinate.
- *
- */
- template< typename Point_cloud,
- typename Point_iterator = typename boost::range_const_iterator<Point_cloud>::type,
- typename Point = typename std::iterator_traits<Point_iterator>::value_type,
- typename Coordinate_iterator = typename boost::range_const_iterator<Point>::type,
- typename Coordinate = typename std::iterator_traits<Coordinate_iterator>::value_type>
- Coordinate
- operator()(const Point_cloud& point_cloud) const {
- using Min_sphere = Miniball::Miniball<Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- Min_sphere ms(boost::size(*point_cloud.begin()), point_cloud.begin(), point_cloud.end());
-#ifdef DEBUG_TRACES
- std::cout << "Minimal_enclosing_ball_radius = " << std::sqrt(ms.squared_radius()) << " | nb points = "
- << boost::size(point_cloud) << " | dimension = "
- << boost::size(*point_cloud.begin()) << std::endl;
-#endif // DEBUG_TRACES
-
- return std::sqrt(ms.squared_radius());
- }
-};
-
} // namespace Gudhi
#endif // DISTANCE_FUNCTIONS_H_
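Although the Minimal_enclosing_ball_radius helper is removed here, the two-point special case its documentation spelled out is simple enough to inline: the minimal enclosing ball of two points is centered at their midpoint, so its radius is half their Euclidean distance. A stand-in using only the Euclidean_distance functor that remains in this header:

```cpp
#include <gudhi/distance_functions.h>

#include <vector>

// The minimal enclosing ball of two points is centered at their midpoint,
// so its radius is half the Euclidean distance between them.
double two_point_enclosing_ball_radius(const std::vector<double>& p,
                                       const std::vector<double>& q) {
  return Gudhi::Euclidean_distance()(p, q) / 2.;
}
```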
diff --git a/src/common/include/gudhi/graph_simplicial_complex.h b/src/common/include/gudhi/graph_simplicial_complex.h
index b8508697..da9dee7d 100644
--- a/src/common/include/gudhi/graph_simplicial_complex.h
+++ b/src/common/include/gudhi/graph_simplicial_complex.h
@@ -19,6 +19,9 @@
#include <tuple> // for std::tie
namespace Gudhi {
+/** @file
+ * @brief Graph simplicial complex methods
+ */
/* Edge tag for Boost PropertyGraph. */
struct edge_filtration_t {
@@ -46,6 +49,8 @@ using Proximity_graph = typename boost::adjacency_list < boost::vecS, boost::vec
* If points contains n elements, the proximity graph is the graph with n vertices, and an edge [u,v] iff the
* distance function between points u and v is smaller than threshold.
*
+ * \tparam SimplicialComplexForProximityGraph furnishes `Filtration_value` and `Vertex_handle` type definitions.
+ *
* \tparam ForwardPointRange furnishes `.begin()` and `.end()` methods.
*
* \tparam Distance furnishes `operator()(const Point& p1, const Point& p2)`, where
diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h
index 9dd88ac4..25a7392d 100644
--- a/src/common/include/gudhi/random_point_generators.h
+++ b/src/common/include/gudhi/random_point_generators.h
@@ -18,6 +18,7 @@
#include <CGAL/version.h> // for CGAL_VERSION_NR
#include <vector> // for vector<>
+#include <boost/math/constants/constants.hpp> // for pi constant
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
@@ -149,6 +150,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_moment_curve(std::size_
template <typename Kernel/*, typename TC_basis*/>
std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t num_points, double R, double r,
bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -164,11 +167,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t nu
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
Point p = construct_point(k,
(R + r * std::cos(u)) * std::cos(v),
@@ -182,11 +185,13 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t nu
// "Private" function used by generate_points_on_torus_d
template <typename Kernel, typename OutputIterator>
-static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::size_t num_slices,
+static void generate_grid_points_on_torus_d(const Kernel &k, int dim, std::size_t num_slices,
OutputIterator out,
double radius_noise_percentage = 0.,
std::vector<typename Kernel::FT> current_point =
std::vector<typename Kernel::FT>()) {
+ using namespace boost::math::double_constants;
+
CGAL::Random rng;
int point_size = static_cast<int>(current_point.size());
if (point_size == 2 * dim) {
@@ -200,18 +205,20 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si
(100. + radius_noise_percentage) / 100.);
}
std::vector<typename Kernel::FT> cp2 = current_point;
- double alpha = 6.2832 * slice_idx / num_slices;
+ double alpha = two_pi * slice_idx / num_slices;
cp2.push_back(radius_noise_ratio * std::cos(alpha));
cp2.push_back(radius_noise_ratio * std::sin(alpha));
- generate_uniform_points_on_torus_d(
+ generate_grid_points_on_torus_d(
k, dim, num_slices, out, radius_noise_percentage, cp2);
}
}
}
template <typename Kernel>
-std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num_points, int dim, bool uniform = false,
+std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num_points, int dim, std::string sample = "random",
double radius_noise_percentage = 0.) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -219,9 +226,9 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num
std::vector<Point> points;
points.reserve(num_points);
- if (uniform) {
- std::size_t num_slices = (std::size_t)std::pow(num_points, 1. / dim);
- generate_uniform_points_on_torus_d(
+ if (sample == "grid") {
+ std::size_t num_slices = (std::size_t)std::pow(num_points + .5, 1. / dim); // add .5 to avoid rounding down with numerical approximations
+ generate_grid_points_on_torus_d(
k, dim, num_slices, std::back_inserter(points), radius_noise_percentage);
} else {
for (std::size_t i = 0; i < num_points;) {
@@ -234,7 +241,7 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num
std::vector<typename Kernel::FT> pt;
pt.reserve(dim * 2);
for (int curdim = 0; curdim < dim; ++curdim) {
- FT alpha = rng.get_double(0, 6.2832);
+ FT alpha = rng.get_double(0, two_pi);
pt.push_back(radius_noise_ratio * std::cos(alpha));
pt.push_back(radius_noise_ratio * std::sin(alpha));
}
@@ -357,6 +364,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_two_spheres_d(std::size
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std::size_t num_points,
double sphere_radius) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::FT FT;
typedef typename Kernel::Point_d Point;
Kernel k;
@@ -370,7 +379,7 @@ std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std:
for (std::size_t i = 0; i < num_points;) {
Point p_sphere = *generator++; // First 3 coords
- FT alpha = rng.get_double(0, 6.2832);
+ FT alpha = rng.get_double(0, two_pi);
std::vector<FT> pt(5);
pt[0] = k_coord(p_sphere, 0);
pt[1] = k_coord(p_sphere, 1);
@@ -388,6 +397,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std:
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::size_t num_points, double a, double b,
bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -403,11 +414,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::si
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v);
Point p = construct_point(k,
@@ -424,6 +435,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::si
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_4D(std::size_t num_points, double a, double b,
double noise = 0., bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -439,11 +452,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_4D(std::si
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
Point p = construct_point(k,
(a + b * cos(v)) * cos(u) + (noise == 0. ? 0. : rng.get_double(0, noise)),
@@ -463,6 +476,8 @@ template <typename Kernel>
std::vector<typename Kernel::Point_d>
generate_points_on_klein_bottle_variant_5D(
std::size_t num_points, double a, double b, bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -478,11 +493,11 @@ generate_points_on_klein_bottle_variant_5D(
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
FT x1 = (a + b * cos(v)) * cos(u);
FT x2 = (a + b * cos(v)) * sin(u);
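With the `uniform` flag replaced by a `sample` string, grid sampling is now requested by name. A hypothetical call, assuming the signature introduced above (the kernel choice is illustrative):

```cpp
#include <gudhi/random_point_generators.h>

#include <CGAL/Epick_d.h>

int main() {
  using K = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
  // 100 points on the 2-torus embedded in R^4, laid out on a regular grid;
  // pass "random" (the default) for uniform random angles instead.
  auto points = Gudhi::generate_points_on_torus_d<K>(100, 2, "grid");
  return 0;
}
```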
diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h
index db31bf5c..a7d82541 100644
--- a/src/common/include/gudhi/reader_utils.h
+++ b/src/common/include/gudhi/reader_utils.h
@@ -14,7 +14,7 @@
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Debug_utils.h>
-#include <boost/function_output_iterator.hpp>
+#include <boost/iterator/function_output_iterator.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <iostream>
@@ -220,7 +220,7 @@ template <typename Filtration_value>
std::vector<std::vector<Filtration_value>> read_lower_triangular_matrix_from_csv_file(const std::string& filename,
const char separator = ';') {
#ifdef DEBUG_TRACES
- std::cout << "Using procedure read_lower_triangular_matrix_from_csv_file \n";
+ std::clog << "Using procedure read_lower_triangular_matrix_from_csv_file \n";
#endif // DEBUG_TRACES
std::vector<std::vector<Filtration_value>> result;
std::ifstream in;
@@ -231,7 +231,7 @@ std::vector<std::vector<Filtration_value>> read_lower_triangular_matrix_from_csv
std::string line;
- // the first line is emtpy, so we ignore it:
+ // the first line is empty, so we ignore it:
std::getline(in, line);
std::vector<Filtration_value> values_in_this_line;
result.push_back(values_in_this_line);
@@ -272,12 +272,12 @@ std::vector<std::vector<Filtration_value>> read_lower_triangular_matrix_from_csv
in.close();
#ifdef DEBUG_TRACES
- std::cerr << "Here is the matrix we read : \n";
+ std::clog << "Here is the matrix we read : \n";
for (size_t i = 0; i != result.size(); ++i) {
for (size_t j = 0; j != result[i].size(); ++j) {
- std::cerr << result[i][j] << " ";
+ std::clog << result[i][j] << " ";
}
- std::cerr << std::endl;
+ std::clog << std::endl;
}
#endif // DEBUG_TRACES
@@ -294,7 +294,7 @@ Note: the function does not check that birth <= death.
template <typename OutputIterator>
void read_persistence_intervals_and_dimension(std::string const& filename, OutputIterator out) {
#ifdef DEBUG_TRACES
- std::cout << "read_persistence_intervals_and_dimension - " << filename << std::endl;
+ std::clog << "read_persistence_intervals_and_dimension - " << filename << std::endl;
#endif // DEBUG_TRACES
std::ifstream in(filename);
if (!in.is_open()) {
@@ -311,11 +311,11 @@ void read_persistence_intervals_and_dimension(std::string const& filename, Outpu
double numbers[4];
int n = sscanf(line.c_str(), "%lf %lf %lf %lf", &numbers[0], &numbers[1], &numbers[2], &numbers[3]);
#ifdef DEBUG_TRACES
- std::cout << "[" << n << "] = ";
+ std::clog << "[" << n << "] = ";
for (int i = 0; i < n; i++) {
- std::cout << numbers[i] << ",";
+ std::clog << numbers[i] << ",";
}
- std::cout << std::endl;
+ std::clog << std::endl;
#endif // DEBUG_TRACES
if (n >= 2) {
int dim = (n >= 3 ? static_cast<int>(numbers[n - 3]) : -1);
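For context, the parser above reads up to four whitespace-separated numbers per line and infers the dimension from the column count: with n parsed values, the dimension is numbers[n-3] when n >= 3 and -1 (unknown) otherwise. A self-contained sketch of that convention, assuming (as in GUDHI's .pers format) that birth and death are always the last two columns:

    #include <cstdio>
    #include <string>
    #include <tuple>

    // Returns {dimension, birth, death}; dimension is -1 when absent.
    std::tuple<int, double, double> parse_interval_line(const std::string& line) {
      double numbers[4];
      int n = std::sscanf(line.c_str(), "%lf %lf %lf %lf",
                          &numbers[0], &numbers[1], &numbers[2], &numbers[3]);
      if (n < 2) return {-1, 0., 0.};  // comment or blank line: no interval
      int dim = (n >= 3 ? static_cast<int>(numbers[n - 3]) : -1);
      return {dim, numbers[n - 2], numbers[n - 1]};
    }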
diff --git a/src/common/include/gudhi/writing_persistence_to_file.h b/src/common/include/gudhi/writing_persistence_to_file.h
index 2e36b831..3a0df1a8 100644
--- a/src/common/include/gudhi/writing_persistence_to_file.h
+++ b/src/common/include/gudhi/writing_persistence_to_file.h
@@ -48,7 +48,7 @@ class Persistence_interval_common {
: birth_(birth), death_(death), dimension_(dim), arith_element_(field) {}
/**
- * Operator to compare two persistence pairs. During the comparision all the
+ * Operator to compare two persistence pairs. During the comparison all the
 * fields: birth, death, dimension and arith_element_ are taken into account
* and they all have to be equal for two pairs to be equal.
**/
@@ -65,7 +65,7 @@ class Persistence_interval_common {
/**
 * Operator to compare objects of type Persistence_interval_common.
 * One interval is smaller than the other if it has lower persistence.
- * Note that this operator do not take Arith_element into account when doing comparisions.
+ * Note that this operator does not take Arith_element into account when doing comparisons.
**/
bool operator<(const Persistence_interval_common& i2) const {
return fabs(this->death_ - this->birth_) < fabs(i2.death_ - i2.birth_);
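Note that this operator orders intervals purely by persistence |death - birth|, so two distinct intervals with equal persistence compare as neither smaller than the other. A minimal self-contained check, with a plain struct standing in for the class:

    #include <cassert>
    #include <cmath>

    struct Interval { double birth, death; };

    bool less_persistent(const Interval& a, const Interval& b) {
      return std::fabs(a.death - a.birth) < std::fabs(b.death - b.birth);
    }

    int main() {
      assert(less_persistent({0., 1.}, {0., 2.}));   // persistence 1 < 2
      assert(!less_persistent({5., 7.}, {0., 2.}));  // both have persistence 2
    }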
diff --git a/src/common/test/test_distance_matrix_reader.cpp b/src/common/test/test_distance_matrix_reader.cpp
index bb619a29..92e899b8 100644
--- a/src/common/test/test_distance_matrix_reader.cpp
+++ b/src/common/test/test_distance_matrix_reader.cpp
@@ -28,15 +28,15 @@ BOOST_AUTO_TEST_CASE( lower_triangular_distance_matrix )
',');
for (auto& i : from_lower_triangular) {
for (auto j : i) {
- std::cout << j << " ";
+ std::clog << j << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
- std::cout << "from_lower_triangular size = " << from_lower_triangular.size() << std::endl;
+ std::clog << "from_lower_triangular size = " << from_lower_triangular.size() << std::endl;
BOOST_CHECK(from_lower_triangular.size() == 5);
for (std::size_t i = 0; i < from_lower_triangular.size(); i++) {
- std::cout << "from_lower_triangular[" << i << "] size = " << from_lower_triangular[i].size() << std::endl;
+ std::clog << "from_lower_triangular[" << i << "] size = " << from_lower_triangular[i].size() << std::endl;
BOOST_CHECK(from_lower_triangular[i].size() == i);
}
std::vector<double> expected = {1};
@@ -57,17 +57,17 @@ BOOST_AUTO_TEST_CASE( full_square_distance_matrix )
{
Distance_matrix from_full_square;
// Read full_square_distance_matrix.csv file where the separator is the default one ';'
- from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv");
+ from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv", ';');
for (auto& i : from_full_square) {
for (auto j : i) {
- std::cout << j << " ";
+ std::clog << j << " ";
}
- std::cout << std::endl;
+ std::clog << std::endl;
}
- std::cout << "from_full_square size = " << from_full_square.size() << std::endl;
+ std::clog << "from_full_square size = " << from_full_square.size() << std::endl;
BOOST_CHECK(from_full_square.size() == 7);
for (std::size_t i = 0; i < from_full_square.size(); i++) {
- std::cout << "from_full_square[" << i << "] size = " << from_full_square[i].size() << std::endl;
+ std::clog << "from_full_square[" << i << "] size = " << from_full_square[i].size() << std::endl;
BOOST_CHECK(from_full_square[i].size() == i);
}
}
diff --git a/src/common/test/test_persistence_intervals_reader.cpp b/src/common/test/test_persistence_intervals_reader.cpp
index 8fb4377d..ac8d0981 100644
--- a/src/common/test/test_persistence_intervals_reader.cpp
+++ b/src/common/test/test_persistence_intervals_reader.cpp
@@ -35,18 +35,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_without_dimension )
Persistence_intervals_by_dimension persistence_intervals_by_dimension =
Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_without_dimension.pers");
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
for (auto map_iter : expected_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n";
for (auto map_iter : persistence_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension);
@@ -60,13 +60,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_without_dimension )
Persistence_intervals persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_without_dimension.pers");
- std::cout << "\nread_persistence_intervals_in_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - read\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -103,18 +103,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
Persistence_intervals_by_dimension persistence_intervals_by_dimension =
Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_with_dimension.pers");
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
for (auto map_iter : expected_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n";
for (auto map_iter : persistence_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension);
@@ -128,13 +128,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
Persistence_intervals persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers");
- std::cout << "\nread_persistence_intervals_in_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -143,13 +143,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 0);
- std::cout << "\nread_persistence_intervals_in_dimension 0 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 0 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 0 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 0 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -159,13 +159,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 1);
- std::cout << "\nread_persistence_intervals_in_dimension 1 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 1 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 1 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 1 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -173,13 +173,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 2);
- std::cout << "\nread_persistence_intervals_in_dimension 2 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 2 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 2 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 2 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -188,13 +188,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_dimension )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_dimension.pers", 3);
- std::cout << "\nread_persistence_intervals_in_dimension 3 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 3 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 3 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 3 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -212,18 +212,18 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
Persistence_intervals_by_dimension persistence_intervals_by_dimension =
Gudhi::read_persistence_intervals_grouped_by_dimension("persistence_intervals_with_field.pers");
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - expected\n";
for (auto map_iter : expected_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
- std::cout << "\nread_persistence_intervals_grouped_by_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_grouped_by_dimension - read\n";
for (auto map_iter : persistence_intervals_by_dimension) {
- std::cout << "key=" << map_iter.first;
+ std::clog << "key=" << map_iter.first;
for (auto vec_iter : map_iter.second)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
}
BOOST_CHECK(persistence_intervals_by_dimension == expected_intervals_by_dimension);
@@ -237,13 +237,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
Persistence_intervals persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers");
- std::cout << "\nread_persistence_intervals_in_dimension - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -252,13 +252,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 0);
- std::cout << "\nread_persistence_intervals_in_dimension 0 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 0 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 0 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 0 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -268,13 +268,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 1);
- std::cout << "\nread_persistence_intervals_in_dimension 1 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 1 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 1 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 1 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -282,13 +282,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 2);
- std::cout << "\nread_persistence_intervals_in_dimension 2 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 2 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 2 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 2 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
@@ -297,13 +297,13 @@ BOOST_AUTO_TEST_CASE( persistence_intervals_with_field )
persistence_intervals_in_dimension =
Gudhi::read_persistence_intervals_in_dimension("persistence_intervals_with_field.pers", 3);
- std::cout << "\nread_persistence_intervals_in_dimension 3 - expected\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 3 - expected\n";
for (auto vec_iter : expected_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
- std::cout << "\nread_persistence_intervals_in_dimension 3 - read\n";
+ std::clog << "\nread_persistence_intervals_in_dimension 3 - read\n";
for (auto vec_iter : persistence_intervals_in_dimension)
- std::cout << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
+ std::clog << " [" << vec_iter.first << " ," << vec_iter.second << "] ";
BOOST_CHECK(persistence_intervals_in_dimension == expected_intervals_in_dimension);
diff --git a/src/common/utilities/off_file_from_shape_generator.cpp b/src/common/utilities/off_file_from_shape_generator.cpp
index 6efef4fc..71ede434 100644
--- a/src/common/utilities/off_file_from_shape_generator.cpp
+++ b/src/common/utilities/off_file_from_shape_generator.cpp
@@ -135,7 +135,7 @@ int main(int argc, char **argv) {
if (dimension == 3)
points = Gudhi::generate_points_on_torus_3D<K>(points_number, dimension, radius, radius/2.);
else
- points = Gudhi::generate_points_on_torus_d<K>(points_number, dimension, true);
+ points = Gudhi::generate_points_on_torus_d<K>(points_number, dimension, "grid");
break;
case Data_shape::klein:
switch (dimension) {
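The call above reflects an API change in the torus generator: the former boolean uniform flag becomes a string naming the sampling scheme. A hedged usage sketch, assuming the generator is declared in gudhi/random_point_generators.h and that "random" is the alternative to "grid" (only "grid" is confirmed by this hunk):

    #include <gudhi/random_point_generators.h>  // assumed header location
    #include <CGAL/Epick_d.h>

    int main() {
      using K = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
      // 100 points on the 3-torus (ambient dimension 2 * 3), on a regular grid.
      auto points = Gudhi::generate_points_on_torus_d<K>(100, 3, "grid");
      return points.empty() ? 1 : 0;
    }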
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index b558d4c4..39e2acd4 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -14,6 +14,18 @@ function( add_GUDHI_PYTHON_lib THE_LIB )
endif(EXISTS ${THE_LIB})
endfunction( add_GUDHI_PYTHON_lib )
+function( add_GUDHI_PYTHON_lib_dir)
+ # Argument may be a list (specifically on windows with release/debug paths)
+ foreach(THE_LIB_DIR IN LISTS ARGN)
+    # handle the case where it is not set - otherwise an error occurs on windows
+ if(EXISTS ${THE_LIB_DIR})
+ set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${THE_LIB_DIR}', " PARENT_SCOPE)
+ else()
+ message("add_GUDHI_PYTHON_lib_dir - '${THE_LIB_DIR}' does not exist")
+ endif()
+ endforeach()
+endfunction( add_GUDHI_PYTHON_lib_dir )
+
# THE_TEST is the python test file name (without .py extension) containing tests functions
function( add_gudhi_py_test THE_TEST )
if(PYTEST_FOUND)
@@ -32,8 +44,16 @@ function( add_gudhi_debug_info DEBUG_INFO )
endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
- if(CYTHON_FOUND)
- set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ")
+ if(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
+ add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
+ # PyBind11 modules
+ set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets', ")
+
+ # Cython modules
+ set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_utils', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'simplex_tree', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'rips_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'cubical_complex', ")
@@ -42,7 +62,6 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'reader_utils', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'witness_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'strong_witness_complex', ")
- set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'nerve_gic', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'subsampling', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'tangential_complex', ")
@@ -51,7 +70,11 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_strong_witness_complex', ")
# Modules that should not be auto-imported in __init__.py
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'tensorflow', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'dtm_rips_complex', ")
add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -73,6 +96,32 @@ if(PYTHONINTERP_FOUND)
if(OT_FOUND)
add_gudhi_debug_info("POT version ${OT_VERSION}")
endif()
+ if(HNSWLIB_FOUND)
+ # Does not have a version number...
+ add_gudhi_debug_info("HNSWlib found")
+ endif()
+ if(TORCH_FOUND)
+ add_gudhi_debug_info("PyTorch version ${TORCH_VERSION}")
+ endif()
+ if(PYKEOPS_FOUND)
+ add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}")
+ endif()
+ if(EAGERPY_FOUND)
+ add_gudhi_debug_info("EagerPy version ${EAGERPY_VERSION}")
+ endif()
+ if(TENSORFLOW_FOUND)
+ add_gudhi_debug_info("TensorFlow version ${TENSORFLOW_VERSION}")
+ endif()
+ if(SPHINX_FOUND)
+ add_gudhi_debug_info("Sphinx version ${SPHINX_VERSION}")
+ endif()
+ if(SPHINX_PARAMLINKS_FOUND)
+ add_gudhi_debug_info("Sphinx-paramlinks version ${SPHINX_PARAMLINKS_VERSION}")
+ endif()
+ if(PYTHON_DOCS_THEME_FOUND)
+ # Does not have a version number...
+ add_gudhi_debug_info("python_docs_theme found")
+ endif()
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ")
@@ -80,12 +129,14 @@ if(PYTHONINTERP_FOUND)
# Gudhi and CGAL compilation option
if(MSVC)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'/std:c++17', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'/fp:strict', ")
else(MSVC)
- set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-std=c++14', ")
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-std=c++17', ")
endif(MSVC)
if(CMAKE_COMPILER_IS_GNUCXX)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-frounding-math', ")
+ set(GUDHI_PYBIND11_EXTRA_COMPILE_ARGS "${GUDHI_PYBIND11_EXTRA_COMPILE_ARGS}'-fvisibility=hidden', ")
endif(CMAKE_COMPILER_IS_GNUCXX)
if (CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-fp-model strict', ")
@@ -101,37 +152,47 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
endif (EIGEN3_FOUND)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'off_reader', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'simplex_tree', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'rips_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'periodic_cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'reader_utils', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_utils', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'simplex_tree', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'rips_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'periodic_cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'reader_utils', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'strong_witness_complex', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'clustering/_tomato', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'bottleneck', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/_points', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ")
endif ()
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
+ endif ()
+
+  # On windows with vcpkg eigen 3.4.0#2, the build fails with
+ # error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+ # cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+ # Workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+ if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int', ")
+ endif()
+
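The workaround described in the comment above can be reproduced in isolation: defining the macro before any Eigen include (which is what passing '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int' does globally) switches Eigen::Index from its default std::ptrdiff_t to int. A minimal sketch:

    #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int  // must precede every Eigen include
    #include <Eigen/Dense>
    #include <type_traits>

    static_assert(std::is_same<Eigen::Index, int>::value,
                  "dense index type forced to int, sidestepping the GMP conversion error");

    int main() {
      Eigen::MatrixXd m(2, 2);
      return static_cast<int>(m.rows()) - 2;  // rows()/cols() now return int
    }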
+ add_gudhi_debug_info("Boost version ${Boost_VERSION}")
if(CGAL_FOUND)
- can_cgal_use_cxx11_thread_local()
- if (NOT CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
- if(CMAKE_BUILD_TYPE MATCHES Debug)
- add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_DEBUG}")
- else()
- add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_RELEASE}")
- endif()
- message("** Add Boost ${Boost_LIBRARY_DIRS}")
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${Boost_LIBRARY_DIRS}', ")
- endif()
+ if(NOT CGAL_VERSION VERSION_LESS 5.3.0)
+ # CGAL_HEADER_ONLY has been dropped for CGAL >= 5.3. Only the header-only version is supported.
+ set(CGAL_HEADER_ONLY True)
+ endif(NOT CGAL_VERSION VERSION_LESS 5.3.0)
# Add CGAL compilation args
if(CGAL_HEADER_ONLY)
add_gudhi_debug_info("CGAL header only version ${CGAL_VERSION}")
@@ -139,7 +200,7 @@ if(PYTHONINTERP_FOUND)
else(CGAL_HEADER_ONLY)
add_gudhi_debug_info("CGAL version ${CGAL_VERSION}")
add_GUDHI_PYTHON_lib("${CGAL_LIBRARY}")
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${CGAL_LIBRARIES_DIR}', ")
+ add_GUDHI_PYTHON_lib_dir(${CGAL_LIBRARIES_DIR})
message("** Add CGAL ${CGAL_LIBRARIES_DIR}")
# If CGAL is not header only, CGAL library may link with boost system,
if(CMAKE_BUILD_TYPE MATCHES Debug)
@@ -147,7 +208,7 @@ if(PYTHONINTERP_FOUND)
else()
add_GUDHI_PYTHON_lib("${Boost_SYSTEM_LIBRARY_RELEASE}")
endif()
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${Boost_LIBRARY_DIRS}', ")
+ add_GUDHI_PYTHON_lib_dir(${Boost_LIBRARY_DIRS})
message("** Add Boost ${Boost_LIBRARY_DIRS}")
endif(CGAL_HEADER_ONLY)
# GMP and GMPXX are not required, but if present, CGAL will link with them.
@@ -155,15 +216,20 @@ if(PYTHONINTERP_FOUND)
add_gudhi_debug_info("GMP_LIBRARIES = ${GMP_LIBRARIES}")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMP', ")
add_GUDHI_PYTHON_lib("${GMP_LIBRARIES}")
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${GMP_LIBRARIES_DIR}', ")
+ if(NOT GMP_LIBRARIES_DIR)
+ get_filename_component(GMP_LIBRARIES_DIR ${GMP_LIBRARIES} PATH)
+ message("GMP_LIBRARIES_DIR from GMP_LIBRARIES set to ${GMP_LIBRARIES_DIR}")
+ endif(NOT GMP_LIBRARIES_DIR)
+ add_GUDHI_PYTHON_lib_dir(${GMP_LIBRARIES_DIR})
message("** Add gmp ${GMP_LIBRARIES_DIR}")
+  # When FORCE_CGAL_NOT_TO_BUILD_WITH_GMPXX is set, not defining CGAL_USE_GMPXX is sufficient
if(GMPXX_FOUND)
add_gudhi_debug_info("GMPXX_LIBRARIES = ${GMPXX_LIBRARIES}")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMPXX', ")
add_GUDHI_PYTHON_lib("${GMPXX_LIBRARIES}")
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${GMPXX_LIBRARIES_DIR}', ")
+ add_GUDHI_PYTHON_lib_dir(${GMPXX_LIBRARIES_DIR})
message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
- endif(GMPXX_FOUND)
+ endif()
endif(GMP_FOUND)
if(MPFR_FOUND)
add_gudhi_debug_info("MPFR_LIBRARIES = ${MPFR_LIBRARIES}")
@@ -172,16 +238,17 @@ if(PYTHONINTERP_FOUND)
# In case CGAL is not header only, all MPFR variables are set except MPFR_LIBRARIES_DIR - Just set it
if(NOT MPFR_LIBRARIES_DIR)
get_filename_component(MPFR_LIBRARIES_DIR ${MPFR_LIBRARIES} PATH)
+ message("MPFR_LIBRARIES_DIR from MPFR_LIBRARIES set to ${MPFR_LIBRARIES_DIR}")
endif(NOT MPFR_LIBRARIES_DIR)
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${MPFR_LIBRARIES_DIR}', ")
- message("** Add mpfr ${MPFR_LIBRARIES}")
+ add_GUDHI_PYTHON_lib_dir(${MPFR_LIBRARIES_DIR})
+ message("** Add mpfr ${MPFR_LIBRARIES_DIR}")
endif(MPFR_FOUND)
-endif(CGAL_FOUND)
+ endif(CGAL_FOUND)
# Specific for Mac
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
- set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-mmacosx-version-min=10.12', ")
- set(GUDHI_PYTHON_EXTRA_LINK_ARGS "${GUDHI_PYTHON_EXTRA_LINK_ARGS}'-mmacosx-version-min=10.12', ")
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-mmacosx-version-min=10.14', ")
+ set(GUDHI_PYTHON_EXTRA_LINK_ARGS "${GUDHI_PYTHON_EXTRA_LINK_ARGS}'-mmacosx-version-min=10.14', ")
endif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# Loop on INCLUDE_DIRECTORIES PROPERTY
@@ -194,18 +261,22 @@ endif(CGAL_FOUND)
if (TBB_FOUND AND WITH_GUDHI_USE_TBB)
add_gudhi_debug_info("TBB version ${TBB_INTERFACE_VERSION} found and used")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DGUDHI_USE_TBB', ")
- if(CMAKE_BUILD_TYPE MATCHES Debug)
+ if((CMAKE_BUILD_TYPE MATCHES Debug) AND TBB_DEBUG_LIBRARY)
add_GUDHI_PYTHON_lib("${TBB_DEBUG_LIBRARY}")
add_GUDHI_PYTHON_lib("${TBB_MALLOC_DEBUG_LIBRARY}")
else()
add_GUDHI_PYTHON_lib("${TBB_RELEASE_LIBRARY}")
add_GUDHI_PYTHON_lib("${TBB_MALLOC_RELEASE_LIBRARY}")
endif()
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${TBB_LIBRARY_DIRS}', ")
+ add_GUDHI_PYTHON_lib_dir(${TBB_LIBRARY_DIRS})
message("** Add tbb ${TBB_LIBRARY_DIRS}")
set(GUDHI_PYTHON_INCLUDE_DIRS "${GUDHI_PYTHON_INCLUDE_DIRS}'${TBB_INCLUDE_DIRS}', ")
endif()
+ if(DEBUG_TRACES)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DDEBUG_TRACES', ")
+ endif(DEBUG_TRACES)
+
if(UNIX AND WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS)
set( GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}")
endif(UNIX AND WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS)
@@ -220,7 +291,20 @@ endif(CGAL_FOUND)
# Other .py files
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
- file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
+ file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera")
+ file(COPY "gudhi/datasets" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
+ file(COPY "gudhi/sklearn" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
+
+ # Some files for pip package
+ file(COPY "introduction.rst" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
+ file(COPY "pyproject.toml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
add_custom_command(
OUTPUT gudhi.so
@@ -230,39 +314,111 @@ endif(CGAL_FOUND)
add_custom_target(python ALL DEPENDS gudhi.so
COMMENT "Do not forget to add ${CMAKE_CURRENT_BINARY_DIR}/ to your PYTHONPATH before using examples or tests")
- install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)")
+ # Path separator management for windows
+ if (WIN32)
+ set(GUDHI_PYTHON_PATH_ENV "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR};$ENV{PYTHONPATH}")
+ else(WIN32)
+ set(GUDHI_PYTHON_PATH_ENV "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:$ENV{PYTHONPATH}")
+ endif(WIN32)
+ # Documentation generation is available through sphinx - requires all modules
+  # Make it first, as the sphinx test is by far the longest one, which helps when testing in parallel
+ if(SPHINX_PATH)
+ if(SPHINX_PARAMLINKS_FOUND)
+ if(PYTHON_DOCS_THEME_FOUND)
+ if(MATPLOTLIB_FOUND)
+ if(NUMPY_FOUND)
+ if(SCIPY_FOUND)
+ if(SKLEARN_FOUND)
+ if(OT_FOUND)
+ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
+            # User warning - Sphinx is a static page generator, configured to work with user_version
+            # Image and bibliography warnings appear because those files are not found in the developer version
+ if (GUDHI_PYTHON_PATH STREQUAL "src/python")
+            set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING: Sphinx is configured for the user version, but you are running it on the developer version. Images and bibliography will be missing")
+ endif()
+ # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
+ add_custom_target(sphinx
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
+ COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
+ add_test(NAME sphinx_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
+ # Set missing or not modules
+ set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
+ else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 5.1.0")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ else(OT_FOUND)
+ message("++ Python documentation module will not be compiled because POT was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(OT_FOUND)
+ else(SKLEARN_FOUND)
+ message("++ Python documentation module will not be compiled because scikit-learn was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SKLEARN_FOUND)
+ else(SCIPY_FOUND)
+ message("++ Python documentation module will not be compiled because scipy was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SCIPY_FOUND)
+ else(NUMPY_FOUND)
+ message("++ Python documentation module will not be compiled because numpy was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NUMPY_FOUND)
+ else(MATPLOTLIB_FOUND)
+ message("++ Python documentation module will not be compiled because matplotlib was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(MATPLOTLIB_FOUND)
+ else(PYTHON_DOCS_THEME_FOUND)
+ message("++ Python documentation module will not be compiled because python-docs-theme was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(PYTHON_DOCS_THEME_FOUND)
+ else(SPHINX_PARAMLINKS_FOUND)
+ message("++ Python documentation module will not be compiled because sphinxcontrib-paramlinks was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SPHINX_PARAMLINKS_FOUND)
+ else(SPHINX_PATH)
+ message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SPHINX_PATH)
+
# Test examples
- if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Bottleneck and Alpha
add_test(NAME alpha_rips_persistence_bottleneck_distance_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_rips_persistence_bottleneck_distance.py"
-f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -t 0.15 -d 3)
- if(MATPLOTLIB_FOUND AND NUMPY_FOUND)
- # Tangential
- add_test(NAME tangential_complex_plain_homology_from_off_file_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/tangential_complex_plain_homology_from_off_file_example.py"
- --no-diagram -i 2 -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
-
- add_gudhi_py_test(test_tangential_complex)
-
- # Witness complex AND Subsampling
- add_test(NAME euclidean_strong_witness_complex_diagram_persistence_from_off_file_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
-
- add_test(NAME euclidean_witness_complex_diagram_persistence_from_off_file_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
- endif()
+ endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ # Tangential
+ add_test(NAME tangential_complex_plain_homology_from_off_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/tangential_complex_plain_homology_from_off_file_example.py"
+ --no-diagram -i 2 -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
+
+ add_gudhi_py_test(test_tangential_complex)
+
+ # Witness complex
+ add_test(NAME euclidean_strong_witness_complex_diagram_persistence_from_off_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py"
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
+
+ add_test(NAME euclidean_witness_complex_diagram_persistence_from_off_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py"
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
# Subsampling
add_gudhi_py_test(test_subsampling)
@@ -272,100 +428,101 @@ endif(CGAL_FOUND)
# Bottleneck
add_test(NAME bottleneck_basic_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/bottleneck_basic_example.py")
add_gudhi_py_test(test_bottleneck_distance)
+ endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- # Cover complex
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- add_test(NAME cover_complex_nerve_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
- -f human.off -c 2 -r 10 -g 0.3)
+ # Cover complex
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ add_test(NAME cover_complex_nerve_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
+ -f human.off -c 2 -r 10 -g 0.3)
- add_test(NAME cover_complex_coordinate_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
- -f human.off -c 0 -v)
+ add_test(NAME cover_complex_coordinate_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
+ -f human.off -c 0 -v)
- add_test(NAME cover_complex_functional_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
- -o lucky_cat.off
- -f lucky_cat_PCA1 -v)
+ add_test(NAME cover_complex_functional_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
+ -o lucky_cat.off
+ -f lucky_cat_PCA1 -v)
- add_test(NAME cover_complex_voronoi_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
- -f human.off -n 700 -v)
+ add_test(NAME cover_complex_voronoi_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
+ -f human.off -n 700 -v)
- add_gudhi_py_test(test_cover_complex)
- endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_gudhi_py_test(test_cover_complex)
- if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Alpha
add_test(NAME alpha_complex_from_points_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_points_example.py")
- if(MATPLOTLIB_FOUND AND NUMPY_FOUND)
- add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test
+ add_test(NAME alpha_complex_from_generated_points_on_sphere_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_diagram_persistence_from_off_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 0.6)
- endif()
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_generated_points_on_sphere_example.py")
+ add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_diagram_persistence_from_off_file_example.py"
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
add_gudhi_py_test(test_alpha_complex)
- endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Euclidean witness
add_gudhi_py_test(test_euclidean_witness_complex)
+ # Datasets generators
+      add_gudhi_py_test(test_datasets_generators) # TODO: move the pure Python dataset generators to a separate test file independent of CGAL?
+
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Cubical
add_test(NAME periodic_cubical_complex_barcode_persistence_from_perseus_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py"
--no-barcode -f ${CMAKE_SOURCE_DIR}/data/bitmap/CubicalTwoSphere.txt)
- if(NUMPY_FOUND)
- add_test(NAME random_cubical_complex_persistence_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/random_cubical_complex_persistence_example.py"
- 10 10 10)
- endif()
+ add_test(NAME random_cubical_complex_persistence_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/random_cubical_complex_persistence_example.py"
+ 10 10 10)
add_gudhi_py_test(test_cubical_complex)
# Rips
- if(MATPLOTLIB_FOUND AND NUMPY_FOUND)
- add_test(NAME rips_complex_diagram_persistence_from_distance_matrix_file_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3)
+ add_test(NAME rips_complex_diagram_persistence_from_distance_matrix_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py"
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3)
- add_test(NAME rips_complex_diagram_persistence_from_off_file_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_off_file_example.py
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -e 0.25 -d 3)
- endif()
+ add_test(NAME rips_complex_diagram_persistence_from_off_file_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_off_file_example.py
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -e 0.25 -d 3)
add_test(NAME rips_complex_from_points_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_from_points_example.py)
add_gudhi_py_test(test_rips_complex)
@@ -373,97 +530,103 @@ endif(CGAL_FOUND)
# Simplex tree
add_test(NAME simplex_tree_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py)
add_gudhi_py_test(test_simplex_tree)
+ add_gudhi_py_test(test_simplex_generators)
# Witness
add_test(NAME witness_complex_from_nearest_landmark_table_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/witness_complex_from_nearest_landmark_table.py)
add_gudhi_py_test(test_witness_complex)
# Reader utils
add_gudhi_py_test(test_reader_utils)
+ add_gudhi_py_test(test_off)
# Wasserstein
if(OT_FOUND)
- add_gudhi_py_test(test_wasserstein_distance)
- endif(OT_FOUND)
+    # EagerPy is required because the test uses enable_autodiff=True
+ if(EAGERPY_FOUND)
+ add_gudhi_py_test(test_wasserstein_distance)
+ endif()
+
+ add_gudhi_py_test(test_wasserstein_barycenter)
+
+ if(TORCH_FOUND AND TENSORFLOW_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_wasserstein_with_tensors)
+ endif()
+ endif()
# Representations
- if(SKLEARN_FOUND AND MATPLOTLIB_FOUND)
+ if(SKLEARN_FOUND AND MATPLOTLIB_FOUND AND OT_FOUND AND NOT CGAL_VERSION VERSION_LESS 4.11.0)
add_gudhi_py_test(test_representations)
endif()
- # Documentation generation is available through sphinx - requires all modules
- if(SPHINX_PATH)
- if(MATPLOTLIB_FOUND)
- if(NUMPY_FOUND)
- if(SCIPY_FOUND)
- if(SKLEARN_FOUND)
- if(OT_FOUND)
- if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
- # User warning - Sphinx is a static pages generator, and configured to work fine with user_version
- # Images and biblio warnings because not found on developper version
- if (GUDHI_PYTHON_PATH STREQUAL "src/python")
- set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
- endif()
- # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
- add_custom_target(sphinx
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
- DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
- COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
-
- add_test(NAME sphinx_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
-
- # Set missing or not modules
- set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
- else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- else(OT_FOUND)
- message("++ Python documentation module will not be compiled because POT was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(OT_FOUND)
- else(SKLEARN_FOUND)
- message("++ Python documentation module will not be compiled because scikit-learn was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SKLEARN_FOUND)
- else(SCIPY_FOUND)
- message("++ Python documentation module will not be compiled because scipy was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SCIPY_FOUND)
- else(NUMPY_FOUND)
- message("++ Python documentation module will not be compiled because numpy was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NUMPY_FOUND)
- else(MATPLOTLIB_FOUND)
- message("++ Python documentation module will not be compiled because matplotlib was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(MATPLOTLIB_FOUND)
- else(SPHINX_PATH)
- message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SPHINX_PATH)
+ # Differentiation
+ if(TENSORFLOW_FOUND)
+ add_gudhi_py_test(test_diff)
+ endif()
+
+ # Betti curves
+ if(SKLEARN_FOUND AND SCIPY_FOUND)
+ add_gudhi_py_test(test_betti_curve_representations)
+ endif()
+
+ # Representations preprocessing
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_representations_preprocessing)
+ endif()
+
+ # Time Delay
+ add_gudhi_py_test(test_time_delay)
+
+ # DTM
+ if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_knn)
+ add_gudhi_py_test(test_dtm)
+ endif()
+
+ # Tomato
+ if(SCIPY_FOUND AND SKLEARN_FOUND)
+ add_gudhi_py_test(test_tomato)
+ endif()
+ # Weighted Rips
+ if(SCIPY_FOUND)
+ add_gudhi_py_test(test_weighted_rips_complex)
+ endif()
+
+ # DTM Rips
+ if(SCIPY_FOUND)
+ add_gudhi_py_test(test_dtm_rips_complex)
+ endif()
+
+ # Fetch remote datasets
+ if(WITH_GUDHI_REMOTE_TEST)
+ add_gudhi_py_test(test_remote_datasets)
+ endif()
+
+ # sklearn
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_sklearn_cubical_persistence)
+ endif()
+
+ # persistence graphical tools
+ if(MATPLOTLIB_FOUND)
+ add_gudhi_py_test(test_persistence_graphical_tools)
+ endif()
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
- else(CYTHON_FOUND)
- message("++ Python module will not be compiled because cython was not found")
+ else(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
+ message("++ Python module will not be compiled because numpy and/or cython and/or pybind11 was/were not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(CYTHON_FOUND)
+ endif(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/python/doc/_templates/layout.html b/src/python/doc/_templates/layout.html
index 2f2d9c72..e074b6c7 100644
--- a/src/python/doc/_templates/layout.html
+++ b/src/python/doc/_templates/layout.html
@@ -175,58 +175,60 @@
<h1 class="show-for-small-only"><a href="" class="icon-tree"> GUDHI library</a></h1>
</li>
<!-- Remove the class "menu-icon" to get rid of menu icon. Take out "Menu" to just have icon alone -->
- <li class="toggle-topbar menu-icon"><a href="#"><span>Navigation</span></a></li>
+ <li class="toggle-topbar menu-icon"><a href="#"><span>Nav</span></a></li>
</ul>
<section class="top-bar-section">
<ul class="right">
<li class="divider"></li>
- <li><a href="/contact/">Contact</a></li>
+ <li><a href="/contact/">Contact</a></li>
</ul>
<ul class="left">
- <li><a href="/"> <img src="/assets/img/home.png" alt="&nbsp;&nbsp;GUDHI">&nbsp;&nbsp;GUDHI </a></li>
+ <li><a href="/"> <img src="/assets/img/home.png" alt=" GUDHI"> GUDHI </a></li>
<li class="divider"></li>
<li class="has-dropdown">
- <a href="#">Project</a>
+ <a href="#">Project</a>
<ul class="dropdown">
- <li><a href="/people/">People</a></li>
- <li><a href="/keepintouch/">Keep in touch</a></li>
- <li><a href="/partners/">Partners and Funding</a></li>
- <li><a href="/relatedprojects/">Related projects</a></li>
- <li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
- <li><a href="/inaction/">GUDHI in action</a></li>
+ <li><a href="/people/">People</a></li>
+ <li><a href="/keepintouch/">Keep in touch</a></li>
+ <li><a href="/partners/">Partners and Funding</a></li>
+ <li><a href="/relatedprojects/">Related projects</a></li>
+ <li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
+ <li><a href="/inaction/">GUDHI in action</a></li>
+ <li><a href="/etymology/">Etymology</a></li>
</ul>
</li>
<li class="divider"></li>
<li class="has-dropdown">
- <a href="#">Download</a>
+ <a href="#">Download</a>
<ul class="dropdown">
- <li><a href="/licensing/">Licensing</a></li>
- <li><a href="https://gforge.inria.fr/frs/download.php/latestzip/5253/library-latest.zip" target="_blank">Get the latest sources</a></li>
- <li><a href="/conda/">Conda package</a></li>
- <li><a href="/dockerfile/">Dockerfile</a></li>
+ <li><a href="/licensing/">Licensing</a></li>
+ <li><a href="https://github.com/GUDHI/gudhi-devel/releases/latest" target="_blank">Get the latest sources</a></li>
+ <li><a href="/conda/">Conda package</a></li>
+ <li><a href="https://pypi.org/project/gudhi/" target="_blank">Pip package</a></li>
+ <li><a href="/dockerfile/">Dockerfile</a></li>
</ul>
</li>
<li class="divider"></li>
<li class="has-dropdown">
- <a href="#">Documentation</a>
+ <a href="#">Documentation</a>
<ul class="dropdown">
- <li><a href="/introduction/">Introduction</a></li>
- <li><a href="https://gudhi.inria.fr/doc/latest/installation.html">C++ installation manual</a></li>
- <li><a href="https://gudhi.inria.fr/doc/latest/">C++ documentation</a></li>
- <li><a href="https://gudhi.inria.fr/python/latest/installation.html">Python installation manual</a></li>
- <li><a href="https://gudhi.inria.fr/python/latest/">Python documentation</a></li>
- <li><a href="/utils/">Utilities</a></li>
- <li><a href="/tutorials/">Tutorials</a></li>
+ <li><a href="/introduction/">Introduction</a></li>
+ <li><a href="/doc/latest/installation.html">C++ installation manual</a></li>
+ <li><a href="/doc/latest/">C++ documentation</a></li>
+ <li><a href="/python/latest/installation.html">Python installation manual</a></li>
+ <li><a href="/python/latest/">Python documentation</a></li>
+ <li><a href="/utils/">Utilities</a></li>
+ <li><a href="/tutorials/">Tutorials</a></li>
</ul>
</li>
<li class="divider"></li>
- <li><a href="/interfaces/">Interfaces</a></li>
+ <li><a href="/interfaces/">Interfaces</a></li>
<li class="divider"></li>
</ul>
</section>
</nav>
- </div><!-- /#navigation -->
- <!-- GUDHI website header BEGIN -->
+ </div><!-- /#navigation -->
+ <!-- GUDHI website header END -->
{%- block header %}{% endblock %}
diff --git a/src/python/doc/alpha_complex_ref.rst b/src/python/doc/alpha_complex_ref.rst
index 7da79543..eaa72551 100644
--- a/src/python/doc/alpha_complex_ref.rst
+++ b/src/python/doc/alpha_complex_ref.rst
@@ -9,6 +9,5 @@ Alpha complex reference manual
.. autoclass:: gudhi.AlphaComplex
:members:
:undoc-members:
- :show-inheritance:
.. automethod:: gudhi.AlphaComplex.__init__
diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc
index b5af0d27..5c76fd54 100644
--- a/src/python/doc/alpha_complex_sum.inc
+++ b/src/python/doc/alpha_complex_sum.inc
@@ -1,17 +1,15 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
- | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | |
- | :alt: Alpha complex representation | | :Introduced in: GUDHI 2.0.0 |
- | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | |
- | | the circumradius of the simplex if the circumsphere is empty (the | :Copyright: MIT (`GPL v3 </licensing/>`_) |
- | | simplex is then said to be Gabriel), and as the minimum of the | |
- | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | Gabriel otherwise. | |
- | | | |
- | | For performances reasons, it is advised to use CGAL ≥ 5.0.0. | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
+ | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. It has the same persistent homology | |
+ | :alt: Alpha complex representation | as the Čech complex and is significantly smaller. | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | | |
+ | | | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 5.1 |
+ | | | |
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
+ +----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index 60319e84..9e67d38a 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -9,15 +9,31 @@ Definition
.. include:: alpha_complex_sum.inc
-`AlphaComplex` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
-`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
-:cite:`cgal:hdj-t-19b` from `CGAL <http://www.cgal.org/>`_ (the Computational Geometry Algorithms Library
-:cite:`cgal:eb-19b`).
+:class:`~gudhi.AlphaComplex` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
+`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
+:cite:`cgal:hdj-t-19b` from the `Computational Geometry Algorithms Library <http://www.cgal.org/>`_
+:cite:`cgal:eb-19b`.
Remarks
^^^^^^^
-When an :math:`\alpha`-complex is constructed with an infinite value of :math:`\alpha^2`,
-the complex is a Delaunay complex (with special filtration values).
+* When an :math:`\alpha`-complex is constructed with an infinite value of :math:`\alpha^2`, the complex is a Delaunay
+ complex (with special filtration values). The Delaunay complex without filtration values is also available by
+ passing :code:`default_filtration_value = True` to :func:`~gudhi.AlphaComplex.create_simplex_tree`.
+* For people only interested in the topology of the Alpha complex (for instance persistence), the Alpha complex is
+  equivalent to the `Čech complex <https://gudhi.inria.fr/doc/latest/group__cech__complex.html>`_ and much smaller if
+  you do not bound the radii. The `Čech complex <https://gudhi.inria.fr/doc/latest/group__cech__complex.html>`_ can
+  still make sense in higher dimensions precisely because its radii can be bounded.
+* The :code:`precision` argument of :func:`~gudhi.AlphaComplex.__init__` controls how the filtration values are
+  computed (see the sketch below).
+  With :code:`precision = 'exact'`, the filtration values are the exact ones converted to float. This can be
+  very slow.
+  With :code:`precision = 'safe'` (the default), the filtration values are only guaranteed to have a small
+  multiplicative error compared to the exact value; see :func:`~gudhi.AlphaComplex.set_float_relative_precision`
+  to modify the precision.
+  A drawback, when computing persistence, is that an empty exact interval [10^12,10^12] may become a
+  non-empty approximate interval [10^12,10^12+10^6].
+  With :code:`precision = 'fast'`, the computations are slightly faster and the combinatorics are still exact,
+  but the computation of filtration values can exceptionally be arbitrarily bad. In all cases, the output is
+  still guaranteed to be a valid filtration (faces have a filtration value no larger than their cofaces).
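+
+A minimal sketch of these options, reusing the points of the example below (all names come from the
+:class:`~gudhi.AlphaComplex` reference):
+
+.. code-block:: python
+
+    from gudhi import AlphaComplex
+
+    pts = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+
+    # Delaunay complex: skip the computation of the alpha filtration values
+    delaunay = AlphaComplex(points=pts).create_simplex_tree(default_filtration_value=True)
+
+    # Exact filtration values, converted to float (can be very slow)
+    exact = AlphaComplex(points=pts, precision='exact').create_simplex_tree()
+
+    # Tighten the multiplicative error bound used by the default 'safe' mode
+    AlphaComplex.set_float_relative_precision(1e-7)
+    safe = AlphaComplex(points=pts).create_simplex_tree()
+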
Example from points
-------------------
@@ -26,23 +42,22 @@ This example builds the alpha-complex from the given points:
.. testcode::
- import gudhi
- alpha_complex = gudhi.AlphaComplex(points=[[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]])
+ from gudhi import AlphaComplex
+ ac = AlphaComplex(points=[[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]])
+
+ stree = ac.create_simplex_tree()
+ print('Alpha complex is of dimension ', stree.dimension(), ' - ',
+ stree.num_simplices(), ' simplices - ', stree.num_vertices(), ' vertices.')
- simplex_tree = alpha_complex.create_simplex_tree()
- result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
+ for filtered_value in stree.get_filtration():
print(fmt % tuple(filtered_value))
The output is:
.. testoutput::
- Alpha complex is of dimension 2 - 25 simplices - 7 vertices.
+ Alpha complex is of dimension 2 - 25 simplices - 7 vertices.
[0] -> 0.00
[1] -> 0.00
[2] -> 0.00
@@ -89,25 +104,28 @@ In order to build the alpha complex, first, a Simplex tree is built from the cel
Filtration value computation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- **for** i : dimension :math:`\rightarrow` 0 **do**
- **for all** :math:`\sigma` of dimension i
- **if** filtration(:math:`\sigma`) is NaN **then**
- filtration(:math:`\sigma`) = :math:`\alpha^2(\sigma)`
- **end if**
-
- *//propagate alpha filtration value*
-
- **for all** :math:`\tau` face of :math:`\sigma`
- **if** filtration(:math:`\tau`) is not NaN **then**
- filtration(:math:`\tau`) = filtration(:math:`\sigma`)
- **end if**
- **end for**
- **end for**
- **end for**
+.. code-block:: vim
+
+ for i : dimension → 0 do
+ for all σ of dimension i
+ if filtration(σ) is NaN then
+ filtration(σ) = α²(σ)
+ end if
+ for all τ face of σ do // propagate alpha filtration value
+ if filtration(τ) is not NaN then
+ filtration(τ) = min( filtration(τ), filtration(σ) )
+ else
+ if τ is not Gabriel for σ then
+ filtration(τ) = filtration(σ)
+ end if
+ end if
+ end for
+ end for
+ end for
+
+ make_filtration_non_decreasing()
+ prune_above_filtration()
- make_filtration_non_decreasing()
-
- prune_above_filtration()
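+
+For illustration, here is a rough, executable Python transcription of this loop. It is not GUDHI's
+implementation (the real one is in C++ behind :func:`~gudhi.AlphaComplex.create_simplex_tree`);
+``alpha2`` and ``is_gabriel`` stand in for the geometric computations.
+
+.. code-block:: python
+
+    import math
+
+    def assign_filtrations(simplices, alpha2, is_gabriel):
+        """simplices: frozensets of vertex ids; alpha2: simplex -> squared
+        circumradius; is_gabriel(tau, sigma): Gabriel predicate."""
+        filtration = {s: math.nan for s in simplices}
+        for dim in range(max(len(s) for s in simplices) - 1, -1, -1):
+            for sigma in (s for s in simplices if len(s) - 1 == dim):
+                if math.isnan(filtration[sigma]):
+                    filtration[sigma] = alpha2[sigma]
+                for v in sigma:                      # codimension-1 faces of sigma
+                    tau = sigma - {v}
+                    if not tau:
+                        continue
+                    if not math.isnan(filtration[tau]):
+                        filtration[tau] = min(filtration[tau], filtration[sigma])
+                    elif not is_gabriel(tau, sigma):
+                        filtration[tau] = filtration[sigma]
+        return filtration
+
+    # Toy run on a filled triangle, with made-up squared circumradii
+    simplices = [frozenset(s) for s in [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)]]
+    print(assign_filtrations(simplices, {s: 2.0 ** len(s) for s in simplices}, lambda t, s: True))
+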
Dimension 2
^^^^^^^^^^^
@@ -142,7 +160,10 @@ As the squared radii computed by CGAL are an approximation, it might happen that
:math:`\alpha^2` values do not quite define a proper filtration (i.e. non-decreasing with
respect to inclusion).
We fix that up by calling :func:`~gudhi.SimplexTree.make_filtration_non_decreasing` (cf.
-`C++ version <http://gudhi.gforge.inria.fr/doc/latest/index.html>`_).
+`C++ version <https://gudhi.inria.fr/doc/latest/class_gudhi_1_1_simplex__tree.html>`_).
+
+.. note::
+    This does not happen with the `exact` version, which is why
+    :func:`~gudhi.SimplexTree.make_filtration_non_decreasing` is not called in that case.
Prune above given filtration value
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -153,59 +174,86 @@ of speed-up, since we always first build the full filtered complex, so it is rec
:paramref:`~gudhi.AlphaComplex.create_simplex_tree.max_alpha_square`.
In the following example, a threshold of :math:`\alpha^2 = 32.0` is used.
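+
+A minimal sketch, reusing the points of the first example:
+
+.. code-block:: python
+
+    from gudhi import AlphaComplex
+
+    pts = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+    stree = AlphaComplex(points=pts).create_simplex_tree(max_alpha_square=32.0)
+    # Simplices whose filtration value exceeds 32.0 are pruned away
+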
+Weighted version
+^^^^^^^^^^^^^^^^
-Example from OFF file
-^^^^^^^^^^^^^^^^^^^^^
+A weighted version for Alpha complex is available. It is like a usual Alpha complex, but based on a
+`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT>`_.
-This example builds the Delaunay triangulation from the points given by an OFF file, and initializes the alpha complex
-with it.
+This example builds the weighted alpha-complex of a small molecule, where atoms have different sizes.
+It is taken from
+`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes>`_.
-
-Then, it is asked to display information about the alpha complex:
+The example then displays information about the alpha complex.
.. testcode::
- import gudhi
- alpha_complex = gudhi.AlphaComplex(off_file=gudhi.__root_source_dir__ + \
- '/data/points/alphacomplexdoc.off')
- simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=32.0)
- result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
+ from gudhi import AlphaComplex
+ wgt_ac = AlphaComplex(points=[[ 1., -1., -1.],
+ [-1., 1., -1.],
+ [-1., -1., 1.],
+ [ 1., 1., 1.],
+ [ 2., 2., 2.]],
+ weights = [4., 4., 4., 4., 1.])
+
+ stree = wgt_ac.create_simplex_tree()
+ print('Weighted alpha complex is of dimension ', stree.dimension(), ' - ',
+ stree.num_simplices(), ' simplices - ', stree.num_vertices(), ' vertices.')
fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
- print(fmt % tuple(filtered_value))
+ for simplex in stree.get_simplices():
+ print(fmt % tuple(simplex))
-the program output is:
+The output is:
.. testoutput::
- Alpha complex is of dimension 2 - 20 simplices - 7 vertices.
- [0] -> 0.00
- [1] -> 0.00
- [2] -> 0.00
- [3] -> 0.00
- [4] -> 0.00
- [5] -> 0.00
- [6] -> 0.00
- [2, 3] -> 6.25
- [4, 5] -> 7.25
- [0, 2] -> 8.50
- [0, 1] -> 9.25
- [1, 3] -> 10.00
- [1, 2] -> 11.25
- [1, 2, 3] -> 12.50
- [0, 1, 2] -> 13.00
- [5, 6] -> 13.25
- [2, 4] -> 20.00
- [4, 6] -> 22.74
- [4, 5, 6] -> 22.74
- [3, 6] -> 30.25
+ Weighted alpha complex is of dimension 3 - 29 simplices - 5 vertices.
+ [0, 1, 2, 3] -> -1.00
+ [0, 1, 2] -> -1.33
+ [0, 1, 3, 4] -> 95.00
+ [0, 1, 3] -> -1.33
+ [0, 1, 4] -> 95.00
+ [0, 1] -> -2.00
+ [0, 2, 3, 4] -> 95.00
+ [0, 2, 3] -> -1.33
+ [0, 2, 4] -> 95.00
+ [0, 2] -> -2.00
+ [0, 3, 4] -> 23.00
+ [0, 3] -> -2.00
+ [0, 4] -> 23.00
+ [0] -> -4.00
+ [1, 2, 3, 4] -> 95.00
+ [1, 2, 3] -> -1.33
+ [1, 2, 4] -> 95.00
+ [1, 2] -> -2.00
+ [1, 3, 4] -> 23.00
+ [1, 3] -> -2.00
+ [1, 4] -> 23.00
+ [1] -> -4.00
+ [2, 3, 4] -> 23.00
+ [2, 3] -> -2.00
+ [2, 4] -> 23.00
+ [2] -> -4.00
+ [3, 4] -> -1.00
+ [3] -> -4.00
+ [4] -> -1.00
+
+Example from OFF file
+^^^^^^^^^^^^^^^^^^^^^
+
+This example builds the alpha complex from 300 random points on a 2-torus, given by an
+`OFF file <fileformats.html#off-file-format>`_.
+
+Then, it computes the persistence diagram and displays it:
-CGAL citations
-==============
+.. plot::
+ :include-source:
-.. bibliography:: ../../biblio/how_to_cite_cgal.bib
- :filter: docnames
- :style: unsrt
+ import matplotlib.pyplot as plt
+ import gudhi as gd
+ off_file = gd.__root_source_dir__ + '/data/points/tore3D_300.off'
+ points = gd.read_points_from_off_file(off_file = off_file)
+ stree = gd.AlphaComplex(points = points).create_simplex_tree()
+ dgm = stree.persistence()
+ gd.plot_persistence_diagram(dgm, legend = True)
+ plt.show()
diff --git a/src/python/doc/bottleneck_distance_sum.inc b/src/python/doc/bottleneck_distance_sum.inc
index 6eb0ac19..77dc368d 100644
--- a/src/python/doc/bottleneck_distance_sum.inc
+++ b/src/python/doc/bottleneck_distance_sum.inc
@@ -1,14 +1,14 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi |
- | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. It's the shortest distance b for which there exists a | |
- | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Introduced in: GUDHI 2.0.0 |
- | | diagonal points) such that any couple of matched points are at | |
- | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :Copyright: MIT (`GPL v3 </licensing/>`_) |
- | the longest edge | norm in :math:`\mathbb{R}^2`. | |
- | | | :Requires: `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`bottleneck_distance_user` | |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+
+ | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi |
+ | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. It's the shortest distance b for which there exists a | |
+ | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 |
+ | | diagonal points) such that any couple of matched points are at | |
+ | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 </licensing/>`_) |
+ | the longest edge | norm in :math:`\mathbb{R}^2`. | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+
+ | * :doc:`bottleneck_distance_user` | |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst
index 9435c7f1..7baa76cc 100644
--- a/src/python/doc/bottleneck_distance_user.rst
+++ b/src/python/doc/bottleneck_distance_user.rst
@@ -9,14 +9,23 @@ Definition
.. include:: bottleneck_distance_sum.inc
-This implementation is based on ideas from "Geometry Helps in Bottleneck Matching and Related Problems"
-:cite:`DBLP:journals/algorithmica/EfratIK01`. Another relevant publication, although it was not used is
-"Geometry Helps to Compare Persistence Diagrams" :cite:`Kerber:2017:GHC:3047249.3064175`.
+This implementation by François Godi is based on ideas from "Geometry Helps in Bottleneck Matching and Related Problems"
+:cite:`DBLP:journals/algorithmica/EfratIK01` and requires `CGAL <installation.html#cgal>`_ (`GPL v3 </licensing/>`_).
-Function
---------
.. autofunction:: gudhi.bottleneck_distance
+This other implementation comes from `Hera
+<https://bitbucket.org/grey_narn/hera/src/master/>`_ (BSD-3-Clause), which is
+based on "Geometry Helps to Compare Persistence Diagrams"
+:cite:`Kerber:2017:GHC:3047249.3064175` by Michael Kerber, Dmitriy
+Morozov, and Arnur Nigmetov.
+
+.. warning::
+ Beware that its approximation allows for a multiplicative error, while the function above uses an additive error.
+
+.. autofunction:: gudhi.hera.bottleneck_distance
+
+
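+As a quick comparison of the two functions on a pair of small diagrams (a sketch; the numeric
+comments are indicative, the exact value for these diagrams being 0.75):
+
+.. code-block:: python
+
+    import gudhi
+    import gudhi.hera
+
+    diag1 = [[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]
+    diag2 = [[2.8, 4.45], [9.5, 14.1]]
+
+    print(gudhi.bottleneck_distance(diag1, diag2))       # exact: 0.75
+    print(gudhi.bottleneck_distance(diag1, diag2, 0.1))  # additive error at most 0.1
+    print(gudhi.hera.bottleneck_distance(diag1, diag2))  # small multiplicative error
+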
Distance computation
--------------------
@@ -38,7 +47,7 @@ The following example explains how the distance is computed:
:figclass: align-center
The point (0, 13) is at distance 6.5 from the diagonal and more
- specifically from the point (6.5, 6.5)
+ specifically from the point (6.5, 6.5).
Basic example
@@ -63,5 +72,6 @@ The output is:
.. testoutput::
- Bottleneck distance approximation = 0.81
+ Bottleneck distance approximation = 0.72
Bottleneck distance value = 0.75
+
diff --git a/src/python/doc/clustering.inc b/src/python/doc/clustering.inc
new file mode 100644
index 00000000..2d07ae88
--- /dev/null
+++ b/src/python/doc/clustering.inc
@@ -0,0 +1,12 @@
+.. table::
+ :widths: 30 40 30
+
+ +--------------------------+-------------------------------------------------------+---------------------------------+
+ | .. figure:: | Clustering tools. | :Author: Marc Glisse |
+ | img/spiral-color.png | | |
+ | | | :Since: GUDHI 3.3.0 |
+ | | | |
+ | | | :License: MIT |
+ +--------------------------+-------------------------------------------------------+---------------------------------+
+ | * :doc:`clustering` |
+ +--------------------------+-----------------------------------------------------------------------------------------+
diff --git a/src/python/doc/clustering.rst b/src/python/doc/clustering.rst
new file mode 100644
index 00000000..62422682
--- /dev/null
+++ b/src/python/doc/clustering.rst
@@ -0,0 +1,72 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+=================
+Clustering manual
+=================
+
+We provide an implementation of ToMATo :cite:`tomato`, a persistence-based clustering algorithm. In short, this algorithm uses a density estimator and a neighborhood graph, starts with a mode-seeking phase (naive hill-climbing) to build initial clusters, and finishes by merging clusters based on their prominence.
+
+The merging phase depends on a parameter, which is the minimum prominence a cluster needs to avoid getting merged into another, bigger cluster. This parameter determines the number of clusters; for convenience, we let you specify the number of clusters instead. Decreasing the prominence threshold defines a hierarchy of clusters: if 2 points are in separate clusters when we have k clusters, they are still in different clusters for k+1 clusters.
+
+As a by-product, we produce the persistence diagram of the merge tree of the initial clusters. This is a convenient graphical tool to help decide how many clusters we want.
+
+.. plot::
+ :context:
+ :include-source:
+
+ import gudhi
+ from gudhi.datasets.remote import fetch_spiral_2d
+ data = fetch_spiral_2d()
+ import matplotlib.pyplot as plt
+ plt.scatter(data[:,0],data[:,1],marker='.',s=1)
+ plt.show()
+
+.. plot::
+ :context: close-figs
+ :include-source:
+
+ from gudhi.clustering.tomato import Tomato
+ t = Tomato()
+ t.fit(data)
+ t.plot_diagram()
+
+As one can see in `t.n_clusters_`, the algorithm found 6316 initial clusters. The diagram shows their prominence as their distance to the diagonal. There is always one point infinitely far: there is at least one cluster. Among the others, one point seems significantly farther from the diagonal than the others, which indicates that splitting the points into 2 clusters may be a sensible idea.
+
+.. plot::
+ :context: close-figs
+ :include-source:
+
+ t.n_clusters_=2
+ plt.scatter(data[:,0],data[:,1],marker='.',s=1,c=t.labels_)
+ plt.show()
+
+Of course this is just the result for one set of parameters. We can ask for a different density estimator and a different neighborhood graph computed with different parameters.
+
+.. plot::
+ :context: close-figs
+ :include-source:
+
+ t = Tomato(density_type='DTM', k=100)
+ t.fit(data)
+ t.plot_diagram()
+
+This makes the number of clusters clearer, and slightly changes the shape of the clusters.
+
+Here is a quick look at the corresponding density estimate:
+
+.. plot::
+ :context: close-figs
+ :include-source:
+
+ plt.scatter(data[:,0],data[:,1],marker='.',s=1,c=t.weights_)
+ plt.show()
+
+The code provides a few density estimators and graph constructions for convenience when first experimenting, but it is actually expected that advanced users provide their own graph and density estimates instead of point coordinates.
+
+Since the algorithm essentially computes basins of attraction, it is also encouraged to use it on functions that do not represent densities at all.
+
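+A sketch of that advanced usage, assuming (as described in the class reference below) that
+``graph_type='manual'`` makes :func:`fit` take a list of neighbor indices and that
+``density_type='manual'`` takes the density estimates through the ``weights`` argument:
+
+.. code-block:: python
+
+    import numpy as np
+    from sklearn.neighbors import NearestNeighbors
+    from gudhi.clustering.tomato import Tomato
+
+    data = np.random.default_rng(0).random((100, 2))
+
+    # Hand-rolled inputs: a 10-NN graph, and an ad hoc density estimate
+    # (inverse distance to the 10th nearest neighbor)
+    dist, knn = NearestNeighbors(n_neighbors=10).fit(data).kneighbors(data)
+    density = 1.0 / dist[:, -1]
+
+    t = Tomato(graph_type='manual', density_type='manual')
+    t.fit(knn, weights=density)
+    print(t.n_clusters_)
+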
+.. autoclass:: gudhi.clustering.tomato.Tomato
+ :members:
+ :special-members: __init__
diff --git a/src/python/doc/conf.py b/src/python/doc/conf.py
index 3cc5d1d6..e69e2751 100755
--- a/src/python/doc/conf.py
+++ b/src/python/doc/conf.py
@@ -44,6 +44,8 @@ extensions = [
'sphinx_paramlinks',
]
+bibtex_bibfiles = ['../../biblio/bibliography.bib']
+
todo_include_todos = True
# plot option : do not show hyperlinks (Source code, png, hires.png, pdf)
plot_html_show_source_link = False
@@ -118,15 +120,12 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'classic'
+html_theme = 'python_docs_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
- "sidebarbgcolor": "#A1ADCD",
- "sidebartextcolor": "black",
- "sidebarlinkcolor": "#334D5C",
"body_max_width": "100%",
}
diff --git a/src/python/doc/cubical_complex_sklearn_itf_ref.rst b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
new file mode 100644
index 00000000..90ae9ccd
--- /dev/null
+++ b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
@@ -0,0 +1,102 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Cubical complex persistence scikit-learn like interface
+#######################################################
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.6.0
+ - :License: MIT
+ - :Requires: `Scikit-learn <installation.html#scikit-learn>`_
+
+Cubical complex persistence scikit-learn like interface example
+---------------------------------------------------------------
+
+In this example, handwritten digits are used as input.
+A TDA scikit-learn pipeline is constructed, composed of:
+
+#. :class:`~gudhi.sklearn.cubical_persistence.CubicalPersistence` that builds a cubical complex from the inputs and
+ returns its persistence diagrams
+#. :class:`~gudhi.representations.preprocessing.DiagramSelector` that removes non-finite persistence diagrams values
+#. :class:`~gudhi.representations.vector_methods.PersistenceImage` that builds the persistence images from persistence diagrams
+#. `SVC <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ which is a scikit-learn support
+ vector classifier.
+
+This ML pipeline is trained to detect if the handwritten digit is an '8' or not, thanks to the fact that an '8' has
+two holes in :math:`\mathbf{H}_1`, or, like in this example, three connected components in :math:`\mathbf{H}_0`.
+
+.. code-block:: python
+
+ # Standard scientific Python imports
+ import numpy as np
+
+ # Standard scikit-learn imports
+ from sklearn.datasets import fetch_openml
+ from sklearn.pipeline import Pipeline
+ from sklearn.model_selection import train_test_split
+ from sklearn.svm import SVC
+ from sklearn import metrics
+
+ # Import TDA pipeline requirements
+ from gudhi.sklearn.cubical_persistence import CubicalPersistence
+ from gudhi.representations import PersistenceImage, DiagramSelector
+
+ X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
+
+ # Target is: "is an eight ?"
+ y = (y == "8") * 1
+ print("There are", np.sum(y), "eights out of", len(y), "numbers.")
+
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
+ pipe = Pipeline(
+ [
+ ("cub_pers", CubicalPersistence(homology_dimensions=0, newshape=[-1, 28, 28], n_jobs=-2)),
+ # Or for multiple persistence dimension computation
+ # ("cub_pers", CubicalPersistence(homology_dimensions=[0, 1], newshape=[-1, 28, 28])),
+ # ("H0_diags", DimensionSelector(index=0), # where index is the index in homology_dimensions array
+ ("finite_diags", DiagramSelector(use=True, point_type="finite")),
+ (
+ "pers_img",
+ PersistenceImage(bandwidth=50, weight=lambda x: x[1] ** 2, im_range=[0, 256, 0, 256], resolution=[20, 20]),
+ ),
+ ("svc", SVC()),
+ ]
+ )
+
+ # Learn from the train subset
+ pipe.fit(X_train, y_train)
+ # Predict from the test subset
+ predicted = pipe.predict(X_test)
+
+ print(f"Classification report for TDA pipeline {pipe}:\n" f"{metrics.classification_report(y_test, predicted)}\n")
+
+.. code-block:: none
+
+ There are 6825 eights out of 70000 numbers.
+ Classification report for TDA pipeline Pipeline(steps=[('cub_pers',
+ CubicalPersistence(newshape=[28, 28], n_jobs=-2)),
+ ('finite_diags', DiagramSelector(use=True)),
+ ('pers_img',
+ PersistenceImage(bandwidth=50, im_range=[0, 256, 0, 256],
+ weight=<function <lambda> at 0x7f3e54137ae8>)),
+ ('svc', SVC())]):
+ precision recall f1-score support
+
+ 0 0.97 0.99 0.98 25284
+ 1 0.92 0.68 0.78 2716
+
+ accuracy 0.96 28000
+ macro avg 0.94 0.84 0.88 28000
+ weighted avg 0.96 0.96 0.96 28000
+
+Cubical complex persistence scikit-learn like interface reference
+-----------------------------------------------------------------
+
+.. autoclass:: gudhi.sklearn.cubical_persistence.CubicalPersistence
+ :members:
+ :special-members: __init__
+ :show-inheritance: \ No newline at end of file
diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc
index f200e695..b27843e5 100644
--- a/src/python/doc/cubical_complex_sum.inc
+++ b/src/python/doc/cubical_complex_sum.inc
@@ -1,14 +1,22 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The cubical complex is an example of a structured complex useful in | :Author: Pawel Dlotko |
- | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | computational mathematics (specially rigorous numerics) and image | |
- | :alt: Cubical complex representation | analysis. | :Introduced in: GUDHI 2.0.0 |
- | :figclass: align-center | | |
- | | | :Copyright: MIT |
- | | | |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
- | | * :doc:`periodic_cubical_complex_ref` |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
+ | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | :Since: GUDHI 2.0.0 |
+ | :alt: Cubical complex representation | | :License: MIT |
+ | :figclass: align-center | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
+ | | * :doc:`periodic_cubical_complex_ref` |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_sklearn_itf_ref` | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ | img/sklearn.png | | |
+ | :target: https://scikit-learn.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
diff --git a/src/python/doc/cubical_complex_tflow_itf_ref.rst b/src/python/doc/cubical_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..b32f5e47
--- /dev/null
+++ b/src/python/doc/cubical_complex_tflow_itf_ref.rst
@@ -0,0 +1,40 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for cubical persistence
+########################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from cubical persistence
+-----------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import CubicalLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=tf.float32, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testoutput::
+
+ [[ 0. 0. 0. ]
+ [ 0. 0.5 0. ]
+ [ 0. 0. -0.5]]
+
+Documentation for CubicalLayer
+------------------------------
+
+.. autoclass:: gudhi.tensorflow.CubicalLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst
index 56cf0170..42a23875 100644
--- a/src/python/doc/cubical_complex_user.rst
+++ b/src/python/doc/cubical_complex_user.rst
@@ -7,14 +7,7 @@ Cubical complex user manual
Definition
----------
-===================================== ===================================== =====================================
-:Author: Pawel Dlotko :Introduced in: GUDHI PYTHON 2.0.0 :Copyright: GPL v3
-===================================== ===================================== =====================================
-
-+---------------------------------------------+----------------------------------------------------------------------+
-| :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+---------------------------------------------+----------------------------------------------------------------------+
+.. include:: cubical_complex_sum.inc
 The cubical complex is an example of a structured complex useful in computational mathematics (especially rigorous
numerics) and image analysis.
@@ -47,8 +40,8 @@ be a set of two elements).
For further details and theory of cubical complexes, please consult :cite:`kaczynski2004computational` as well as the
following paper :cite:`peikert2012topological`.
-Data structure.
----------------
+Data structure
+--------------
The implementation of Cubical complex provides a representation of complexes that occupy a rectangular region in
:math:`\mathbb{R}^n`. This extra assumption allows for a memory efficient way of storing cubical complexes in a form
@@ -77,8 +70,8 @@ Knowing the sizes of the bitmap, by a series of modulo operation, we can determi
present in the product that gives the cube :math:`C`. In a similar way, we can compute boundary and the coboundary of
each cube. Further details can be found in the literature.
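+
+As an illustration, here is a sketch of such a decoding, assuming the usual bitmap convention where a
+cell is non-degenerate exactly in the directions where its coordinate is odd (this is not a GUDHI API,
+just the modulo arithmetic described above):
+
+.. code-block:: python
+
+    def cell_dimension(position, sizes):
+        """Dimension of the cell at linear `position` in a bitmap whose size
+        in each direction is 2*n_i + 1 for n_i top-dimensional cells."""
+        dim = 0
+        for s in sizes:
+            position, c = divmod(position, s)   # recover one coordinate
+            dim += c % 2                        # odd coordinate -> interval factor
+        return dim
+
+    print(cell_dimension(8, [5, 5]))  # coordinates (3, 1), both odd: a square, dim 2
+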
-Input Format.
--------------
+Input Format
+------------
 In the current implementation, filtration is given at the maximal cubes, and it is then extended by the lower star
filtration to all cubes. There are a number of constructors that can be used to construct cubical complex by users
@@ -91,7 +84,7 @@ Currently one input from a text file is used. It uses a format inspired from the
we allow any filtration values. As a consequence one cannot use ``-1``'s to indicate missing cubes. If you have
missing cubes in your complex, please set their filtration to :math:`+\infty` (aka. ``inf`` in the file).
-The file format is described in details in :ref:`Perseus file format` file format section.
+The file format is described in detail in the `Perseus file format <fileformats.html#perseus>`_ section.
.. testcode::
@@ -108,8 +101,8 @@ the program output is:
Cubical complex is of dimension 2 - 49 simplices.
-Periodic boundary conditions.
------------------------------
+Periodic boundary conditions
+----------------------------
Often one would like to impose periodic boundary conditions to the cubical complex (cf.
:doc:`periodic_cubical_complex_ref`).
 conditions are imposed in all directions, then complex :math:`\mathcal{K}` becomes an :math:`n`-dimensional torus. One can use
various constructors from the file Bitmap_cubical_complex_periodic_boundary_conditions_base.h to construct cubical
complex with periodic boundary conditions.
-One can also use Perseus style input files (see :doc:`Perseus <fileformats>`) for the specific periodic case:
+One can also use Perseus style input files (see `Perseus file format <fileformats.html#perseus>`_) for the specific periodic case:
.. testcode::
@@ -154,14 +147,13 @@ the program output is:
Periodic cubical complex is of dimension 2 - 42 simplices.
-Examples.
----------
+Examples
+--------
End user programs are available in python/example/ folder.
-Bibliography
-============
+Tutorial
+--------
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docnames
- :style: unsrt
+This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-cubical-complexes.ipynb>`_
+explains how to represent sublevel sets of functions using cubical complexes.
diff --git a/src/python/doc/datasets.inc b/src/python/doc/datasets.inc
new file mode 100644
index 00000000..95a87678
--- /dev/null
+++ b/src/python/doc/datasets.inc
@@ -0,0 +1,14 @@
+.. table::
+ :widths: 30 40 30
+
+ +-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
+ | .. figure:: | Datasets either generated or fetched. | :Authors: Hind Montassif |
+ | img/sphere_3d.png | | |
+ | | | :Since: GUDHI 3.5.0 |
+ | | | |
+ | | | :License: MIT (`LGPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ |
+ +-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
+ | * :doc:`datasets` |
+ +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/datasets.rst b/src/python/doc/datasets.rst
new file mode 100644
index 00000000..2d11a19d
--- /dev/null
+++ b/src/python/doc/datasets.rst
@@ -0,0 +1,133 @@
+
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+================
+Datasets manual
+================
+
+Datasets generators
+===================
+
+We provide the generation of different customizable datasets to use as inputs for Gudhi complexes and data structures.
+
+Points generators
+------------------
+
+The module **points** enables the generation of random points on a sphere, and of points on a torus, either at random or as a grid.
+
+Points on sphere
+^^^^^^^^^^^^^^^^
+
+The function **sphere** enables the generation of random i.i.d. points uniformly on a (d-1)-sphere in :math:`R^d`.
+The user should provide the number of points to be generated on the sphere :code:`n_samples` and the ambient dimension :code:`ambient_dim`.
+The :code:`radius` of the sphere is optional and is equal to **1** by default.
+Only random point generation is currently available.
+
+The generated points are given as an array of shape :math:`(n\_samples, ambient\_dim)`.
+
+Example
+"""""""
+
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+ from gudhi import AlphaComplex
+
+ # Generate 50 points on a sphere in R^2
+ gen_points = points.sphere(n_samples = 50, ambient_dim = 2, radius = 1, sample = "random")
+
+ # Create an alpha complex from the generated points
+ alpha_complex = AlphaComplex(points = gen_points)
+
+.. autofunction:: gudhi.datasets.generators.points.sphere
+
+Points on a flat torus
+^^^^^^^^^^^^^^^^^^^^^^
+
+You can also generate points on a torus.
+
+Two functions are available and give the same output: the first one depends on **CGAL**, while the second is written in pure Python.
+
+In addition, two sample types are provided: you can generate i.i.d. points on a d-torus in :math:`R^{2d}` either *randomly* or on a *grid*.
+
+First function: **ctorus**
+"""""""""""""""""""""""""""
+
+The user should provide the number of points :code:`n_samples` to be generated on the torus, and the dimension :code:`dim` of the torus on which points will be generated in :math:`R^{2dim}`.
+The :code:`sample` argument is optional and is set to **'random'** by default.
+In this case, the returned points form an array of shape :math:`(n\_samples, 2*dim)`.
+Otherwise, if set to **'grid'**, the points are generated on a grid and are given as an array of shape:
+
+.. math::
+
+ ( ⌊n\_samples^{1 \over {dim}}⌋^{dim}, 2*dim )
+
+**Note 1:** The first dimension of the output array is rounded down to the closest perfect :math:`dim^{th}` power.
+
+**Note 2:** This version is recommended when the user wishes to use **'grid'** as sample type, or **'random'** with a relatively small number of samples (~ less than 150).
+
+Example
+"""""""
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+
+ # Generate 50 points randomly on a torus in R^6
+ gen_points = points.ctorus(n_samples = 50, dim = 3)
+
+ # Generate 27 points on a torus as a grid in R^6
+ gen_points = points.ctorus(n_samples = 50, dim = 3, sample = 'grid')
+
+.. autofunction:: gudhi.datasets.generators.points.ctorus
+
+Second function: **torus**
+"""""""""""""""""""""""""""
+
+The user should provide the number of points :code:`n_samples` to be generated on the torus, and the dimension :code:`dim` of the torus on which points will be generated in :math:`R^{2dim}`.
+The :code:`sample` argument is optional and is set to **'random'** by default.
+The other allowed value of sample type is **'grid'**.
+
+**Note:** This version is recommended when the user wishes to use **'random'** as sample type with a large number of samples and a low dimension.
+
+Example
+"""""""
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+
+ # Generate 50 points randomly on a torus in R^6
+ gen_points = points.torus(n_samples = 50, dim = 3)
+
+ # Generate 27 points on a torus as a grid in R^6
+ gen_points = points.torus(n_samples = 50, dim = 3, sample = 'grid')
+
+
+.. autofunction:: gudhi.datasets.generators.points.torus
+
+
+Fetching datasets
+=================
+
+We provide some ready-to-use datasets that are not available by default when getting GUDHI, and need to be fetched explicitly.
+
+By **default**, the fetched datasets directory is set to a folder named **'gudhi_data'** in the **user home folder**.
+Alternatively, it can be set using the **'GUDHI_DATA'** environment variable.
+
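+A minimal sketch (the file is downloaded on the first call, then reused from the local cache):
+
+.. code-block:: python
+
+    from gudhi.datasets.remote import fetch_bunny, clear_data_home
+
+    bunny = fetch_bunny()    # numpy array of 35947 3D points
+    print(bunny.shape)
+
+    clear_data_home()        # remove the fetched files, if desired
+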
+.. autofunction:: gudhi.datasets.remote.fetch_bunny
+
+.. figure:: ./img/bunny.png
+ :figclass: align-center
+
+ 3D Stanford bunny with 35947 vertices.
+
+
+.. autofunction:: gudhi.datasets.remote.fetch_spiral_2d
+
+.. figure:: ./img/spiral_2d.png
+ :figclass: align-center
+
+ 2D spiral with 114562 vertices.
+
+.. autofunction:: gudhi.datasets.remote.clear_data_home
diff --git a/src/python/doc/differentiation_sum.inc b/src/python/doc/differentiation_sum.inc
new file mode 100644
index 00000000..140cf180
--- /dev/null
+++ b/src/python/doc/differentiation_sum.inc
@@ -0,0 +1,12 @@
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.6.0
+ - :License: MIT
+ - :Requires: `TensorFlow <installation.html#tensorflow>`_
+
+We provide TensorFlow 2 models that can handle automatic differentiation for the computation of persistence diagrams from complexes available in the Gudhi library.
+This includes simplex trees, cubical complexes and Vietoris-Rips complexes. Detailed examples of how to use these layers in practice are available
+in the following `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-optimization.ipynb>`_. Note that even if TensorFlow GPU is enabled, all
+internal computations using Gudhi will be done on CPU.
diff --git a/src/python/doc/examples.rst b/src/python/doc/examples.rst
index a42227e3..1442f185 100644
--- a/src/python/doc/examples.rst
+++ b/src/python/doc/examples.rst
@@ -7,27 +7,31 @@ Examples
.. only:: builder_html
- * :download:`rips_complex_from_points_example.py <../example/rips_complex_from_points_example.py>`
+ * :download:`alpha_complex_diagram_persistence_from_off_file_example.py <../example/alpha_complex_diagram_persistence_from_off_file_example.py>`
+ * :download:`alpha_complex_from_generated_points_on_sphere_example.py <../example/alpha_complex_from_generated_points_on_sphere_example.py>`
* :download:`alpha_complex_from_points_example.py <../example/alpha_complex_from_points_example.py>`
- * :download:`simplex_tree_example.py <../example/simplex_tree_example.py>`
* :download:`alpha_rips_persistence_bottleneck_distance.py <../example/alpha_rips_persistence_bottleneck_distance.py>`
- * :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>`
- * :download:`alpha_complex_diagram_persistence_from_off_file_example.py <../example/alpha_complex_diagram_persistence_from_off_file_example.py>`
- * :download:`periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py <../example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py>`
* :download:`bottleneck_basic_example.py <../example/bottleneck_basic_example.py>`
- * :download:`gudhi_graphical_tools_example.py <../example/gudhi_graphical_tools_example.py>`
- * :download:`plot_simplex_tree_dim012.py <../example/plot_simplex_tree_dim012.py>`
- * :download:`plot_rips_complex.py <../example/plot_rips_complex.py>`
- * :download:`plot_alpha_complex.py <../example/plot_alpha_complex.py>`
- * :download:`witness_complex_from_nearest_landmark_table.py <../example/witness_complex_from_nearest_landmark_table.py>`
+ * :download:`coordinate_graph_induced_complex.py <../example/coordinate_graph_induced_complex.py>`
+ * :download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>`
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
- * :download:`rips_complex_diagram_persistence_from_off_file_example.py <../example/rips_complex_diagram_persistence_from_off_file_example.py>`
+ * :download:`functional_graph_induced_complex.py <../example/functional_graph_induced_complex.py>`
+ * :download:`gudhi_graphical_tools_example.py <../example/gudhi_graphical_tools_example.py>`
+ * :download:`nerve_of_a_covering.py <../example/nerve_of_a_covering.py>`
+ * :download:`periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py <../example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py>`
+ * :download:`plot_alpha_complex.py <../example/plot_alpha_complex.py>`
+ * :download:`plot_rips_complex.py <../example/plot_rips_complex.py>`
+ * :download:`plot_simplex_tree_dim012.py <../example/plot_simplex_tree_dim012.py>`
+ * :download:`random_cubical_complex_persistence_example.py <../example/random_cubical_complex_persistence_example.py>`
+ * :download:`rips_complex_diagram_persistence_from_correlation_matrix_file_example.py <../example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py>`
* :download:`rips_complex_diagram_persistence_from_distance_matrix_file_example.py <../example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py>`
+ * :download:`rips_complex_diagram_persistence_from_off_file_example.py <../example/rips_complex_diagram_persistence_from_off_file_example.py>`
+ * :download:`rips_complex_edge_collapse_example.py <../example/rips_complex_edge_collapse_example.py>`
+ * :download:`rips_complex_from_points_example.py <../example/rips_complex_from_points_example.py>`
* :download:`rips_persistence_diagram.py <../example/rips_persistence_diagram.py>`
+ * :download:`simplex_tree_example.py <../example/simplex_tree_example.py>`
* :download:`sparse_rips_persistence_diagram.py <../example/sparse_rips_persistence_diagram.py>`
- * :download:`random_cubical_complex_persistence_example.py <../example/random_cubical_complex_persistence_example.py>`
- * :download:`coordinate_graph_induced_complex.py <../example/coordinate_graph_induced_complex.py>`
- * :download:`functional_graph_induced_complex.py <../example/functional_graph_induced_complex.py>`
+ * :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>`
* :download:`voronoi_graph_induced_complex.py <../example/voronoi_graph_induced_complex.py>`
- * :download:`nerve_of_a_covering.py <../example/nerve_of_a_covering.py>`
+ * :download:`witness_complex_from_nearest_landmark_table.py <../example/witness_complex_from_nearest_landmark_table.py>`
diff --git a/src/python/doc/fileformats.rst b/src/python/doc/fileformats.rst
index 345dfdba..ae1b00f3 100644
--- a/src/python/doc/fileformats.rst
+++ b/src/python/doc/fileformats.rst
@@ -80,8 +80,6 @@ Here is a simple sample file in the 3D case::
1. 1. 1.
-.. _Perseus file format:
-
Perseus
*******
diff --git a/src/python/doc/img/barycenter.png b/src/python/doc/img/barycenter.png
new file mode 100644
index 00000000..cad6af70
--- /dev/null
+++ b/src/python/doc/img/barycenter.png
Binary files differ
diff --git a/src/python/doc/img/sklearn.png b/src/python/doc/img/sklearn.png
new file mode 100644
index 00000000..d1fecbbf
--- /dev/null
+++ b/src/python/doc/img/sklearn.png
Binary files differ
diff --git a/src/python/doc/img/sphere_3d.png b/src/python/doc/img/sphere_3d.png
new file mode 100644
index 00000000..70f3184f
--- /dev/null
+++ b/src/python/doc/img/sphere_3d.png
Binary files differ
diff --git a/src/python/doc/img/spiral-color.png b/src/python/doc/img/spiral-color.png
new file mode 100644
index 00000000..21b62dfc
--- /dev/null
+++ b/src/python/doc/img/spiral-color.png
Binary files differ
diff --git a/src/python/doc/img/spiral_2d.png b/src/python/doc/img/spiral_2d.png
new file mode 100644
index 00000000..abd247cd
--- /dev/null
+++ b/src/python/doc/img/spiral_2d.png
Binary files differ
diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst
index 3387a64f..35f4ba46 100644
--- a/src/python/doc/index.rst
+++ b/src/python/doc/index.rst
@@ -53,8 +53,8 @@ Tangential complex
Topological descriptors computation
***********************************
-Persistence cohomology
-======================
+Persistent cohomology
+=====================
.. include:: persistent_cohomology_sum.inc
@@ -71,6 +71,7 @@ Wasserstein distance
.. include:: wasserstein_distance_sum.inc
+
Persistence representations
===========================
@@ -86,9 +87,12 @@ Point cloud utilities
.. include:: point_cloud_sum.inc
-Bibliography
-************
+Clustering
+**********
+
+.. include:: clustering.inc
+
+Datasets
+********
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docnames
- :style: unsrt
+.. include:: datasets.inc
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 40f3f44b..5491542f 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -5,38 +5,61 @@
Installation
############
-Conda
-*****
-The easiest way to install the Python version of GUDHI is using
-`conda <https://gudhi.inria.fr/conda/>`_.
+Packages
+********
+The easiest way to install the Python version of GUDHI is using pre-built packages.
+We recommend `conda <https://gudhi.inria.fr/conda/>`_:
+
+.. code-block:: bash
+
+ conda install -c conda-forge gudhi
+
+GUDHI is also available on `PyPI <https://pypi.org/project/gudhi/>`_:
+
+.. code-block:: bash
+
+ pip install gudhi
+
+Third-party packages are also available, for instance on Debian or Ubuntu:
+
+.. code-block:: bash
+
+ apt install python3-gudhi
+
+In all cases, you may still want to install some of the optional `run time dependencies`_.
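+
+As a quick check that the installation succeeded, you can print the build and
+configuration information from Python (:code:`__debug_info__` is detailed further
+down this page):
+
+.. code-block:: python
+
+    import gudhi
+    print(gudhi.__debug_info__)
+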
Compiling
*********
-The library uses c++14 and requires `Boost <https://www.boost.org/>`_ ≥ 1.56.0,
-`CMake <https://www.cmake.org/>`_ ≥ 3.1 to generate makefiles,
-`NumPy <http://numpy.org>`_ and `Cython <https://www.cython.org/>`_ to compile
-the GUDHI Python module.
-It is a multi-platform library and compiles on Linux, Mac OSX and Visual
-Studio 2015.
-
-On `Windows <https://wiki.python.org/moin/WindowsCompilers>`_ , only Python
-≥ 3.5 are available because of the required Visual Studio version.
-
-On other systems, if you have several Python/python installed, the version 2.X
-will be used by default, but you can force it by adding
+These instructions are for people who want to compile GUDHI from source; they are
+unnecessary if you installed a binary package of GUDHI as above. They assume that
+you have downloaded a `release <https://github.com/GUDHI/gudhi-devel/releases>`_
+with a name like `gudhi.3.X.Y.tar.gz`, then run `tar xf gudhi.3.X.Y.tar.gz`, which
+created a directory `gudhi.3.X.Y`, hereinafter referred to as `/path-to-gudhi/`.
+If you are instead using a git checkout, beware that the paths differ slightly;
+in particular, the `python/` subdirectory is actually `src/python/` there.
+
+The library uses C++17 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.66.0,
+`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.5 to generate makefiles,
+Python :math:`\geq` 3.5, `NumPy <http://numpy.org>`_ :math:`\geq` 1.15.0, `Cython <https://www.cython.org/>`_
+:math:`\geq` 0.27 and `pybind11 <https://github.com/pybind/pybind11>`_ to compile the GUDHI Python module.
+It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2017 or later.
+
+If you have several Python versions installed, version 2.X may be used by default, but you can force the use of Python 3 by adding
:code:`-DPython_ADDITIONAL_VERSIONS=3` to the cmake command.
GUDHI Python module compilation
===============================
-To build the GUDHI Python module, run the following commands in a terminal:
+After making sure that the `Compilation dependencies`_ are properly installed,
+one can build the GUDHI Python module by running the following commands in a terminal:
.. code-block:: bash
cd /path-to-gudhi/
mkdir build
cd build/
- cmake ..
+ cmake -DCMAKE_BUILD_TYPE=Release ..
cd python
make
@@ -70,20 +93,14 @@ Or install it definitely in your Python packages folder:
.. code-block:: bash
cd /path-to-gudhi/build/python
- # May require sudo or administrator privileges
- make install
+ python setup.py install # add --user to the command if you do not have the permission
+ # Or 'pip install .'
.. note::
- :code:`make install` is only a
- `CMake custom targets <https://cmake.org/cmake/help/latest/command/add_custom_target.html>`_
- to shortcut :code:`python setup.py install` command.
It does not take into account :code:`CMAKE_INSTALL_PREFIX`.
- But one can use :code:`python setup.py install ...` specific options in the python directory:
-
-.. code-block:: bash
-
- python setup.py install --prefix /home/gudhi # Install in /home/gudhi directory
+ But one can use
+ `alternate location installation <https://docs.python.org/3/install/#alternate-installation>`_.
Test suites
===========
@@ -119,63 +136,74 @@ If :code:`import gudhi` succeeds, please have a look to debug information:
.. code-block:: python
- import gudhi
- print(gudhi.__debug_info__)
+ import gudhi as gd
+ print(gd.__debug_info__)
+ print("+ Installed modules are: " + gd.__available_modules)
+ print("+ Missing modules are: " + gd.__missing_modules)
You should see something like:
.. code-block:: none
- Python version 2.7.15
- Cython version 0.26.1
- Numpy version 1.14.1
- Eigen3 version 3.1.1
- Installed modules are: off_reader;simplex_tree;rips_complex;
- cubical_complex;periodic_cubical_complex;reader_utils;witness_complex;
- strong_witness_complex;alpha_complex;
- Missing modules are: bottleneck_distance;nerve_gic;subsampling;
- tangential_complex;persistence_graphical_tools;
- euclidean_witness_complex;euclidean_strong_witness_complex;
- CGAL version 4.7.1000
- GMP_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmp.so
- GMPXX_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmpxx.so
- TBB version 9107 found and used
+ Pybind11 version 2.8.1
+ Python version 3.7.12
+ Cython version 0.29.25
+ Numpy version 1.21.4
+ Boost version 1.77.0
+ + Installed modules are: off_utils;simplex_tree;rips_complex;cubical_complex;periodic_cubical_complex;
+ persistence_graphical_tools;reader_utils;witness_complex;strong_witness_complex;
+ + Missing modules are: bottleneck;nerve_gic;subsampling;tangential_complex;alpha_complex;euclidean_witness_complex;
+ euclidean_strong_witness_complex;
-Here, you can see that bottleneck_distance, nerve_gic, subsampling and
-tangential_complex are missing because of the CGAL version.
-persistence_graphical_tools is not available as matplotlib is not
-available.
+Here, you can see that the modules that need CGAL are missing, because CGAL is not installed.
+:code:`persistence_graphical_tools` is installed, but
+`its functions <https://gudhi.inria.fr/python/latest/persistence_graphical_tools_ref.html>`_ will produce an error as
+matplotlib is not available.
Unit tests cannot be run as pytest is missing.
A complete configuration would be:
.. code-block:: none
- Python version 3.6.5
- Cython version 0.28.2
- Pytest version 3.3.2
- Matplotlib version 2.2.2
- Numpy version 1.14.5
- Eigen3 version 3.3.4
- Installed modules are: off_reader;simplex_tree;rips_complex;
- cubical_complex;periodic_cubical_complex;persistence_graphical_tools;
- reader_utils;witness_complex;strong_witness_complex;
- persistence_graphical_tools;bottleneck_distance;nerve_gic;subsampling;
- tangential_complex;alpha_complex;euclidean_witness_complex;
- euclidean_strong_witness_complex;
- CGAL header only version 4.11.0
+ Pybind11 version 2.8.1
+ Python version 3.9.7
+ Cython version 0.29.24
+ Pytest version 6.2.5
+ Matplotlib version 3.5.0
+ Numpy version 1.21.4
+ Scipy version 1.7.3
+ Scikit-learn version 1.0.1
+ POT version 0.8.0
+ HNSWlib found
+ PyKeOps version [pyKeOps]: 2.1
+ EagerPy version 0.30.0
+ TensorFlow version 2.7.0
+ Sphinx version 4.3.0
+ Sphinx-paramlinks version 0.5.2
+ python_docs_theme found
+ Eigen3 version 3.4.0
+ Boost version 1.74.0
+ CGAL version 5.3
GMP_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmp.so
GMPXX_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmpxx.so
+ MPFR_LIBRARIES = /usr/lib/x86_64-linux-gnu/libmpfr.so
TBB version 9107 found and used
+ + Installed modules are: bottleneck;off_utils;simplex_tree;rips_complex;cubical_complex;periodic_cubical_complex;
+ persistence_graphical_tools;reader_utils;witness_complex;strong_witness_complex;nerve_gic;subsampling;
+ tangential_complex;alpha_complex;euclidean_witness_complex;euclidean_strong_witness_complex;
+ + Missing modules are:
+
Documentation
=============
-To build the documentation, `sphinx-doc <http://www.sphinx-doc.org>`_ and
-`sphinxcontrib-bibtex <https://sphinxcontrib-bibtex.readthedocs.io>`_ are
+To build the documentation, `sphinx-doc <http://www.sphinx-doc.org>`_,
+`sphinxcontrib-bibtex <https://sphinxcontrib-bibtex.readthedocs.io>`_,
+`sphinxcontrib-paramlinks <https://github.com/sqlalchemyorg/sphinx-paramlinks>`_ and
+`python-docs-theme <https://github.com/python/python-docs-theme>`_ are
required. As the documentation is auto-tested, `CGAL`_, `Eigen`_,
-`Matplotlib`_, `NumPy`_ and `SciPy`_ are also mandatory to build the
-documentation.
+`Matplotlib`_, `NumPy`_, `POT`_, `Scikit-learn`_ and `SciPy`_ are
+also mandatory to build the documentation.
Run the following commands in a terminal:
@@ -187,19 +215,25 @@ Run the following commands in a terminal:
Optional third-party library
****************************
+Compilation dependencies
+========================
+
+These third-party dependencies are detected by `CMake <https://www.cmake.org/>`_.
+They have to be installed before performing the `GUDHI Python module compilation`_.
+
CGAL
-====
+----
Some GUDHI modules (cf. :doc:`modules list </index>`), and a few examples,
-require CGAL, a C++ library that provides easy access to efficient and
-reliable geometric algorithms.
+require `CGAL <https://www.cgal.org/>`_, a C++ library that provides easy
+access to efficient and reliable geometric algorithms.
The procedure to install this library
according to your operating system is detailed
`here <http://doc.cgal.org/latest/Manual/installation.html>`_.
-The following examples requires CGAL version ≥ 4.11.0:
+The following examples require CGAL version :math:`\geq` 4.11.0:
.. only:: builder_html
@@ -211,14 +245,14 @@ The following examples requires CGAL version ≥ 4.11.0:
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
Eigen
-=====
+-----
Some GUDHI modules (cf. :doc:`modules list </index>`), and a few examples,
require `Eigen <http://eigen.tuxfamily.org/>`_, a C++ template
library for linear algebra: matrices, vectors, numerical solvers, and related
algorithms.
-The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version ≥ 3.1.0:
+The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version :math:`\geq` 3.1.0:
.. only:: builder_html
@@ -228,8 +262,46 @@ The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
+Threading Building Blocks
+-------------------------
+
+`Intel® TBB <https://www.threadingbuildingblocks.org/>`_ lets you easily write
+parallel C++ programs that take full advantage of multicore performance, that
+are portable and composable, and that have future-proof scalability.
+
+Having Intel® TBB installed is recommended to parallelize and accelerate some
+GUDHI computations.
+
+Run time dependencies
+=====================
+
+These third-party dependencies are detected by the Python `import` mechanism at run time.
+They can be installed when required.
+
+EagerPy
+-------
+
+Some Python functions can handle automatic differentiation (possibly only when
+a flag `enable_autodiff=True` is used). In order to reduce code duplication, we
+use `EagerPy <https://eagerpy.jonasrauber.de/>`_ which wraps arrays from
+PyTorch, TensorFlow and JAX in a common interface.
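+
+As a hedged sketch of what this enables (assuming the optional `torch` and
+`eagerpy` packages, as well as `POT`_, are installed), gradients can flow back
+through :func:`~gudhi.wasserstein.wasserstein_distance`:
+
+.. code-block:: python
+
+    import torch
+    from gudhi.wasserstein import wasserstein_distance
+
+    X = torch.tensor([[0., 1.], [1., 3.]], requires_grad=True)  # diagram X
+    Y = torch.tensor([[0., 2.]])                                # diagram Y
+    d = wasserstein_distance(X, Y, order=1, enable_autodiff=True)
+    d.backward()   # gradient with respect to the points of X
+    print(X.grad)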
+
+Joblib
+------
+
+`Joblib <https://joblib.readthedocs.io/>`_ is used both as a dependency of `Scikit-learn`_,
+and directly for parallelism in some modules (:class:`~gudhi.point_cloud.knn.KNearestNeighbors`,
+:func:`~gudhi.representations.metrics.pairwise_persistence_diagram_distances`).
+
+Hnswlib
+-------
+
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`Hnswlib <https://github.com/nmslib/hnswlib>`_ as a backend if explicitly
+requested, to speed-up queries.
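+
+A minimal sketch of such a request (the backend is chosen through the
+`implementation` argument; treat the exact name as an assumption and check the
+class reference):
+
+.. code-block:: python
+
+    import numpy as np
+    from gudhi.point_cloud.knn import KNearestNeighbors
+
+    pts = np.random.default_rng(0).random((100, 3))
+    knn = KNearestNeighbors(k=5, implementation="hnswlib")  # backend name assumed
+    print(knn.fit_transform(pts).shape)  # (100, 5): indices of the 5 nearest neighbors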
+
Matplotlib
-==========
+----------
The :doc:`persistence graphical tools </persistence_graphical_tools_user>`
module requires `Matplotlib <http://matplotlib.org>`_, a Python 2D plotting
@@ -250,43 +322,92 @@ The following examples require the `Matplotlib <http://matplotlib.org>`_:
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
+LaTeX
+~~~~~
+
+If a sufficiently complete LaTeX toolchain is available (including dvipng and ghostscript), the LaTeX option of
+matplotlib is enabled for prettier captions (cf.
+`matplotlib text rendering with LaTeX <https://matplotlib.org/3.3.0/tutorials/text/usetex.html>`_).
+It also requires the `type1cm` LaTeX package (not detected by matplotlib).
+
+If you are facing issues with LaTeX rendering, like this one:
+
+.. code-block:: none
+
+ Traceback (most recent call last):
+ File "/usr/lib/python3/dist-packages/matplotlib/texmanager.py", line 302, in _run_checked_subprocess
+ report = subprocess.check_output(command,
+ ...
+ ! LaTeX Error: File `type1cm.sty' not found.
+ ...
+
+This is because a required LaTeX package is not installed on your system. On Ubuntu you can install texlive-full
+(for all LaTeX packages) or more specific packages like texlive-latex-extra and cm-super.
+
+Alternatively, you can simply deactivate LaTeX rendering:
+
+.. code-block:: python
+
+ import gudhi as gd
+ gd.persistence_graphical_tools._gudhi_matplotlib_use_tex=False
+
+PyKeOps
+-------
+
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`PyKeOps <https://www.kernel-operations.io/keops/python/>`_ as a backend if
+explicitly requested, to speed-up queries using a GPU.
+
Python Optimal Transport
-========================
+------------------------
The :doc:`Wasserstein distance </wasserstein_distance_user>`
-module requires `POT <https://pot.readthedocs.io/>`_, a library that provides
+module requires `POT <https://pythonot.github.io/>`_, a library that provides
several solvers for optimization problems related to Optimal Transport.
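+
+A minimal usage sketch (assuming `POT`_ is installed):
+
+.. code-block:: python
+
+    import numpy as np
+    from gudhi.wasserstein import wasserstein_distance
+
+    X = np.array([[0., 1.], [1., 3.]])  # one persistence diagram
+    Y = np.array([[0., 2.]])            # another persistence diagram
+    print(wasserstein_distance(X, Y, order=1.))
+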
+PyTorch
+-------
+
+`PyTorch <https://pytorch.org/>`_ is currently only used as a dependency of
+`PyKeOps`_, and in some tests.
+
Scikit-learn
-============
+------------
The :doc:`persistence representations </representations>` module requires
`scikit-learn <https://scikit-learn.org/>`_, a Python-based ecosystem of
open-source software for machine learning.
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`scikit-learn <https://scikit-learn.org/>`_ as a backend if explicitly
+requested.
+
SciPy
-=====
+-----
The :doc:`persistence graphical tools </persistence_graphical_tools_user>` and
:doc:`Wasserstein distance </wasserstein_distance_user>` modules require `SciPy
<http://scipy.org>`_, a Python-based ecosystem of open-source software for
mathematics, science, and engineering.
-Threading Building Blocks
-=========================
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`SciPy <http://scipy.org>`_ :math:`\geq` 1.6.0 as a backend if explicitly requested.
-`Intel® TBB <https://www.threadingbuildingblocks.org/>`_ lets you easily write
-parallel C++ programs that take full advantage of multicore performance, that
-are portable and composable, and that have future-proof scalability.
+TensorFlow
+----------
-Having Intel® TBB installed is recommended to parallelize and accelerate some
-GUDHI computations.
+The :doc:`cubical complex </cubical_complex_tflow_itf_ref>`, :doc:`simplex tree </ls_simplex_tree_tflow_itf_ref>`
+and :doc:`Rips complex </rips_complex_tflow_itf_ref>` modules require `TensorFlow <https://www.tensorflow.org>`_
+in order to incorporate them in neural nets.
+
+`TensorFlow <https://www.tensorflow.org>`_ is also used in some automatic differentiation tests.
Bug reports and contributions
*****************************
-Please help us improving the quality of the GUDHI library. You may report bugs or suggestions to:
-
- Contact: gudhi-users@lists.gforge.inria.fr
+Please help us improve the quality of the GUDHI library.
+You may `report bugs <https://github.com/GUDHI/gudhi-devel/issues>`_ or
+`contact us <https://gudhi.inria.fr/contact/>`_ with any suggestions.
-GUDHI is open to external contributions. If you want to join our development team, please contact us.
+GUDHI is open to external contributions. If you want to join our development team, please take some time to read our
+`contributing guide <https://github.com/GUDHI/gudhi-devel/blob/master/.github/CONTRIBUTING.md>`_.
diff --git a/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
new file mode 100644
index 00000000..9d7d633f
--- /dev/null
+++ b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
@@ -0,0 +1,53 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for lower-star persistence on simplex trees
+############################################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from lower-star filtration of a simplex tree
+-------------------------------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import LowerStarSimplexTreeLayer
+ import tensorflow as tf
+ import gudhi as gd
+
+ st = gd.SimplexTree()
+ st.insert([0, 1])
+ st.insert([1, 2])
+ st.insert([2, 3])
+ st.insert([3, 4])
+ st.insert([4, 5])
+ st.insert([5, 6])
+ st.insert([6, 7])
+ st.insert([7, 8])
+ st.insert([8, 9])
+ st.insert([9, 10])
+
+ F = tf.Variable([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=tf.float32, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [F])
+ print(grads[0].indices.numpy())
+ print(grads[0].values.numpy())
+
+.. testoutput::
+
+ [2 4]
+ [-1. 1.]
+
+Documentation for LowerStarSimplexTreeLayer
+-------------------------------------------
+
+.. autoclass:: gudhi.tensorflow.LowerStarSimplexTreeLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/nerve_gic_complex_sum.inc b/src/python/doc/nerve_gic_complex_sum.inc
index d633c4ff..7db6c124 100644
--- a/src/python/doc/nerve_gic_complex_sum.inc
+++ b/src/python/doc/nerve_gic_complex_sum.inc
@@ -1,16 +1,16 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière |
- | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | |
- | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Introduced in: GUDHI 2.3.0 |
- | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | |
- | | the image of a scalar-valued function defined on the data. | :Copyright: MIT (`GPL v3 </licensing/>`_) |
- | | | |
- | | | :Requires: `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | | |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` |
- +----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+
+ | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière |
+ | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | |
+ | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 |
+ | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | |
+ | | the image of a scalar-valued function defined on the data. | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ | | | |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+
+ | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst
index 9101f45d..8633cadb 100644
--- a/src/python/doc/nerve_gic_complex_user.rst
+++ b/src/python/doc/nerve_gic_complex_user.rst
@@ -12,8 +12,8 @@ Definition
Visualizations of the simplicial complexes can be done with either
neato (from `graphviz <http://www.graphviz.org/>`_),
`geomview <http://www.geomview.org/>`_,
-`KeplerMapper <https://github.com/MLWave/kepler-mapper>`_.
-Input point clouds are assumed to be OFF files (cf. :doc:`fileformats`).
+or `KeplerMapper <https://github.com/scikit-tda/kepler-mapper>`_.
+Input point clouds are assumed to be OFF files (cf. `OFF file format <fileformats.html#off-file-format>`_).
Covers
------
@@ -50,7 +50,7 @@ The cover C comes from the preimages of intervals (10 intervals with gain 0.3)
covering the height function (coordinate 2),
which are then refined into their connected components using the triangulation of the .OFF file.
-.. testcode::
+.. code-block:: python
import gudhi
nerve_complex = gudhi.CoverComplex()
@@ -99,9 +99,6 @@ the program output is:
[-0.171433, 0.367393]
[-0.909111, 0.745853]
0 interval(s) in dimension 1:
-
-.. testoutput::
-
Nerve is of dimension 1 - 41 simplices - 21 vertices.
[0]
[1]
diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc
index 0cdf8072..7ff63ae2 100644
--- a/src/python/doc/persistence_graphical_tools_sum.inc
+++ b/src/python/doc/persistence_graphical_tools_sum.inc
@@ -1,14 +1,14 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
- | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau |
- | img/graphical_tools_representation.png | the user to build easily persistence barcode, diagram or density. | |
- | | | :Introduced in: GUDHI 2.0.0 |
- | | | |
- | | | :Copyright: MIT |
- | | | |
- | | | :Requires: matplotlib, numpy and scipy |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
- | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+
+ | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe |
+ | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | |
+ | | | :Since: GUDHI 2.0.0 |
+ | | Note that these functions return the matplotlib axis, allowing | |
+ | | for further modifications (title, aspect, etc.) | :License: MIT |
+ | | | |
+ | | | :Requires: `Matplotlib <installation.html#matplotlib>`_ |
+ +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` |
+ +-----------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index 80002db6..e1d28c71 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -12,15 +12,12 @@ Definition
Show persistence as a barcode
-----------------------------
-.. note::
- this function requires matplotlib and numpy to be available
-
This function can display the persistence result as a barcode:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
off_file = gudhi.__root_source_dir__ + '/data/points/tore3D_300.off'
@@ -31,41 +28,53 @@ This function can display the persistence result as a barcode:
diag = simplex_tree.persistence(min_persistence=0.4)
gudhi.plot_persistence_barcode(diag)
- plot.show()
+ plt.show()
Show persistence as a diagram
-----------------------------
-.. note::
- this function requires matplotlib and numpy to be available
-
This function can display the persistence result as a diagram:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
'/data/persistence_diagram/rips_on_tore3D_1307.pers'
- gudhi.plot_persistence_diagram(persistence_file=persistence_file,
+ ax = gudhi.plot_persistence_diagram(persistence_file=persistence_file,
legend=True)
- plot.show()
+ # We can modify the title, aspect, etc.
+ ax.set_title("Persistence diagram of a torus")
+ ax.set_aspect("equal") # forces to be square shaped
+ plt.show()
+
+Note that (as with the barcode and density functions) it can also take a simple `np.array`
+of shape (N x 2) encoding a persistence diagram (in a given dimension).
+
+.. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ import gudhi
+ import numpy as np
+ d = np.array([[0., 1.], [1., 2.], [1., np.inf]])
+ gudhi.plot_persistence_diagram(d)
+ plt.show()
Persistence density
-------------------
-.. note::
- this function requires matplotlib, numpy and scipy to be available
+:Requires: `SciPy <installation.html#scipy>`_
If you want more information on a specific dimension, for instance:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
@@ -75,9 +84,20 @@ If you want more information on a specific dimension, for instance:
only_this_dim=1)
pers_diag = [(1, elt) for elt in birth_death]
# Use subplots to display diagram and density side by side
- fig, axes = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
+ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
gudhi.plot_persistence_diagram(persistence=pers_diag,
axes=axes[0])
gudhi.plot_persistence_density(persistence=pers_diag,
dimension=1, legend=True, axes=axes[1])
- plot.show()
+ plt.show()
+
+LaTeX support
+-------------
+
+If you are facing issues with `LaTeX <installation.html#latex>`_ rendering, you can deactivate it:
+
+.. code-block:: python
+
+ import gudhi
+ gudhi.persistence_graphical_tools._gudhi_matplotlib_use_tex=False
diff --git a/src/python/doc/persistent_cohomology_sum.inc b/src/python/doc/persistent_cohomology_sum.inc
index 4d7b077e..58e44b8a 100644
--- a/src/python/doc/persistent_cohomology_sum.inc
+++ b/src/python/doc/persistent_cohomology_sum.inc
@@ -1,18 +1,16 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
+-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
| .. figure:: | The theory of homology consists in attaching to a topological space | :Author: Clément Maria |
| ../../doc/Persistent_cohomology/3DTorus_poch.png | a sequence of (homology) groups, capturing global topological | |
- | :figclass: align-center | features like connected components, holes, cavities, etc. Persistent | :Introduced in: GUDHI 2.0.0 |
+ | :figclass: align-center | features like connected components, holes, cavities, etc. Persistent | :Since: GUDHI 2.0.0 |
| | homology studies the evolution -- birth, life and death -- of these | |
- | Rips Persistent Cohomology on a 3D | features when the topological space is changing. Consequently, the | :Copyright: MIT |
- | Torus | theory is essentially composed of three elements: topological spaces, | |
- | | their homology groups and an evolution scheme. | |
+ | Rips Persistent Cohomology on a 3D Torus | features when the topological space is changing. | :License: MIT |
| | | |
| | Computation of persistent cohomology using the algorithm of | |
| | :cite:`DBLP:journals/dcg/SilvaMV11` and | |
- | | :cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed | |
+ | | :cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed | |
| | Annotation Matrix implementation of | |
| | :cite:`DBLP:conf/esa/BoissonnatDM13`. | |
| | | |
diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst
index de83cda1..39744b95 100644
--- a/src/python/doc/persistent_cohomology_user.rst
+++ b/src/python/doc/persistent_cohomology_user.rst
@@ -6,22 +6,27 @@ Persistent cohomology user manual
=================================
Definition
----------
-===================================== ===================================== =====================================
-:Author: Clément Maria :Introduced in: GUDHI PYTHON 2.0.0 :Copyright: GPL v3
-===================================== ===================================== =====================================
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
-| :doc:`persistent_cohomology_user` | Please refer to each data structure that contains persistence |
-| | feature for reference: |
-| | |
-| | * :doc:`simplex_tree_ref` |
-| | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+ * - :Author: Clément Maria
+ - :Since: GUDHI 2.0.0
+ - :License: MIT
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :doc:`persistent_cohomology_user`
+ - Please refer to each data structure that contains persistence feature for reference:
+ * :doc:`simplex_tree_ref`
+ * :doc:`cubical_complex_ref`
+ * :doc:`periodic_cubical_complex_ref`
Computation of persistent cohomology using the algorithm of :cite:`DBLP:journals/dcg/SilvaMV11` and
-:cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed Annotation Matrix implementation of
+:cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed Annotation Matrix implementation of
:cite:`DBLP:conf/esa/BoissonnatDM13`.
The theory of homology consists in attaching to a topological space a sequence of (homology) groups, capturing global
@@ -111,10 +116,3 @@ We provide several example files: run these examples with -h for details on thei
* :download:`rips_complex_diagram_persistence_from_distance_matrix_file_example.py <../example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py>`
* :download:`random_cubical_complex_persistence_example.py <../example/random_cubical_complex_persistence_example.py>`
* :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>`
-
-Bibliography
-============
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docnames
- :style: unsrt
diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst
index d668428a..473b303f 100644
--- a/src/python/doc/point_cloud.rst
+++ b/src/python/doc/point_cloud.rst
@@ -13,10 +13,40 @@ File Readers
.. autofunction:: gudhi.read_lower_triangular_matrix_from_csv_file
+File Writers
+------------
+
+.. autofunction:: gudhi.write_points_to_off_file
+
Subsampling
-----------
+:Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
+
.. automodule:: gudhi.subsampling
:members:
:special-members:
:show-inheritance:
+
+Time Delay Embedding
+--------------------
+
+.. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding
+ :members:
+ :special-members: __call__
+
+K nearest neighbors
+-------------------
+
+.. automodule:: gudhi.point_cloud.knn
+ :members:
+ :undoc-members:
+ :special-members: __init__
+
+Distance to measure
+-------------------
+
+.. automodule:: gudhi.point_cloud.dtm
+ :members:
+ :undoc-members:
+ :special-members: __init__
diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc
index 85d52de7..f955c3ab 100644
--- a/src/python/doc/point_cloud_sum.inc
+++ b/src/python/doc/point_cloud_sum.inc
@@ -1,15 +1,12 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, etc. | :Author: Vincent Rouvreau |
- | | :math:`(y_1, y_2, \ldots, y_d)` | | |
- | | | :Introduced in: GUDHI 2.0.0 |
- | | | |
- | | | :Copyright: MIT (`GPL v3 </licensing/>`_) |
- | | Parts of this package require CGAL. | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`point_cloud` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+
+ | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, | :Authors: Vincent Rouvreau, Marc Glisse, Masatoshi Takenouchi |
+ | | :math:`(y_1, y_2, \ldots, y_d)` | find neighbors, embed time series in higher dimension, | |
+ | | estimate a density, etc. | :Since: GUDHI 2.0.0 |
+ | | | |
+ | | | :License: MIT (`GPL v3 </licensing/>`_, BSD-3-Clause, Apache-2.0) |
+ +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+
+ | * :doc:`point_cloud` |
+ +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/python3-sphinx-build.py b/src/python/doc/python3-sphinx-build.py
deleted file mode 100755
index 84d158cf..00000000
--- a/src/python/doc/python3-sphinx-build.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Emulate sphinx-build for python3
-"""
-
-from sys import exit, argv
-from sphinx import main
-
-if __name__ == '__main__':
- exit(main(argv))
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index 11dcbcf9..b0477197 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -10,13 +10,47 @@ Representations manual
This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
-A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time.
+A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
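+
+For instance, a minimal sketch of a valid input made of two diagrams with
+different numbers of points:
+
+.. code-block:: python
+
+    import numpy as np
+    diags = [np.array([[0., 1.], [0., 2.]]),  # first diagram: 2 points
+             np.array([[0., 3.]])]            # second diagram: 1 point
+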
-A small example is provided
+Examples
+--------
-.. only:: builder_html
+Landscapes
+^^^^^^^^^^
- * :download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>`
+This example computes the first two Landscapes associated to a persistence diagram with four points. The landscapes are evaluated on ten samples, leading to two vectors with ten coordinates each, that are eventually concatenated in order to produce a single vector representation.
+
+.. testcode::
+
+ import numpy as np
+ from gudhi.representations import Landscape
+ # A single diagram with 4 points
+ D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])
+ diags = [D]
+ l=Landscape(num_landscapes=2,resolution=10).fit_transform(diags)
+ print(l)
+
+The output is:
+
+.. testoutput::
+
+ [[1.02851895 2.05703791 2.57129739 1.54277843 0.89995409 1.92847304
+ 2.95699199 3.08555686 2.05703791 1.02851895 0. 0.64282435
+ 0. 0. 0.51425948 0. 0. 0.
+ 0.77138922 1.02851895]]
+
+Various kernels
+^^^^^^^^^^^^^^^
+
+A small example is also provided in
+:download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>`.
+
+Machine Learning and Topological Data Analysis
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-representations.ipynb>`_ explains how to
+efficiently combine machine learning and topological data analysis with the
+:doc:`representations module<representations>`.
Preprocessing
@@ -46,27 +80,3 @@ Metrics
:members:
:special-members:
:show-inheritance:
-
-Basic example
--------------
-
-This example computes the first two Landscapes associated to a persistence diagram with four points. The landscapes are evaluated on ten samples, leading to two vectors with ten coordinates each, that are eventually concatenated in order to produce a single vector representation.
-
-.. testcode::
-
- import numpy as np
- from gudhi.representations import Landscape
- # A single diagram with 4 points
- D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])
- diags = [D]
- l=Landscape(num_landscapes=2,resolution=10).fit_transform(diags)
- print(l)
-
-The output is:
-
-.. testoutput::
-
- [[1.02851895 2.05703791 2.57129739 1.54277843 0.89995409 1.92847304
- 2.95699199 3.08555686 2.05703791 1.02851895 0. 0.64282435
- 0. 0. 0.51425948 0. 0. 0.
- 0.77138922 1.02851895]]
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
index 700828f1..9515f044 100644
--- a/src/python/doc/representations_sum.inc
+++ b/src/python/doc/representations_sum.inc
@@ -1,14 +1,14 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
- | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière |
- | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
- | | | :Introduced in: GUDHI 3.1.0 |
- | | | |
- | | | :Copyright: MIT |
- | | | |
- | | | :Requires: scikit-learn |
- +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
- | * :doc:`representations` |
- +------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer, Gard Spreemann, Wojciech Reise |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
+ | | | :Since: GUDHI 3.1.0 |
+ | | | |
+ | | | :License: MIT |
+ | | | |
+ | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst
index 22b5616c..f0582d5c 100644
--- a/src/python/doc/rips_complex_ref.rst
+++ b/src/python/doc/rips_complex_ref.rst
@@ -12,3 +12,25 @@ Rips complex reference manual
:show-inheritance:
.. automethod:: gudhi.RipsComplex.__init__
+
+======================================
+Weighted Rips complex reference manual
+======================================
+
+.. autoclass:: gudhi.weighted_rips_complex.WeightedRipsComplex
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. automethod:: gudhi.weighted_rips_complex.WeightedRipsComplex.__init__
+
+=================================
+DTM Rips complex reference manual
+=================================
+
+.. autoclass:: gudhi.dtm_rips_complex.DTMRipsComplex
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. automethod:: gudhi.dtm_rips_complex.DTMRipsComplex.__init__ \ No newline at end of file
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index 857c6893..2b125e54 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -1,16 +1,19 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+
- | .. figure:: | Rips complex is a simplicial complex constructed from a one skeleton | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse |
- | ../../doc/Rips_complex/rips_complex_representation.png | graph. | |
- | :figclass: align-center | | :Introduced in: GUDHI 2.0.0 |
- | | The filtration value of each edge is computed from a user-given | |
- | | distance function and is inserted until a user-given threshold | :Copyright: MIT |
- | | value. | |
- | | | |
- | | This complex can be built from a point cloud and a distance function, | |
- | | or from a distance matrix. | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+
- | * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
- +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | .. figure:: | The Vietoris-Rips complex is a simplicial complex built as the | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse, Yuichi Ike |
+ | ../../doc/Rips_complex/rips_complex_representation.png | clique-complex of a proximity graph. | |
+ | :figclass: align-center | | :Since: GUDHI 2.0.0 |
+ | | We also provide sparse approximations, to speed-up the computation | |
+ | | of persistent homology, and weighted versions, which are more robust | :License: MIT |
+ | | to outliers. | |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | .. image:: | * :doc:`rips_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_tflow_itf_ref.rst b/src/python/doc/rips_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..3ce75868
--- /dev/null
+++ b/src/python/doc/rips_complex_tflow_itf_ref.rst
@@ -0,0 +1,48 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for Vietoris-Rips persistence
+##############################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from Vietoris-Rips persistence
+-----------------------------------------------------------
+
+.. testsetup::
+
+ import numpy
+ numpy.set_printoptions(precision=4)
+
+.. testcode::
+
+ from gudhi.tensorflow import RipsLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[1.,1.],[2.,2.]], dtype=tf.float32, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testcleanup::
+
+ numpy.set_printoptions(precision=8)
+
+.. testoutput::
+
+ [[-0.5 -0.5]
+ [ 0.5 0.5]]
+
+Documentation for RipsLayer
+---------------------------
+
+.. autoclass:: gudhi.tensorflow.RipsLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index a27573e8..a4e83462 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -7,13 +7,7 @@ Rips complex user manual
Definition
----------
-==================================================================== ================================ ======================
-:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse :Introduced in: GUDHI 2.0.0 :Copyright: GPL v3
-==================================================================== ================================ ======================
-
-+-------------------------------------------+----------------------------------------------------------------------+
-| :doc:`rips_complex_user` | :doc:`rips_complex_ref` |
-+-------------------------------------------+----------------------------------------------------------------------+
+.. include:: rips_complex_sum.inc
The `Rips complex <https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex>`_ is a simplicial complex that
generalizes proximity (:math:`\varepsilon`-ball) graphs to higher dimensions. The vertices correspond to the input
@@ -40,9 +34,6 @@ A vertex name corresponds to the index of the point in the given range (aka. the
On this example, as edges (4,5), (4,6) and (5,6) are in the complex, simplex (4,5,6) is added with the filtration value
set with :math:`max(filtration(4,5), filtration(4,6), filtration(5,6))`. And so on for simplex (0,1,2,3).
-If the :doc:`RipsComplex <rips_complex_ref>` interfaces are not detailed enough for your need, please refer to
-rips_persistence_step_by_step.cpp C++ example, where the graph construction over the Simplex_tree is more detailed.
-
A Rips complex can easily become huge, even if we limit the length of the edges
and the dimension of the simplices. One easy trick, before building a Rips
complex on a point cloud, is to call :func:`~gudhi.sparsify_point_set` which removes points
@@ -61,6 +52,13 @@ construction of a :class:`~gudhi.RipsComplex` object asks it to build a sparse R
parameter :math:`\varepsilon=0.3`, while the default `sparse=None` builds the
regular Rips complex.
+Another option which is especially useful if you want to compute persistent homology in "high" dimension (2 or more,
+sometimes even 1), is to build the Rips complex only up to dimension 1 (a graph), then use
+:func:`~gudhi.SimplexTree.collapse_edges` to reduce the size of this graph, and finally call
+:func:`~gudhi.SimplexTree.expansion` to get a simplicial complex of a suitable dimension to compute its homology. This
+trick gives the same persistence diagram as one would get with a plain use of `RipsComplex`, with a complex that is
+often significantly smaller and thus faster to process.
+
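+A minimal sketch of this trick (reusing the point cloud from the examples below):
+
+.. code-block:: python
+
+    import gudhi
+    points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+    st = gudhi.RipsComplex(points=points, max_edge_length=12.0).create_simplex_tree(max_dimension=1)
+    st.collapse_edges()  # reduce the graph while preserving the persistence diagram
+    st.expansion(2)      # then expand to the dimension of interest
+    diag = st.persistence()
+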
Point cloud
-----------
@@ -123,54 +121,44 @@ Notice that if we use
asking for a very sparse version (theory only gives some guarantee on the meaning of the output if `sparse<1`),
2 to 5 edges disappear, depending on the random vertex used to start the sparsification.
-Example from OFF file
-^^^^^^^^^^^^^^^^^^^^^
+Example step by step
+^^^^^^^^^^^^^^^^^^^^
-This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
-points in an OFF file, and max_edge_length value.
-Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
+While :doc:`RipsComplex <rips_complex_ref>` is convenient, for instance to build a simplicial complex in one line
-Finally, it is asked to display information about the Rips complex.
+.. testcode::
+
+ import gudhi
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ cplx = gudhi.RipsComplex(points=points, max_edge_length=12.0).create_simplex_tree(max_dimension=2)
+you can achieve the same result without this class, gaining more flexibility:
.. testcode::
- import gudhi
- off_file = gudhi.__root_source_dir__ + '/data/points/alphacomplexdoc.off'
- point_cloud = gudhi.read_points_from_off_file(off_file = off_file)
- rips_complex = gudhi.RipsComplex(points=point_cloud, max_edge_length=12.0)
- simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
- result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
- fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
- print(fmt % tuple(filtered_value))
+ import gudhi
+ from scipy.spatial.distance import cdist
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ distance_matrix = cdist(points, points)
+ cplx = gudhi.SimplexTree.create_from_array(distance_matrix, max_filtration=12.0)
+ cplx.expansion(2)
-the program output is:
+or
-.. testoutput::
+.. testcode::
- Rips complex is of dimension 1 - 18 simplices - 7 vertices.
- [0] -> 0.00
- [1] -> 0.00
- [2] -> 0.00
- [3] -> 0.00
- [4] -> 0.00
- [5] -> 0.00
- [6] -> 0.00
- [2, 3] -> 5.00
- [4, 5] -> 5.39
- [0, 2] -> 5.83
- [0, 1] -> 6.08
- [1, 3] -> 6.32
- [1, 2] -> 6.71
- [5, 6] -> 7.28
- [2, 4] -> 8.94
- [0, 3] -> 9.43
- [4, 6] -> 9.49
- [3, 6] -> 11.00
+ import gudhi
+ from scipy.spatial import cKDTree
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ tree = cKDTree(points)
+ edges = tree.sparse_distance_matrix(tree, max_distance=12.0, output_type="coo_matrix")
+ cplx = gudhi.SimplexTree()
+ cplx.insert_edges_from_coo_matrix(edges)
+ cplx.expansion(2)
+
+
+This way, you can easily add a call to :func:`~gudhi.SimplexTree.collapse_edges` before the expansion,
+use a different metric to compute the matrix, or other variations.
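+
+For instance, a sketch of the same pipeline with an edge collapse and a
+different metric (both choices are purely illustrative):
+
+.. code-block:: python
+
+    import gudhi
+    from scipy.spatial.distance import cdist
+    points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+    distance_matrix = cdist(points, points, metric="chebyshev")
+    cplx = gudhi.SimplexTree.create_from_array(distance_matrix, max_filtration=12.0)
+    cplx.collapse_edges()  # reduce the graph before expanding
+    cplx.expansion(2)
+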
Distance matrix
---------------
@@ -229,54 +217,7 @@ until dimension 1 - one skeleton graph in other words), the output is:
[4, 6] -> 9.49
[3, 6] -> 11.00
-Example from csv file
-^^^^^^^^^^^^^^^^^^^^^
-
-This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
-distance matrix in a csv file, and max_edge_length value.
-Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
-
-Finally, it is asked to display information about the Rips complex.
-
-
-.. testcode::
-
- import gudhi
- distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=gudhi.__root_source_dir__ + \
- '/data/distance_matrix/full_square_distance_matrix.csv')
- rips_complex = gudhi.RipsComplex(distance_matrix=distance_matrix, max_edge_length=12.0)
- simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
- result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
- fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
- print(fmt % tuple(filtered_value))
-
-the program output is:
-
-.. testoutput::
-
- Rips complex is of dimension 1 - 18 simplices - 7 vertices.
- [0] -> 0.00
- [1] -> 0.00
- [2] -> 0.00
- [3] -> 0.00
- [4] -> 0.00
- [5] -> 0.00
- [6] -> 0.00
- [2, 3] -> 5.00
- [4, 5] -> 5.39
- [0, 2] -> 5.83
- [0, 1] -> 6.08
- [1, 3] -> 6.32
- [1, 2] -> 6.71
- [5, 6] -> 7.28
- [2, 4] -> 8.94
- [0, 3] -> 9.43
- [4, 6] -> 9.49
- [3, 6] -> 11.00
+If this lower triangular distance matrix is stored in a CSV file, like `data/distance_matrix/full_square_distance_matrix.csv` in the GUDHI distribution, you can read it with :func:`~gudhi.read_lower_triangular_matrix_from_csv_file`.
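+
+For instance (a sketch, with the path given relative to the GUDHI sources):
+
+.. code-block:: python
+
+    import gudhi
+    distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+        csv_file=gudhi.__root_source_dir__ + '/data/distance_matrix/full_square_distance_matrix.csv')
+    rips_complex = gudhi.RipsComplex(distance_matrix=distance_matrix, max_edge_length=12.0)
+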
Correlation matrix
------------------
@@ -347,3 +288,76 @@ until dimension 1 - one skeleton graph in other words), the output is:
points in the persistence diagram will be under the diagonal, and
bottleneck distance and persistence graphical tool will not work properly,
this is a known issue.
+
+Weighted Rips Complex
+---------------------
+
+`WeightedRipsComplex <rips_complex_ref.html#weighted-rips-complex-reference-manual>`_ builds a simplicial complex from a distance matrix and weights on vertices.
+
+
+Example from a distance matrix and weights
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices.
+
+.. testcode::
+
+ from gudhi.weighted_rips_complex import WeightedRipsComplex
+ dist = [[], [1]]
+ weights = [1, 100]
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ print(list(st.get_filtration()))
+
+The output is:
+
+.. testoutput::
+
+ [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)]
+
+Example from a point cloud combined with DistanceToMeasure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Combining with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-DTM-filtrations.ipynb>`_.
+Note that the `DTMRipsComplex <rips_complex_user.html#dtm-rips-complex>`_ class provides exactly this functionality.
+
+.. testcode::
+
+ import numpy as np
+ from scipy.spatial.distance import cdist
+ from gudhi.point_cloud.dtm import DistanceToMeasure
+ from gudhi.weighted_rips_complex import WeightedRipsComplex
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dist = cdist(pts,pts)
+ dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+ r = dtm.fit_transform(dist)
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ print(st.persistence())
+
+The output is:
+
+.. testoutput::
+
+ [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))]
+
+DTM Rips Complex
+----------------
+
+:class:`~gudhi.dtm_rips_complex.DTMRipsComplex` builds a simplicial complex from a point set or a full distance matrix (in the form of an ndarray), as described in the example above.
+This class constructs a weighted Rips complex giving larger weights to outliers, which reduces their impact on the persistence diagram. See `this notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-DTM-filtrations.ipynb>`_ for some experiments.
+
+.. testcode::
+
+ import numpy as np
+ from gudhi.dtm_rips_complex import DTMRipsComplex
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dtm_rips = DTMRipsComplex(points=pts, k=2)
+ st = dtm_rips.create_simplex_tree(max_dimension=2)
+ print(st.persistence())
+
+The output is:
+
+.. testoutput::
+
+ [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))]
diff --git a/src/python/doc/simplex_tree_ref.rst b/src/python/doc/simplex_tree_ref.rst
index 9eb8c199..46b2c1e5 100644
--- a/src/python/doc/simplex_tree_ref.rst
+++ b/src/python/doc/simplex_tree_ref.rst
@@ -8,7 +8,6 @@ Simplex tree reference manual
.. autoclass:: gudhi.SimplexTree
:members:
- :undoc-members:
:show-inheritance:
.. automethod:: gudhi.SimplexTree.__init__
diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc
index 5ba58d2b..6b534c9e 100644
--- a/src/python/doc/simplex_tree_sum.inc
+++ b/src/python/doc/simplex_tree_sum.inc
@@ -1,13 +1,18 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
- | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
- | :alt: Simplex tree representation | | :Introduced in: GUDHI 2.0.0 |
- | :figclass: align-center | The data structure is described in | |
- | | :cite:`boissonnatmariasimplextreealgorithmica` | :Copyright: MIT |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
+ | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
+ | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The data structure is described in | |
+ | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | .. image:: | * :doc:`ls_simplex_tree_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
diff --git a/src/python/doc/tangential_complex_sum.inc b/src/python/doc/tangential_complex_sum.inc
index d84aa433..2f330a07 100644
--- a/src/python/doc/tangential_complex_sum.inc
+++ b/src/python/doc/tangential_complex_sum.inc
@@ -1,14 +1,14 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin |
- | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | |
- | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Introduced in: GUDHI 2.0.0 |
- | | an unknown manifold. The running time depends only linearly on the | |
- | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :Copyright: MIT (`GPL v3 </licensing/>`_) |
- | | dimension :math:`k`. | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin |
+ | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in | |
+ | :figclass: align-center | :math:`d`-dimensional Euclidean space. The input is a point sample | :Since: GUDHI 2.0.0 |
+ | | coming from an unknown manifold. The running time depends only linearly| |
+ | | on the extrinsic dimension :math:`d` and exponentially on the intrinsic| :License: MIT (`GPL v3 </licensing/>`_) |
+ | | dimension :math:`k`. | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` |
+ +----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst
index 852cf5b6..3d45473b 100644
--- a/src/python/doc/tangential_complex_user.rst
+++ b/src/python/doc/tangential_complex_user.rst
@@ -194,11 +194,3 @@ The output is:
Tangential contains 4 vertices.
Inconsistencies has been fixed.
-
-
-Bibliography
-============
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docnames
- :style: unsrt
diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc
index a97f428d..c41de017 100644
--- a/src/python/doc/wasserstein_distance_sum.inc
+++ b/src/python/doc/wasserstein_distance_sum.inc
@@ -1,14 +1,12 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe |
- | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams. It's the minimum value c that can be achieved | |
- | :figclass: align-center | by a perfect matching between the points of the two diagrams (+ all | :Introduced in: GUDHI 3.1.0 |
- | | diagonal points), where the value of a matching is defined as the | |
- | Wasserstein distance is the q-th root of the sum of the | q-th root of the sum of all edge lengths to the power q. Edge lengths| :Copyright: MIT |
- | edge lengths to the power q. | are measured in norm p, for :math:`1 \leq p \leq \infty`. | |
- | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 |
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`wasserstein_distance_user` | |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+
+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe, Marc Glisse |
+ | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | |
+ | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Since: GUDHI 3.1.0 |
+ | | barycenters of a family of persistence diagrams. | |
+ | | | :License: MIT, BSD-3-Clause |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+
+ | * :doc:`wasserstein_distance_user` | |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index 32999a0c..76eb1469 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -9,28 +9,52 @@ Definition
.. include:: wasserstein_distance_sum.inc
-This implementation is based on ideas from "Large Scale Computation of Means and Cluster for Persistence Diagrams via Optimal Transport".
+The q-Wasserstein distance is defined as the minimal value achieved
+by a perfect matching between the points of the two diagrams (together
+with all diagonal points), where the value of a matching is defined as the
+q-th root of the sum of all edge lengths to the power q. Edge lengths
+are measured in norm p, for :math:`1 \leq p \leq \infty`.
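+
+As a formula (our notation; a restatement of the description above):
+
+.. math::
+
+    W_q(D_1, D_2) = \left( \inf_{\phi} \sum_{x \in D_1 \cup \Delta}
+    \lVert x - \phi(x) \rVert_p^q \right)^{1/q}
+
+where :math:`\phi` ranges over the perfect matchings between
+:math:`D_1 \cup \Delta` and :math:`D_2 \cup \Delta`, and :math:`\Delta`
+denotes the diagonal.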
+
+Distance Functions
+------------------
+
+Optimal Transport
+*****************
+
+:Requires: `Python Optimal Transport <installation.html#python-optimal-transport>`_ (POT) :math:`\geq` 0.5.1
+
+This first implementation uses the `Python Optimal Transport <installation.html#python-optimal-transport>`_
+library and is based on ideas from "Large Scale Computation of Means and Clusters for Persistence
+Diagrams using Optimal Transport" :cite:`10.5555/3327546.3327645`.
-Function
---------
.. autofunction:: gudhi.wasserstein.wasserstein_distance
+Hera
+****
+
+This other implementation comes from `Hera
+<https://bitbucket.org/grey_narn/hera/src/master/>`_ (BSD-3-Clause), which is
+based on "Geometry Helps to Compare Persistence Diagrams"
+:cite:`Kerber:2017:GHC:3047249.3064175` by Michael Kerber, Dmitriy
+Morozov, and Arnur Nigmetov.
+
+.. autofunction:: gudhi.hera.wasserstein_distance
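+
+A minimal, hedged sketch of calling the Hera backend (``delta`` bounds the
+relative approximation error; the two diagrams are just sample data):
+
+.. testcode::
+
+    import numpy as np
+    import gudhi.hera
+
+    dgm1 = np.array([[2.7, 3.7], [9.6, 14.]])
+    dgm2 = np.array([[2.8, 4.45], [9.5, 14.1]])
+    # delta=0.01 asks for a 1% relative approximation of the true distance
+    dist = gudhi.hera.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2., delta=0.01)
+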
Basic example
--------------
+*************
-This example computes the 1-Wasserstein distance from 2 persistence diagrams with euclidean ground metric.
-Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values.
+This example computes the 1-Wasserstein distance between 2 persistence diagrams with the Euclidean ground metric.
+Note that persistence diagrams must be submitted as (n x 2) numpy arrays.
.. testcode::
import gudhi.wasserstein
import numpy as np
- diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
- diag2 = np.array([[2.8, 4.45],[9.5, 14.1]])
+ dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
+ dgm2 = np.array([[2.8, 4.45],[9.5, 14.1]])
- message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(diag1, diag2, order=1., internal_p=2.)
+ message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2.)
print(message)
The output is:
@@ -38,3 +62,140 @@ The output is:
.. testoutput::
Wasserstein distance value = 1.45
+
+We can also access the optimal matching by setting `matching=True`.
+It is encoded as a list of index pairs (i,j), meaning that the i-th point in X
+is matched to the j-th point in Y.
+An index of -1 represents the diagonal.
+It handles essential parts (points with infinite coordinates). However, if the cardinalities of the essential parts differ,
+any matching has cost +inf and can thus be considered optimal. In such a case, the function returns `(np.inf, None)`.
+
+.. testcode::
+
+ import gudhi.wasserstein
+ import numpy as np
+
+ dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974], [3, np.inf]])
+ dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1], [4, np.inf]])
+ cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, matching=True, order=1, internal_p=2)
+
+ message_cost = "Wasserstein distance value = %.2f" %cost
+ print(message_cost)
+ dgm1_to_diagonal = matchings[matchings[:,1] == -1, 0]
+ dgm2_to_diagonal = matchings[matchings[:,0] == -1, 1]
+ off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0)
+
+ for i,j in off_diagonal_match:
+ print("point %s in dgm1 is matched to point %s in dgm2" %(i,j))
+ for i in dgm1_to_diagonal:
+ print("point %s in dgm1 is matched to the diagonal" %i)
+ for j in dgm2_to_diagonal:
+ print("point %s in dgm2 is matched to the diagonal" %j)
+
+ # An example where essential part cardinalities differ
+ dgm3 = np.array([[1, 2], [0, np.inf]])
+ dgm4 = np.array([[1, 2], [0, np.inf], [1, np.inf]])
+ cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm3, dgm4, matching=True, order=1, internal_p=2)
+ print("\nSecond example:")
+ print("cost:", cost)
+ print("matchings:", matchings)
+
+
+The output is:
+
+.. testoutput::
+
+ Wasserstein distance value = 3.15
+ point 0 in dgm1 is matched to point 0 in dgm2
+ point 1 in dgm1 is matched to point 2 in dgm2
+ point 3 in dgm1 is matched to point 3 in dgm2
+ point 2 in dgm1 is matched to the diagonal
+ point 1 in dgm2 is matched to the diagonal
+
+ Second example:
+ cost: inf
+ matchings: None
+
+
+Barycenters
+-----------
+
+:Requires: `Python Optimal Transport <installation.html#python-optimal-transport>`_ (POT) :math:`\geq` 0.5.1
+
+A Fréchet mean (or barycenter) is a generalization of the arithmetic
+mean in a non-linear space such as the space of persistence diagrams.
+Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is
+defined as a minimizer of the variance functional, that is, of
+:math:`\mu \mapsto \sum_{i=1}^n d_2(\mu,\mu_i)^2`,
+where :math:`d_2` denotes the Wasserstein-2 distance between
+persistence diagrams.
+It is known to exist and is generically unique. However, an exact
+computation is in general intractable. The current implementation
+is based on (Turner et al., 2014),
+:cite:`turner2014frechet`,
+and uses an EM-scheme to
+provide a local minimum of the variance functional (somewhat similar
+to the Lloyd algorithm for estimating a solution to the k-means
+problem). The local minimum returned depends on the initialization of
+the barycenter.
+The combinatorial structure of the algorithm limits its
+performance on large-scale problems (thousands of diagrams and of points
+per diagram).
+
+.. figure::
+ ./img/barycenter.png
+ :figclass: align-center
+
+   Illustration of a Fréchet mean between persistence
+   diagrams.
+
+
+.. autofunction:: gudhi.wasserstein.barycenter.lagrangian_barycenter
+
+Basic example
+*************
+
+This example estimates the Fréchet mean (also known as the Wasserstein
+barycenter) of four persistence diagrams.
+It is initialized on the 4th diagram.
+As the optimization problem is not convex, the output depends on the
+initialization and is only a local minimum of the objective function.
+The initialization can be given either as an integer (in which case the i-th
+diagram of the list is used as the initial estimate) or as a diagram.
+If None, one of the diagrams of the list is randomly selected as the
+initial estimate.
+Note that persistence diagrams must be submitted as
+(n x 2) numpy arrays and must not contain inf values.
+
+
+.. testcode::
+
+ from gudhi.wasserstein.barycenter import lagrangian_barycenter
+ import numpy as np
+
+ dg1 = np.array([[0.2, 0.5]])
+ dg2 = np.array([[0.2, 0.7]])
+ dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+ dg4 = np.array([])
+ pdiagset = [dg1, dg2, dg3, dg4]
+ bary = lagrangian_barycenter(pdiagset=pdiagset,init=3)
+
+ message = "Wasserstein barycenter estimated:"
+ print(message)
+ print(bary)
+
+The output is:
+
+.. testoutput::
+
+ Wasserstein barycenter estimated:
+ [[0.27916667 0.55416667]
+ [0.7375 0.7625 ]
+ [0.2375 0.2625 ]]
+
+Tutorial
+********
+
+This
+`notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-Barycenters-of-persistence-diagrams.ipynb>`_
+presents the concept of barycenter, or Fréchet mean, of a family of persistence diagrams.
diff --git a/src/python/doc/witness_complex_sum.inc b/src/python/doc/witness_complex_sum.inc
index 71b65a71..4416fec0 100644
--- a/src/python/doc/witness_complex_sum.inc
+++ b/src/python/doc/witness_complex_sum.inc
@@ -1,18 +1,18 @@
.. table::
- :widths: 30 50 20
+ :widths: 30 40 30
- +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich |
- | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | |
- | :alt: Witness complex representation | | :Introduced in: GUDHI 2.0.0 |
- | :figclass: align-center | The data structure is described in | |
- | | :cite:`boissonnatmariasimplextreealgorithmica`. | :Copyright: MIT (`GPL v3 </licensing/>`_ for Euclidean versions only) |
- | | | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 for Euclidean versions only |
- +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` |
- | | * :doc:`strong_witness_complex_ref` |
- | | * :doc:`euclidean_witness_complex_ref` |
- | | * :doc:`euclidean_strong_witness_complex_ref` |
- +-------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich |
+ | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | |
+ | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The data structure is described in | |
+ | | :cite:`boissonnatmariasimplextreealgorithmica`. | :License: MIT (`GPL v3 </licensing/>`_ for Euclidean versions only) |
+ | | | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 for Euclidean versions only |
+ +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` |
+ | | * :doc:`strong_witness_complex_ref` |
+ | | * :doc:`euclidean_witness_complex_ref` |
+ | | * :doc:`euclidean_strong_witness_complex_ref` |
+ +-------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst
index 7087fa98..08dcd288 100644
--- a/src/python/doc/witness_complex_user.rst
+++ b/src/python/doc/witness_complex_user.rst
@@ -126,10 +126,3 @@ Example2: Computing persistence using strong relaxed witness complex
Here is an example of constructing a strong witness complex filtration and computing persistence on it:
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
-
-Bibliography
-============
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docnames
- :style: unsrt
diff --git a/src/python/doc/zbibliography.rst b/src/python/doc/zbibliography.rst
new file mode 100644
index 00000000..e23fcf25
--- /dev/null
+++ b/src/python/doc/zbibliography.rst
@@ -0,0 +1,10 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Bibliography
+------------
+
+.. bibliography:: ../../biblio/bibliography.bib
+ :style: plain
+
diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
index 4079a469..c96121a6 100755
--- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
@@ -1,11 +1,12 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
-import gudhi
+import gudhi as gd
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -22,12 +23,12 @@ parser = argparse.ArgumentParser(
description="AlphaComplex creation from " "points read in a OFF file.",
epilog="Example: "
"example/alpha_complex_diagram_persistence_from_off_file_example.py "
- "-f ../data/points/tore3D_300.off -a 0.6"
+ "-f ../data/points/tore3D_300.off"
"- Constructs a alpha complex with the "
"points from the given OFF file.",
)
parser.add_argument("-f", "--file", type=str, required=True)
-parser.add_argument("-a", "--max_alpha_square", type=float, default=0.5)
+parser.add_argument("-a", "--max_alpha_square", type=float, required=False)
parser.add_argument("-b", "--band", type=float, default=0.0)
parser.add_argument(
"--no-diagram",
@@ -38,32 +39,24 @@ parser.add_argument(
args = parser.parse_args()
-with open(args.file, "r") as f:
- first_line = f.readline()
- if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- print("#####################################################################")
- print("AlphaComplex creation from points read in a OFF file")
-
- message = "AlphaComplex with max_edge_length=" + repr(args.max_alpha_square)
- print(message)
-
- alpha_complex = gudhi.AlphaComplex(off_file=args.file)
- simplex_tree = alpha_complex.create_simplex_tree(
- max_alpha_square=args.max_alpha_square
- )
-
- message = "Number of simplices=" + repr(simplex_tree.num_simplices())
- print(message)
-
- diag = simplex_tree.persistence()
-
- print("betti_numbers()=")
- print(simplex_tree.betti_numbers())
-
- if args.no_diagram == False:
- gudhi.plot_persistence_diagram(diag, band=args.band)
- plot.show()
- else:
- print(args.file, "is not a valid OFF file")
-
- f.close()
+print("##############################################################")
+print("AlphaComplex creation from points read in a OFF file")
+
+points = gd.read_points_from_off_file(off_file = args.file)
+alpha_complex = gd.AlphaComplex(points = points)
+if args.max_alpha_square is not None:
+ print("with max_edge_length=", args.max_alpha_square)
+ simplex_tree = alpha_complex.create_simplex_tree(
+ max_alpha_square=args.max_alpha_square
+ )
+else:
+ simplex_tree = alpha_complex.create_simplex_tree()
+
+print("Number of simplices=", simplex_tree.num_simplices())
+
+diag = simplex_tree.persistence()
+print("betti_numbers()=", simplex_tree.betti_numbers())
+if args.no_diagram == False:
+ import matplotlib.pyplot as plot
+ gd.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py
new file mode 100644
index 00000000..3558077e
--- /dev/null
+++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+from gudhi.datasets.generators import _points
+from gudhi import AlphaComplex
+
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Hind Montassif
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Hind Montassif"
+__copyright__ = "Copyright (C) 2021 Inria"
+__license__ = "MIT"
+
+print("#####################################################################")
+print("AlphaComplex creation from generated points on sphere")
+
+
+gen_points = _points.sphere(n_samples = 50, ambient_dim = 2, radius = 1, sample = "random")
+
+# Create an alpha complex
+alpha_complex = AlphaComplex(points = gen_points)
+simplex_tree = alpha_complex.create_simplex_tree()
+
+result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
+ repr(simplex_tree.num_simplices()) + ' simplices - ' + \
+ repr(simplex_tree.num_vertices()) + ' vertices.'
+print(result_str)
+
diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py
index 844d7a82..5d5ca66a 100755
--- a/src/python/example/alpha_complex_from_points_example.py
+++ b/src/python/example/alpha_complex_from_points_example.py
@@ -19,7 +19,7 @@ __license__ = "MIT"
print("#####################################################################")
print("AlphaComplex creation from points")
alpha_complex = AlphaComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]])
-simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=60.0)
+simplex_tree = alpha_complex.create_simplex_tree()
if simplex_tree.find([0, 1]):
print("[0, 1] Found !!")
@@ -47,7 +47,10 @@ else:
print("[4] Not found...")
print("dimension=", simplex_tree.dimension())
-print("filtrations=", simplex_tree.get_filtration())
+print("filtrations=")
+for simplex_with_filtration in simplex_tree.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("star([0])=", simplex_tree.get_star([0]))
print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1))
diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
index d5c33ec8..6b97fb3b 100755
--- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py
+++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
-import gudhi
+import gudhi as gd
import argparse
import math
+import numpy as np
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -32,74 +35,60 @@ parser.add_argument("-t", "--threshold", type=float, default=0.5)
parser.add_argument("-d", "--max_dimension", type=int, default=1)
args = parser.parse_args()
-with open(args.file, "r") as f:
- first_line = f.readline()
- if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- point_cloud = gudhi.read_points_from_off_file(off_file=args.file)
- print("#####################################################################")
- print("RipsComplex creation from points read in a OFF file")
-
- message = "RipsComplex with max_edge_length=" + repr(args.threshold)
- print(message)
-
- rips_complex = gudhi.RipsComplex(
- points=point_cloud, max_edge_length=args.threshold
- )
-
- rips_stree = rips_complex.create_simplex_tree(max_dimension=args.max_dimension)
-
- message = "Number of simplices=" + repr(rips_stree.num_simplices())
- print(message)
-
- rips_diag = rips_stree.persistence()
-
- print("#####################################################################")
- print("AlphaComplex creation from points read in a OFF file")
-
- message = "AlphaComplex with max_edge_length=" + repr(args.threshold)
- print(message)
-
- alpha_complex = gudhi.AlphaComplex(points=point_cloud)
- alpha_stree = alpha_complex.create_simplex_tree(
- max_alpha_square=(args.threshold * args.threshold)
- )
-
- message = "Number of simplices=" + repr(alpha_stree.num_simplices())
- print(message)
-
- alpha_diag = alpha_stree.persistence()
-
- max_b_distance = 0.0
- for dim in range(args.max_dimension):
- # Alpha persistence values needs to be transform because filtration
- # values are alpha square values
- funcs = [math.sqrt, math.sqrt]
- alpha_intervals = []
- for interval in alpha_stree.persistence_intervals_in_dimension(dim):
- alpha_intervals.append(
- map(lambda func, value: func(value), funcs, interval)
- )
-
- rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
- bottleneck_distance = gudhi.bottleneck_distance(
- rips_intervals, alpha_intervals
- )
- message = (
- "In dimension "
- + repr(dim)
- + ", bottleneck distance = "
- + repr(bottleneck_distance)
- )
- print(message)
- max_b_distance = max(bottleneck_distance, max_b_distance)
-
- print(
- "================================================================================"
- )
- message = "Bottleneck distance is " + repr(max_b_distance)
- print(message)
-
- else:
- print(args.file, "is not a valid OFF file")
-
- f.close()
+point_cloud = gd.read_points_from_off_file(off_file=args.file)
+print("##############################################################")
+print("RipsComplex creation from points read in a OFF file")
+
+message = "RipsComplex with max_edge_length=" + repr(args.threshold)
+print(message)
+
+rips_complex = gd.RipsComplex(
+ points=point_cloud, max_edge_length=args.threshold
+)
+
+rips_stree = rips_complex.create_simplex_tree(
+ max_dimension=args.max_dimension)
+
+message = "Number of simplices=" + repr(rips_stree.num_simplices())
+print(message)
+
+rips_stree.compute_persistence()
+
+print("##############################################################")
+print("AlphaComplex creation from points read in a OFF file")
+
+message = "AlphaComplex with max_edge_length=" + repr(args.threshold)
+print(message)
+
+alpha_complex = gd.AlphaComplex(points=point_cloud)
+alpha_stree = alpha_complex.create_simplex_tree(
+ max_alpha_square=(args.threshold * args.threshold)
+)
+
+message = "Number of simplices=" + repr(alpha_stree.num_simplices())
+print(message)
+
+alpha_stree.compute_persistence()
+
+max_b_distance = 0.0
+for dim in range(args.max_dimension):
+    # Alpha persistence values need to be transformed because filtration
+    # values are squared alpha values
+ alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim))
+
+ rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
+ bottleneck_distance = gd.bottleneck_distance(
+ rips_intervals, alpha_intervals
+ )
+ message = (
+ "In dimension "
+ + repr(dim)
+ + ", bottleneck distance = "
+ + repr(bottleneck_distance)
+ )
+ print(message)
+ max_b_distance = max(bottleneck_distance, max_b_distance)
+
+print("==============================================================")
+message = "Bottleneck distance is " + repr(max_b_distance)
+print(message)
diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py
index 119072eb..2801576e 100755
--- a/src/python/example/diagram_vectorizations_distances_kernels.py
+++ b/src/python/example/diagram_vectorizations_distances_kernels.py
@@ -5,30 +5,29 @@ import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.preprocessing import MinMaxScaler
-from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
+from gudhi.representations import (DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
- SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel
+ SlicedWassersteinKernel, PersistenceFisherKernel, WassersteinDistance)
-D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = [D]
+D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]).fit_transform(diags)
+proc1 = DiagramSelector(use=True, point_type="finite")
+proc2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())])
+proc3 = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))])
+D1 = proc3(proc2(proc1(D1)))
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagram for vector methods")
plt.show()
LS = Landscape(resolution=1000)
-L = LS.fit_transform(diags)
-plt.plot(L[0][:1000])
-plt.plot(L[0][1000:2000])
-plt.plot(L[0][2000:3000])
+L = LS(D1)
+plt.plot(L[:1000])
+plt.plot(L[1000:2000])
+plt.plot(L[2000:3000])
plt.title("Landscape")
plt.show()
@@ -36,50 +35,39 @@ def pow(n):
return lambda x: np.power(x[1]-x[0],n)
SH = Silhouette(resolution=1000, weight=pow(2))
-sh = SH.fit_transform(diags)
-plt.plot(sh[0])
+plt.plot(SH(D1))
plt.title("Silhouette")
plt.show()
BC = BettiCurve(resolution=1000)
-bc = BC.fit_transform(diags)
-plt.plot(bc[0])
+plt.plot(BC(D1))
plt.title("Betti Curve")
plt.show()
CP = ComplexPolynomial(threshold=-1, polynomial_type="T")
-cp = CP.fit_transform(diags)
-print("Complex polynomial is " + str(cp[0,:]))
+print("Complex polynomial is " + str(CP(D1)))
TV = TopologicalVector(threshold=-1)
-tv = TV.fit_transform(diags)
-print("Topological vector is " + str(tv[0,:]))
+print("Topological vector is " + str(TV(D1)))
PI = PersistenceImage(bandwidth=.1, weight=lambda x: x[1], im_range=[0,1,0,1], resolution=[100,100])
-pi = PI.fit_transform(diags)
-plt.imshow(np.flip(np.reshape(pi[0], [100,100]), 0))
+plt.imshow(np.flip(np.reshape(PI(D1), [100,100]), 0))
plt.title("Persistence Image")
plt.show()
ET = Entropy(mode="scalar")
-et = ET.fit_transform(diags)
-print("Entropy statistic is " + str(et[0,:]))
+print("Entropy statistic is " + str(ET(D1)))
ET = Entropy(mode="vector", normalized=False)
-et = ET.fit_transform(diags)
-plt.plot(et[0])
+plt.plot(ET(D1))
plt.title("Entropy function")
plt.show()
-D = np.array([[1.,5.],[3.,6.],[2.,7.]])
-diags2 = [D]
+D2 = np.array([[1.,5.],[3.,6.],[2.,7.]])
+D2 = proc3(proc2(proc1(D2)))
-diags2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags2)
-
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
-D = diags2[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
+plt.scatter(D2[:,0], D2[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagrams for kernel methods")
plt.show()
@@ -88,46 +76,41 @@ def arctan(C,p):
return lambda x: C*np.arctan(np.power(x[1], p))
PWG = PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("PWG kernel is " + str(Y[0][0]))
+print("PWG kernel is " + str(PWG(D1, D2)))
PWG = PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("Approximate PWG kernel is " + str(Y[0][0]))
+print("Approximate PWG kernel is " + str(PWG(D1, D2)))
PSS = PersistenceScaleSpaceKernel(bandwidth=1.)
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("PSS kernel is " + str(Y[0][0]))
+print("PSS kernel is " + str(PSS(D1, D2)))
PSS = PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("Approximate PSS kernel is " + str(Y[0][0]))
+print("Approximate PSS kernel is " + str(PSS(D1, D2)))
sW = SlicedWassersteinDistance(num_directions=100)
-X = sW.fit(diags)
-Y = sW.transform(diags2)
-print("SW distance is " + str(Y[0][0]))
+print("SW distance is " + str(sW(D1, D2)))
SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.)
-X = SW.fit(diags)
-Y = SW.transform(diags2)
-print("SW kernel is " + str(Y[0][0]))
+print("SW kernel is " + str(SW(D1, D2)))
+
+try:
+ W = WassersteinDistance(order=2, internal_p=2, mode="pot")
+ print("Wasserstein distance (POT) is " + str(W(D1, D2)))
+except ImportError:
+ print("WassersteinDistance (POT) is not available, you may be missing pot.")
+
+W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001)
+print("Wasserstein distance (hera) is " + str(W(D1, D2)))
-W = BottleneckDistance(epsilon=.001)
-X = W.fit(diags)
-Y = W.transform(diags2)
-print("Bottleneck distance is " + str(Y[0][0]))
+try:
+ from gudhi.representations import BottleneckDistance
+ W = BottleneckDistance(epsilon=.001)
+ print("Bottleneck distance is " + str(W(D1, D2)))
+except ImportError:
+ print("BottleneckDistance is not available, you may be missing CGAL.")
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("PF kernel is " + str(Y[0][0]))
+print("PF kernel is " + str(PF(D1, D2)))
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("Approximate PF kernel is " + str(Y[0][0]))
+print("Approximate PF kernel is " + str(PF(D1, D2)))
diff --git a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
index 4903667e..4e97cfe3 100755
--- a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
+import errno
+import os
import gudhi
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -44,8 +47,9 @@ args = parser.parse_args()
with open(args.file, "r") as f:
first_line = f.readline()
if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- print("#####################################################################")
- print("EuclideanStrongWitnessComplex creation from points read in a OFF file")
+ print("##############################################################")
+ print("EuclideanStrongWitnessComplex creation from points read "\
+ "in a OFF file")
witnesses = gudhi.read_points_from_off_file(off_file=args.file)
landmarks = gudhi.pick_n_random_points(
@@ -64,7 +68,8 @@ with open(args.file, "r") as f:
witnesses=witnesses, landmarks=landmarks
)
simplex_tree = witness_complex.create_simplex_tree(
- max_alpha_square=args.max_alpha_square, limit_dimension=args.limit_dimension
+ max_alpha_square=args.max_alpha_square,
+ limit_dimension=args.limit_dimension
)
message = "Number of simplices=" + repr(simplex_tree.num_simplices())
@@ -76,9 +81,11 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(diag, band=args.band)
plot.show()
else:
- print(args.file, "is not a valid OFF file")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ args.file)
f.close()
diff --git a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
index 339a8577..29076c74 100755
--- a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
+import errno
+import os
import gudhi
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -75,9 +78,11 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(diag, band=args.band)
plot.show()
else:
- print(args.file, "is not a valid OFF file")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ args.file)
f.close()
diff --git a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
index c692e66f..ee3290c6 100755
--- a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
+++ b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
+import errno
+import os
import gudhi
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -57,9 +60,10 @@ parser.add_argument(
args = parser.parse_args()
if is_file_perseus(args.file):
- print("#####################################################################")
+ print("##################################################################")
print("PeriodicCubicalComplex creation")
- periodic_cubical_complex = gudhi.PeriodicCubicalComplex(perseus_file=args.file)
+ periodic_cubical_complex = gudhi.PeriodicCubicalComplex(
+ perseus_file=args.file)
print("persistence(homology_coeff_field=3, min_persistence=0)=")
diag = periodic_cubical_complex.persistence(
@@ -70,7 +74,9 @@ if is_file_perseus(args.file):
print("betti_numbers()=")
print(periodic_cubical_complex.betti_numbers())
if args.no_barcode == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_barcode(diag)
plot.show()
else:
- print(args.file, "is not a valid perseus style file")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ args.file)
diff --git a/src/python/example/plot_alpha_complex.py b/src/python/example/plot_alpha_complex.py
index 99c18a7c..0924619b 100755
--- a/src/python/example/plot_alpha_complex.py
+++ b/src/python/example/plot_alpha_complex.py
@@ -1,8 +1,9 @@
#!/usr/bin/env python
import numpy as np
-import gudhi
-ac = gudhi.AlphaComplex(off_file='../../data/points/tore3D_1307.off')
+import gudhi as gd
+points = gd.read_points_from_off_file(off_file = '../../data/points/tore3D_1307.off')
+ac = gd.AlphaComplex(points = points)
st = ac.create_simplex_tree()
points = np.array([ac.get_point(i) for i in range(st.num_vertices())])
# We want to plot the alpha-complex with alpha=0.1.
diff --git a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
index 1acb187c..0b35dbc5 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
@@ -2,7 +2,6 @@
import sys
import argparse
-import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
@@ -41,7 +40,7 @@ parser.add_argument(
args = parser.parse_args()
if not (-1.0 < args.min_edge_correlation < 1.0):
- print("Wrong value of the treshold corelation (should be between -1 and 1).")
+ print("Wrong value of the threshold corelation (should be between -1 and 1).")
sys.exit(1)
print("#####################################################################")
@@ -84,5 +83,6 @@ invert_diag = [
]
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(invert_diag, band=args.band)
plot.show()
diff --git a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
index 79ccca96..8a9cc857 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
@@ -22,11 +21,12 @@ parser = argparse.ArgumentParser(
description="RipsComplex creation from " "a distance matrix read in a csv file.",
epilog="Example: "
"example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py "
- "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3"
+ "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3"
"- Constructs a Rips complex with the "
"distance matrix from the given csv file.",
)
parser.add_argument("-f", "--file", type=str, required=True)
+parser.add_argument("-s", "--separator", type=str, required=True)
parser.add_argument("-e", "--max_edge_length", type=float, default=0.5)
parser.add_argument("-d", "--max_dimension", type=int, default=1)
parser.add_argument("-b", "--band", type=float, default=0.0)
@@ -45,7 +45,7 @@ print("RipsComplex creation from distance matrix read in a csv file")
message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length)
print(message)
-distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file)
+distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file, separator=args.separator)
rips_complex = gudhi.RipsComplex(
distance_matrix=distance_matrix, max_edge_length=args.max_edge_length
)
@@ -60,5 +60,6 @@ print("betti_numbers()=")
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(diag, band=args.band)
plot.show()
diff --git a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
index c757aca7..e80233a9 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
+import errno
+import os
import gudhi
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -42,10 +45,11 @@ args = parser.parse_args()
with open(args.file, "r") as f:
first_line = f.readline()
if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- print("#####################################################################")
+ print("##############################################################")
print("RipsComplex creation from points read in a OFF file")
- message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length)
+ message = "RipsComplex with max_edge_length=" + \
+ repr(args.max_edge_length)
print(message)
point_cloud = gudhi.read_points_from_off_file(off_file=args.file)
@@ -65,9 +69,11 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(diag, band=args.band)
plot.show()
else:
- print(args.file, "is not a valid OFF file")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ args.file)
f.close()
diff --git a/src/python/example/rips_complex_edge_collapse_example.py b/src/python/example/rips_complex_edge_collapse_example.py
new file mode 100755
index 00000000..b26eb9fc
--- /dev/null
+++ b/src/python/example/rips_complex_edge_collapse_example.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+import gudhi
+import matplotlib.pyplot as plt
+import time
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2016 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Vincent Rouvreau"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+print("#####################################################################")
+print("RipsComplex (only the one-skeleton) creation from tore3D_300.off file")
+
+off_file = gudhi.__root_source_dir__ + '/data/points/tore3D_300.off'
+point_cloud = gudhi.read_points_from_off_file(off_file = off_file)
+rips_complex = gudhi.RipsComplex(points=point_cloud, max_edge_length=12.0)
+simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
+print('1. Rips complex is of dimension ', simplex_tree.dimension(), ' - ',
+ simplex_tree.num_simplices(), ' simplices - ',
+ simplex_tree.num_vertices(), ' vertices.')
+
+# Expansion of this one-skeleton would require a lot of memory. Let's collapse it
+start = time.process_time()
+simplex_tree.collapse_edges()
+print('2. Rips complex is of dimension ', simplex_tree.dimension(), ' - ',
+ simplex_tree.num_simplices(), ' simplices - ',
+ simplex_tree.num_vertices(), ' vertices.')
+simplex_tree.expansion(3)
+diag = simplex_tree.persistence()
+print("Collapse, expansion and persistence computation took ", time.process_time() - start, " sec.")
+
+# Use subplots to display the two persistence diagrams side by side
+fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
+gudhi.plot_persistence_diagram(diag, axes=axes[0])
+axes[0].set_title("Persistence after 1 collapse")
+
+# Collapse can be performed several times. Let's collapse it 3 times
+start = time.process_time()
+simplex_tree.collapse_edges(nb_iterations = 3)
+print('3. Rips complex is of dimension ', simplex_tree.dimension(), ' - ',
+ simplex_tree.num_simplices(), ' simplices - ',
+ simplex_tree.num_vertices(), ' vertices.')
+simplex_tree.expansion(3)
+diag = simplex_tree.persistence()
+print("Collapse, expansion and persistence computation took ", time.process_time() - start, " sec.")
+
+gudhi.plot_persistence_diagram(diag, axes=axes[1])
+axes[1].set_title("Persistence after 3 more collapses")
+
+# Plot the 2 persistence diagrams side by side to check the persistence is the same
+plt.show()
\ No newline at end of file
diff --git a/src/python/example/rips_complex_from_points_example.py b/src/python/example/rips_complex_from_points_example.py
index 59d8a261..c05703c6 100755
--- a/src/python/example/rips_complex_from_points_example.py
+++ b/src/python/example/rips_complex_from_points_example.py
@@ -22,6 +22,9 @@ rips = gudhi.RipsComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]], max_edge_lengt
simplex_tree = rips.create_simplex_tree(max_dimension=1)
-print("filtrations=", simplex_tree.get_filtration())
+print("filtrations=")
+for simplex_with_filtration in simplex_tree.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("star([0])=", simplex_tree.get_star([0]))
print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1))
diff --git a/src/python/example/simplex_tree_example.py b/src/python/example/simplex_tree_example.py
index 30de00da..c4635dc5 100755
--- a/src/python/example/simplex_tree_example.py
+++ b/src/python/example/simplex_tree_example.py
@@ -38,8 +38,14 @@ else:
print("dimension=", st.dimension())
-st.initialize_filtration()
-print("filtration=", st.get_filtration())
+print("simplices=")
+for simplex_with_filtration in st.get_simplices():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
+print("filtration=")
+for simplex_with_filtration in st.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("filtration[1, 2]=", st.filtration([1, 2]))
print("filtration[4, 2]=", st.filtration([4, 2]))
diff --git a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
index f0df2189..a4b4e9f5 100755
--- a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
+++ b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
@@ -1,11 +1,14 @@
#!/usr/bin/env python
import argparse
-import matplotlib.pyplot as plot
+import errno
+import os
import gudhi
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+ which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+ license details.
Author(s): Vincent Rouvreau
Copyright (C) 2016 Inria
@@ -19,7 +22,7 @@ __copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
parser = argparse.ArgumentParser(
- description="TangentialComplex creation from " "points read in a OFF file.",
+ description="TangentialComplex creation from points read in an OFF file.",
epilog="Example: "
"example/tangential_complex_plain_homology_from_off_file_example.py "
"-f ../data/points/tore3D_300.off -i 3"
@@ -41,10 +44,11 @@ args = parser.parse_args()
with open(args.file, "r") as f:
first_line = f.readline()
if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- print("#####################################################################")
+ print("##############################################################")
print("TangentialComplex creation from points read in a OFF file")
- tc = gudhi.TangentialComplex(intrisic_dim=args.intrisic_dim, off_file=args.file)
+ tc = gudhi.TangentialComplex(intrisic_dim=args.intrisic_dim,
+ off_file=args.file)
tc.compute_tangential_complex()
st = tc.create_simplex_tree()
@@ -57,9 +61,11 @@ with open(args.file, "r") as f:
print(st.betti_numbers())
if args.no_diagram == False:
+ import matplotlib.pyplot as plot
gudhi.plot_persistence_diagram(diag, band=args.band)
plot.show()
else:
- print(args.file, "is not a valid OFF file")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ args.file)
f.close()
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index fff3e920..375e1561 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2016 Inria
@@ -7,16 +9,18 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
from libc.stdint cimport intptr_t
-import os
+import warnings
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
+from gudhi import read_points_from_off_file
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -24,91 +28,137 @@ __license__ = "GPL v3"
cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
cdef cppclass Alpha_complex_interface "Gudhi::alpha_complex::Alpha_complex_interface":
- Alpha_complex_interface(vector[vector[double]] points) except +
- # bool from_file is a workaround for cython to find the correct signature
- Alpha_complex_interface(string off_file, bool from_file) except +
- vector[double] get_point(int vertex) except +
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) except +
+ Alpha_complex_interface(vector[vector[double]] points, vector[double] weights, bool fast_version, bool exact_version) nogil except +
+ vector[double] get_point(int vertex) nogil except +
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square, bool default_filtration_value) nogil except +
+ @staticmethod
+ void set_float_relative_precision(double precision) nogil
+ @staticmethod
+ double get_float_relative_precision() nogil
# AlphaComplex python interface
cdef class AlphaComplex:
- """AlphaComplex is a simplicial complex constructed from the finite cells
- of a Delaunay Triangulation.
+ """AlphaComplex is a simplicial complex constructed from the finite cells of a Delaunay Triangulation.
- The filtration value of each simplex is computed as the square of the
- circumradius of the simplex if the circumsphere is empty (the simplex is
- then said to be Gabriel), and as the minimum of the filtration values of
- the codimension 1 cofaces that make it not Gabriel otherwise.
+ The filtration value of each simplex is computed as the square of the circumradius of the simplex if the
+ circumsphere is empty (the simplex is then said to be Gabriel), and as the minimum of the filtration values of the
+ codimension 1 cofaces that make it not Gabriel otherwise.
- All simplices that have a filtration value strictly greater than a given
- alpha squared value are not inserted into the complex.
+ All simplices that have a filtration value strictly greater than a given alpha squared value are not inserted into
+ the complex.
.. note::
- When Alpha_complex is constructed with an infinite value of alpha, the
- complex is a Delaunay complex.
-
+ When Alpha_complex is constructed with an infinite value of alpha, the complex is a Delaunay complex.
"""
- cdef Alpha_complex_interface * thisptr
+ cdef Alpha_complex_interface * this_ptr
# Fake constructor that does nothing but documenting the constructor
- def __init__(self, points=None, off_file=''):
+ def __init__(self, points=[], off_file='', weights=None, precision='safe'):
"""AlphaComplex constructor.
:param points: A list of points in d-Dimension.
- :type points: list of list of double
-
- Or
+ :type points: Iterable[Iterable[float]]
- :param off_file: An OFF file style name.
+ :param off_file: **[deprecated]** An `OFF file style <fileformats.html#off-file-format>`_ name.
+ If an `off_file` is given with `points` as arguments, only points from the file are taken into account.
:type off_file: string
+
+ :param weights: A list of weights. If set, the number of weights must correspond to the number of points.
+ :type weights: Iterable[float]
+
+ :param precision: Alpha complex precision can be 'fast', 'safe' or 'exact'. Default is 'safe'.
+ :type precision: string
+
+ :raises FileNotFoundError: **[deprecated]** If `off_file` is set but not found.
+ :raises ValueError: In case of inconsistency between the number of points and weights.
"""
# The real cython constructor
- def __cinit__(self, points = None, off_file = ''):
+ def __cinit__(self, points = [], off_file = '', weights=None, precision = 'safe'):
+ assert precision in ['fast', 'safe', 'exact'], "Alpha complex precision can only be 'fast', 'safe' or 'exact'"
+ cdef bool fast = precision == 'fast'
+ cdef bool exact = precision == 'exact'
+
if off_file:
- if os.path.isfile(off_file):
- self.thisptr = new Alpha_complex_interface(off_file.encode('utf-8'), True)
- else:
- print("file " + off_file + " not found.")
- else:
- if points is None:
- # Empty Alpha construction
- points=[]
- self.thisptr = new Alpha_complex_interface(points)
-
+ warnings.warn("off_file is a deprecated parameter, please consider using gudhi.read_points_from_off_file",
+ DeprecationWarning)
+ points = read_points_from_off_file(off_file = off_file)
+
+ # weights are set but inconsistent with the number of points
+ if weights is not None and len(weights) != len(points):
+ raise ValueError("Inconsistency between the number of points and weights")
+
+ # need to copy the points to use them without the gil
+ cdef vector[vector[double]] pts
+ cdef vector[double] wgts
+ pts = points
+ if weights is not None:
+ wgts = weights
+ with nogil:
+ self.this_ptr = new Alpha_complex_interface(pts, wgts, fast, exact)
def __dealloc__(self):
- if self.thisptr != NULL:
- del self.thisptr
+ if self.this_ptr != NULL:
+ del self.this_ptr
def __is_defined(self):
"""Returns true if AlphaComplex pointer is not NULL.
"""
- return self.thisptr != NULL
+ return self.this_ptr != NULL
def get_point(self, vertex):
- """This function returns the point corresponding to a given vertex.
+ """This function returns the point corresponding to a given vertex from the :class:`~gudhi.SimplexTree`.
:param vertex: The vertex.
:type vertex: int
:rtype: list of float
:returns: the point.
"""
- return self.thisptr.get_point(vertex)
+ return self.this_ptr.get_point(vertex)
- def create_simplex_tree(self, max_alpha_square = float('inf')):
+ def create_simplex_tree(self, max_alpha_square = float('inf'), default_filtration_value = False):
"""
- :param max_alpha_square: The maximum alpha square threshold the
- simplices shall not exceed. Default is set to infinity, and
- there is very little point using anything else since it does
- not save time.
+ :param max_alpha_square: The maximum alpha square threshold the simplices shall not exceed. Default is set to
+ infinity, and there is very little point using anything else since it does not save time.
:type max_alpha_square: float
+ :param default_filtration_value: Set this value to `True` if the filtration values do not need to be computed
+ (they will be set to `NaN`). Default value is `False` (i.e. the filtration values are computed).
+ :type default_filtration_value: bool
:returns: A simplex tree created from the Delaunay Triangulation.
:rtype: SimplexTree
"""
stree = SimplexTree()
+ cdef double mas = max_alpha_square
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, max_alpha_square)
+ cdef bool default_filt = default_filtration_value == True
+ with nogil:
+ self.this_ptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
+ mas, default_filt)
return stree
+
+ @staticmethod
+ def set_float_relative_precision(precision):
+ """
+ :param precision: When the AlphaComplex is constructed with :code:`precision = 'safe'` (the default),
+ one can set the float relative precision of filtration values computed in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree`.
+ Default is :code:`1e-5` (cf. :func:`~gudhi.AlphaComplex.get_float_relative_precision`).
+ For more details, please refer to
+ `CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double <https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html>`_
+ :type precision: float
+ """
+ if precision <= 0. or precision >= 1.:
+ raise ValueError("Relative precision value must be strictly greater than 0 and strictly lower than 1")
+ Alpha_complex_interface.set_float_relative_precision(precision)
+
+ @staticmethod
+ def get_float_relative_precision():
+ """
+ :returns: The float relative precision of filtration values computation in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree` when the AlphaComplex is constructed with
+ :code:`precision = 'safe'` (the default).
+ :rtype: float
+ """
+ return Alpha_complex_interface.get_float_relative_precision()
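The new keyword arguments above are documented separately; here is a minimal usage sketch combining them (the coordinates and weights are invented for illustration, not part of this patch):

    import gudhi

    pts = [[1., 1.], [7., 0.], [4., 6.], [9., 6.]]
    # One weight per point; a ValueError is raised if the lengths differ.
    ac = gudhi.AlphaComplex(points=pts, weights=[4., 4., 4., 4.], precision='safe')
    stree = ac.create_simplex_tree()
    # Delaunay complex only: skip the filtration computation (values are set to NaN).
    dc = gudhi.AlphaComplex(points=pts).create_simplex_tree(default_filtration_value=True)
    # In 'safe' mode, trade accuracy for speed (value must be strictly between 0 and 1).
    gudhi.AlphaComplex.set_float_relative_precision(1e-6)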
diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc
new file mode 100644
index 00000000..8a3d669a
--- /dev/null
+++ b/src/python/gudhi/bottleneck.cc
@@ -0,0 +1,55 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Bottleneck.h>
+
+#include <pybind11_diagram_utils.h>
+
+// For compatibility with older versions, we want to support e=None.
+// In C++17, the recommended way is std::optional<double>.
+double bottleneck(Dgm d1, Dgm d2, py::object epsilon)
+{
+ double e = (std::numeric_limits<double>::min)();
+ if (!epsilon.is_none()) e = epsilon.cast<double>();
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, e);
+}
+
+PYBIND11_MODULE(bottleneck, m) {
+ m.attr("__license__") = "GPL v3";
+ m.def("bottleneck_distance", &bottleneck,
+ py::arg("diagram_1"), py::arg("diagram_2"),
+ py::arg("e") = py::none(),
+ R"pbdoc(
+ Compute the Bottleneck distance between two diagrams.
+ Points at infinity and on the diagonal are supported.
+
+ :param diagram_1: The first diagram.
+ :type diagram_1: numpy array of shape (m,2)
+ :param diagram_2: The second diagram.
+ :type diagram_2: numpy array of shape (n,2)
+ :param e: If `e` is 0, this uses an expensive algorithm to compute the
+ exact distance.
+ If `e` is not 0, it asks for an additive `e`-approximation, and
+ currently also allows a small multiplicative error (the last 2 or 3
+ bits of the mantissa may be wrong). This version of the algorithm takes
+ advantage of the limited precision of `double` and is usually a lot
+ faster to compute, regardless of the value of `e`.
+ Thus, by default (`e=None`), `e` is the smallest positive double.
+ :type e: float
+ :rtype: float
+ :returns: the bottleneck distance.
+ )pbdoc");
+}
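A small sketch of calling the rebound function from Python, with invented diagram values, illustrating the `e` parameter documented above:

    import numpy as np
    import gudhi

    d1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974], [3.0, np.inf]])
    d2 = np.array([[2.8, 4.45], [9.5, 14.1], [3.2, np.inf]])
    print(gudhi.bottleneck_distance(d1, d2))     # e=None: smallest positive double, fast
    print(gudhi.bottleneck_distance(d1, d2, 0))  # e=0: exact but expensive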
diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx
deleted file mode 100644
index af011e88..00000000
--- a/src/python/gudhi/bottleneck.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Vincent Rouvreau
-#
-# Copyright (C) 2016 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-from cython cimport numeric
-from libcpp.vector cimport vector
-from libcpp.utility cimport pair
-import os
-
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "GPL v3"
-
-cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram":
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double)
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]])
-
-def bottleneck_distance(diagram_1, diagram_2, e=None):
- """This function returns the point corresponding to a given vertex.
-
- :param diagram_1: The first diagram.
- :type diagram_1: vector[pair[double, double]]
- :param diagram_2: The second diagram.
- :type diagram_2: vector[pair[double, double]]
- :param e: If `e` is 0, this uses an expensive algorithm to compute the
- exact distance.
- If `e` is not 0, it asks for an additive `e`-approximation, and
- currently also allows a small multiplicative error (the last 2 or 3
- bits of the mantissa may be wrong). This version of the algorithm takes
- advantage of the limited precision of `double` and is usually a lot
- faster to compute, whatever the value of `e`.
-
- Thus, by default, `e` is the smallest positive double.
- :type e: float
- :rtype: float
- :returns: the bottleneck distance.
- """
- if e is None:
- # Default value is the smallest double value (not 0, 0 is for exact version)
- return bottleneck(diagram_1, diagram_2)
- else:
- # Can be 0 for exact version
- return bottleneck(diagram_1, diagram_2, e)
diff --git a/src/python/gudhi/clustering/__init__.py b/src/python/gudhi/clustering/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/clustering/__init__.py
diff --git a/src/python/gudhi/clustering/_tomato.cc b/src/python/gudhi/clustering/_tomato.cc
new file mode 100644
index 00000000..a76a2c3a
--- /dev/null
+++ b/src/python/gudhi/clustering/_tomato.cc
@@ -0,0 +1,277 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <boost/container/flat_map.hpp>
+#include <boost/pending/disjoint_sets.hpp>
+#include <boost/property_map/property_map.hpp>
+#include <boost/property_map/transform_value_property_map.hpp>
+#include <boost/property_map/vector_property_map.hpp>
+#include <boost/property_map/function_property_map.hpp>
+#include <boost/iterator/counting_iterator.hpp>
+#include <boost/range/irange.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+#include <vector>
+#include <unordered_map>
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+#include <iostream>
+
+namespace py = pybind11;
+
+template <class T, class = std::enable_if_t<std::is_integral<T>::value>>
+int getint(int i) {
+ return i;
+}
+// Gcc-8 has a bug that breaks this version, fixed in gcc-9
+// template<class T, class=decltype(std::declval<T>().template cast<int>())>
+// int getint(T i){return i.template cast<int>();}
+template <class T>
+auto getint(T i) -> decltype(i.template cast<int>()) {
+ return i.template cast<int>();
+}
+
+// Raw clusters are clusters obtained through single-linkage, no merging.
+
+typedef int Point_index;
+typedef int Cluster_index;
+struct Merge {
+ Cluster_index first, second;
+ double persist;
+};
+
+template <class Neighbors, class Density, class Order, class ROrder>
+auto tomato(Point_index num_points, Neighbors const& neighbors, Density const& density, Order const& order,
+ ROrder const& rorder) {
+ // point index --> index of raw cluster it belongs to
+ std::vector<Cluster_index> raw_cluster;
+ raw_cluster.reserve(num_points);
+ // cluster index --> index of top point in the cluster
+ Cluster_index n_raw_clusters = 0; // current number of raw clusters seen
+ //
+ std::vector<Merge> merges;
+ struct Data {
+ Cluster_index parent;
+ int rank;
+ Point_index max;
+ }; // information on a cluster
+ std::vector<Data> ds_base;
+ // boost::vector_property_map does resize(size+1) for every new element, don't use it
+ auto ds_data =
+ boost::make_function_property_map<std::size_t>([&ds_base](std::size_t n) -> Data& { return ds_base[n]; });
+ auto ds_parent =
+ boost::make_transform_value_property_map([](auto& p) -> Cluster_index& { return p.parent; }, ds_data);
+ auto ds_rank = boost::make_transform_value_property_map([](auto& p) -> int& { return p.rank; }, ds_data);
+ boost::disjoint_sets<decltype(ds_rank), decltype(ds_parent)> ds(
+ ds_rank, ds_parent); // on the clusters, not directly the points
+ std::vector<std::array<double, 2>> persistence; // diagram (finite points)
+ boost::container::flat_map<Cluster_index, Cluster_index>
+ adj_clusters; // first: the merged cluster, second: the raw cluster
+ // we only care about the raw cluster, we could use a vector to store the second, store first into a set, and only
+ // insert in the vector if merged is absent from the set
+
+ for (Point_index i = 0; i < num_points; ++i) {
+ // auto&& ngb = neighbors[order[i]];
+ // TODO: have a specialization where we don't need python types and py::cast
+ // TODO: move py::cast and getint into Neighbors
+ py::object ngb = neighbors[py::cast(order[i])]; // auto&& also seems to work
+ adj_clusters.clear();
+ Point_index j = i; // highest neighbor
+ // for(Point_index k : ngb)
+ for (auto k_py : ngb) {
+ Point_index k = rorder[getint(k_py)];
+ if (k >= i || k < 0) // ???
+ continue;
+ if (k < j) j = k;
+ Cluster_index rk = raw_cluster[k];
+ adj_clusters.emplace(ds.find_set(rk), rk);
+ // does not insert if ck=ds.find_set(rk) already seen
+ // which rk we keep from those with the same ck is arbitrary
+ }
+ assert((Point_index)raw_cluster.size() == i);
+ if (i == j) { // local maximum -> new cluster
+ Cluster_index c = n_raw_clusters++;
+ ds_base.emplace_back(); // could be integrated in ds_data, but then we would check the size for every access
+ ds.make_set(c);
+ ds_base[c].max = i; // max
+ raw_cluster.push_back(c);
+ continue;
+ } else { // add i to the cluster of j
+ assert(j < i);
+ raw_cluster.push_back(raw_cluster[j]);
+ // FIXME: we are adding point i to the raw cluster of j, but that one might not be in adj_clusters, so we may
+ // merge clusters A and B through a point of C. It is strange, but I don't know if it can really cause problems.
+ // We could just not set j at all and use arbitrarily the first element of adj_clusters.
+ }
+ // possibly merge clusters
+ // we could sort, in case there are several merges, but that doesn't seem so useful
+ Cluster_index rj = raw_cluster[j];
+ Cluster_index cj = ds.find_set(rj);
+ Cluster_index orig_cj = cj;
+ for (auto ckk : adj_clusters) {
+ Cluster_index rk = ckk.second;
+ Cluster_index ck = ckk.first;
+ if (ck == orig_cj) continue;
+ assert(ck == ds.find_set(rk));
+ Point_index j = ds_base[cj].max;
+ Point_index k = ds_base[ck].max;
+ Point_index young = std::max(j, k);
+ Point_index old = std::min(j, k);
+ auto d_young = density[order[young]];
+ auto d_i = density[order[i]];
+ assert(d_young >= d_i);
+ // Always merge (the non-hierarchical algorithm would only conditionally merge here)
+ persistence.push_back({d_young, d_i});
+ assert(ds.find_set(rj) != ds.find_set(rk));
+ ds.link(cj, ck);
+ cj = ds.find_set(cj);
+ ds_base[cj].max = old; // just one parent, no need for find_set
+ // record the raw clusters, we don't know what will have already been merged.
+ merges.push_back({rj, rk, d_young - d_i});
+ }
+ }
+ {
+ boost::counting_iterator<int> b(0), e(ds_base.size());
+ ds.compress_sets(b, e);
+ // Now we stop using find_sets and look at the parent directly
+ // rank is reused to rename clusters contiguously 0, 1, etc
+ }
+ // Maximum for each connected component
+ std::vector<double> max_cc;
+ for (Cluster_index i = 0; i < n_raw_clusters; ++i) {
+ if (ds_base[i].parent == i) max_cc.push_back(density[order[ds_base[i].max]]);
+ }
+ assert((Cluster_index)(merges.size() + max_cc.size()) == n_raw_clusters);
+
+ // TODO: create a "noise" cluster, merging all those not prominent enough?
+
+ // Replay the merges, in increasing order of prominence, to build the hierarchy
+ std::sort(merges.begin(), merges.end(), [](Merge const& a, Merge const& b) { return a.persist < b.persist; });
+ std::vector<std::array<Cluster_index, 2>> children;
+ children.reserve(merges.size());
+ {
+ struct Dat {
+ Cluster_index parent;
+ int rank;
+ Cluster_index name;
+ };
+ std::vector<Dat> ds_bas(2 * n_raw_clusters - 1);
+ Cluster_index i;
+ auto ds_dat =
+ boost::make_function_property_map<std::size_t>([&ds_bas](std::size_t n) -> Dat& { return ds_bas[n]; });
+ auto ds_par = boost::make_transform_value_property_map([](auto& p) -> Cluster_index& { return p.parent; }, ds_dat);
+ auto ds_ran = boost::make_transform_value_property_map([](auto& p) -> int& { return p.rank; }, ds_dat);
+ boost::disjoint_sets<decltype(ds_ran), decltype(ds_par)> ds(ds_ran, ds_par);
+ for (i = 0; i < n_raw_clusters; ++i) {
+ ds.make_set(i);
+ ds_bas[i].name = i;
+ }
+ for (Merge const& m : merges) {
+ Cluster_index j = ds.find_set(m.first);
+ Cluster_index k = ds.find_set(m.second);
+ assert(j != k);
+ children.push_back({ds_bas[j].name, ds_bas[k].name});
+ ds.make_set(i);
+ ds.link(i, j);
+ ds.link(i, k);
+ ds_bas[ds.find_set(i)].name = i;
+ ++i;
+ }
+ }
+
+ std::vector<Cluster_index> raw_cluster_ordered(num_points);
+ for (int i = 0; i < num_points; ++i) raw_cluster_ordered[i] = raw_cluster[rorder[i]];
+ // return raw_cluster, children, persistence
+ // TODO avoid copies: https://github.com/pybind/pybind11/issues/1042
+ return py::make_tuple(py::array(raw_cluster_ordered.size(), raw_cluster_ordered.data()),
+ py::array(children.size(), children.data()), py::array(persistence.size(), persistence.data()),
+ py::array(max_cc.size(), max_cc.data()));
+}
+
+auto merge(py::array_t<Cluster_index, py::array::c_style> children, Cluster_index n_leaves, Cluster_index n_final) {
+ if (n_final > n_leaves) {
+ std::cerr << "The number of clusters required " << n_final << " is larger than the number of mini-clusters " << n_leaves << '\n';
+ n_final = n_leaves; // or return something special and let Tomato use leaf_labels_?
+ }
+ py::buffer_info cbuf = children.request();
+ if ((cbuf.ndim != 2 || cbuf.shape[1] != 2) && (cbuf.ndim != 1 || cbuf.shape[0] != 0))
+ throw std::runtime_error("internal error: children have to be (n,2) or empty");
+ const int n_merges = cbuf.shape[0];
+ Cluster_index* d = (Cluster_index*)cbuf.ptr;
+ if (n_merges + n_final < n_leaves) {
+ std::cerr << "The number of clusters required " << n_final << " is smaller than the number of connected components " << n_leaves - n_merges << '\n';
+ n_final = n_leaves - n_merges;
+ }
+ struct Dat {
+ Cluster_index parent;
+ int rank;
+ int name;
+ };
+ std::vector<Dat> ds_bas(2 * n_leaves - 1);
+ auto ds_dat = boost::make_function_property_map<std::size_t>([&ds_bas](std::size_t n) -> Dat& { return ds_bas[n]; });
+ auto ds_par = boost::make_transform_value_property_map([](auto& p) -> Cluster_index& { return p.parent; }, ds_dat);
+ auto ds_ran = boost::make_transform_value_property_map([](auto& p) -> int& { return p.rank; }, ds_dat);
+ boost::disjoint_sets<decltype(ds_ran), decltype(ds_par)> ds(ds_ran, ds_par);
+ Cluster_index i;
+ for (i = 0; i < n_leaves; ++i) {
+ ds.make_set(i);
+ ds_bas[i].name = -1;
+ }
+ for (Cluster_index m = 0; m < n_leaves - n_final; ++m) {
+ Cluster_index j = ds.find_set(d[2 * m]);
+ Cluster_index k = ds.find_set(d[2 * m + 1]);
+ assert(j != k);
+ ds.make_set(i);
+ ds.link(i, j);
+ ds.link(i, k);
+ ++i;
+ }
+ Cluster_index next_cluster_name = 0;
+ std::vector<Cluster_index> ret;
+ ret.reserve(n_leaves);
+ for (Cluster_index j = 0; j < n_leaves; ++j) {
+ Cluster_index k = ds.find_set(j);
+ if (ds_bas[k].name == -1) ds_bas[k].name = next_cluster_name++;
+ ret.push_back(ds_bas[k].name);
+ }
+ return py::array(ret.size(), ret.data());
+}
+
+// TODO: Do a special version when ngb is a numpy array, where we can cast to int[k][n] ?
+// py::isinstance<py::array_t<std::int32_t>> (or py::isinstance<py::array> and test the dtype) and flags&c_style
+// or an overload (dropping forcecast?)
+// also do it for the case where we don't get one array, but where each neighbor list is an array?
+auto hierarchy(py::handle ngb, py::array_t<double, py::array::c_style | py::array::forcecast> density) {
+ // used to be py::iterable ngb, but that's inconvenient if it doesn't come pre-sorted
+ // use py::handle and check if [] (aka __getitem__) works? But then we need to build an object to pass it to []
+ // (I _think_ handle is ok and we don't need object here)
+ py::buffer_info wbuf = density.request();
+ if (wbuf.ndim != 1) throw std::runtime_error("density must be 1D");
+ const int n = wbuf.shape[0];
+ double* d = (double*)wbuf.ptr;
+ // Vector { 0, 1, ..., n-1 }
+ std::vector<Point_index> order(boost::counting_iterator<Point_index>(0), boost::counting_iterator<Point_index>(n));
+ // Permutation of the indices to get points in decreasing order of density
+ std::sort(std::begin(order), std::end(order), [=](Point_index i, Point_index j) { return d[i] > d[j]; });
+ // Inverse permutation
+ std::vector<Point_index> rorder(n);
+ for (Point_index i : boost::irange(0, n)) rorder[order[i]] = i;
+ // Used as:
+ // order[i] is the index of the point with i-th largest density
+ // rorder[i] is the rank of the i-th point in order of decreasing density
+ // TODO: put a wrapper on ngb and d so we don't need to pass (r)order (there is still the issue of reordering the
+ // output)
+ return tomato(n, ngb, d, order, rorder);
+}
+
+PYBIND11_MODULE(_tomato, m) {
+ m.doc() = "Internals of tomato clustering";
+ m.def("hierarchy", &hierarchy, "does the clustering");
+ m.def("merge", &merge, "merge clusters");
+}
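To make the contract of the two bindings concrete, a toy run (the neighbor lists and densities below are invented; the public entry point is the `Tomato` class added below):

    import numpy as np
    from gudhi.clustering._tomato import hierarchy, merge

    # 5 points on a path graph; each neighbor list contains the point itself.
    density = np.array([1.0, 0.3, 0.9, 0.2, 0.8])
    neighbors = [[0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4]]
    leaf_labels, children, diagram, max_cc = hierarchy(neighbors, density)
    n_leaves = len(max_cc) + len(children)   # here: 3 density peaks
    # Replay the least prominent merges to keep 2 clusters, relabelled from 0.
    labels = merge(children, n_leaves, 2)[leaf_labels]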
diff --git a/src/python/gudhi/clustering/tomato.py b/src/python/gudhi/clustering/tomato.py
new file mode 100644
index 00000000..d0e9995c
--- /dev/null
+++ b/src/python/gudhi/clustering/tomato.py
@@ -0,0 +1,321 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy
+from ..point_cloud.knn import KNearestNeighbors
+from ..point_cloud.dtm import DTMDensity
+from ._tomato import *
+
+# The fit/predict interface is not so well suited...
+
+
+class Tomato:
+ """
+ This clustering algorithm needs a neighborhood graph on the points, and an estimation of the density at each point.
+ A few possible graph constructions and density estimators are provided for convenience, but it is perfectly natural
+ to provide your own.
+
+ :Requires: `SciPy <installation.html#scipy>`_, `Scikit-learn <installation.html#scikit-learn>`_ or others
+ (see :class:`~gudhi.point_cloud.knn.KNearestNeighbors`) in function of the options.
+
+ Attributes
+ ----------
+ n_clusters_: int
+ The number of clusters. Writing to it automatically adjusts `labels_`.
+ merge_threshold_: float
+ minimum prominence of a cluster so it doesn't get merged. Writing to it automatically adjusts `labels_`.
+ n_leaves_: int
+ number of leaves (unstable clusters) in the hierarchical tree
+ leaf_labels_: ndarray of shape (n_samples,)
+ cluster labels for each point, at the very bottom of the hierarchy
+ labels_: ndarray of shape (n_samples,)
+ cluster labels for each point, after merging
+ diagram_: ndarray of shape (`n_leaves_`, 2)
+ persistence diagram (only the finite points)
+ max_weight_per_cc_: ndarray of shape (n_connected_components,)
+ maximum of the density function on each connected component. This corresponds to the abscissa of infinite
+ points in the diagram
+ children_: ndarray of shape (`n_leaves_`-n_connected_components, 2)
+ The children of each non-leaf node. Values less than `n_leaves_` correspond to leaves of the tree.
+ A node i greater than or equal to `n_leaves_` is a non-leaf node and has children children_[i - `n_leaves_`].
+ Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node `n_leaves_` + i
+ weights_: ndarray of shape (n_samples,)
+ weights of the points, as computed by the density estimator or provided by the user
+ params_: dict
+ Parameters like metric, etc
+ """
+
+ def __init__(
+ self,
+ graph_type="knn",
+ density_type="logDTM",
+ n_clusters=None,
+ merge_threshold=None,
+ # eliminate_threshold=None,
+ # eliminate_threshold (float): minimum max weight of a cluster so it doesn't get eliminated
+ **params
+ ):
+ """
+ Args:
+ graph_type (str): 'manual', 'knn' or 'radius'. Default is 'knn'.
+ density_type (str): 'manual', 'DTM', 'logDTM', 'KDE' or 'logKDE'. When you have many points,
+ 'KDE' and 'logKDE' tend to be slower. Default is 'logDTM'.
+ metric (str|Callable): metric used when calculating the distance between instances in a feature array.
+ Defaults to Minkowski of parameter p.
+ kde_params (dict): if density_type is 'KDE' or 'logKDE', additional parameters passed directly to
+ sklearn.neighbors.KernelDensity.
+ k (int): number of neighbors for a knn graph (including the vertex itself). Defaults to 10.
+ k_DTM (int): number of neighbors for the DTM density estimation (including the vertex itself).
+ Defaults to k.
+ r (float): size of a neighborhood if graph_type is 'radius'. Also used as default bandwidth in kde_params.
+ eps (float): (1+eps) approximation factor when computing distances (ignored in many cases).
+ n_clusters (int): number of clusters requested. Defaults to None, i.e. no merging occurs and we get
+ the maximal number of clusters.
+ merge_threshold (float): minimum prominence of a cluster so it doesn't get merged.
+ symmetrize_graph (bool): whether we should add edges to make the neighborhood graph symmetric.
+ This can be useful with k-NN for small k. Defaults to false.
+ p (float): norm L^p on input points. Defaults to 2.
+ q (float): order used to compute the distance to measure. Defaults to dim.
+ Beware that when the dimension is large, this can easily cause overflows.
+ dim (float): final exponent in DTM density estimation, representing the dimension. Defaults to the
+ dimension, or 2 when the dimension cannot be read from the input (metric is "precomputed").
+ n_jobs (int): Number of jobs to schedule for parallel processing on the CPU.
+ If -1 is given all processors are used. Default: 1.
+ params: extra parameters are passed to :class:`~gudhi.point_cloud.knn.KNearestNeighbors` and
+ :class:`~gudhi.point_cloud.dtm.DTMDensity`.
+ """
+ # Should metric='precomputed' mean input_type='distance_matrix'?
+ # Should we be able to pass metric='minkowski' (what None does currently)?
+ self.graph_type_ = graph_type
+ self.density_type_ = density_type
+ self.params_ = params
+ self.__n_clusters = n_clusters
+ self.__merge_threshold = merge_threshold
+ # self.eliminate_threshold_ = eliminate_threshold
+ if n_clusters and merge_threshold:
+ raise ValueError("Cannot specify both a merge threshold and a number of clusters")
+
+ def fit(self, X, y=None, weights=None):
+ """
+ Args:
+ X ((n,d)-array of float|(n,n)-array of float|Sequence[Iterable[int]]): coordinates of the points,
+ or distance matrix (full, not just a triangle) if metric is "precomputed", or list of neighbors
+ for each point (points are represented by their index, starting from 0) if graph_type is "manual".
+ The number of points is currently limited to about 2 billion.
+ weights (ndarray of shape (n_samples)): if density_type is 'manual', a density estimate at each point
+ y: Not used, present here for API consistency with scikit-learn by convention.
+ """
+ # TODO: First detect if this is a new call with the same data (only threshold changed?)
+ # TODO: less code duplication (subroutines?), less spaghetti, but don't compute neighbors twice if not needed. Clear error message for missing or contradictory parameters.
+ if weights is not None:
+ density_type = "manual"
+ else:
+ density_type = self.density_type_
+ if density_type == "manual":
+ raise ValueError("If density_type is 'manual', you must provide weights to fit()")
+
+ if self.graph_type_ == "manual":
+ self.neighbors_ = X
+ # FIXME: uniformize "message 'option'" vs 'message "option"'
+ assert density_type == "manual", 'If graph_type is "manual", density_type must be as well'
+ else:
+ metric = self.params_.get("metric", "minkowski")
+ if metric != "precomputed":
+ self.points_ = X
+
+ # Slight complication to avoid computing knn twice.
+ need_knn = 0
+ need_knn_ngb = False
+ need_knn_dist = False
+ if self.graph_type_ == "knn":
+ k_graph = self.params_.get("k", 10)
+ # If X has fewer than k points...
+ if k_graph > len(X):
+ k_graph = len(X)
+ need_knn = k_graph
+ need_knn_ngb = True
+ if self.density_type_ in ["DTM", "logDTM"]:
+ k = self.params_.get("k", 10)
+ k_DTM = self.params_.get("k_DTM", k)
+ # If X has fewer than k points...
+ if k_DTM > len(X):
+ k_DTM = len(X)
+ need_knn = max(need_knn, k_DTM)
+ need_knn_dist = True
+ # if we ask for more neighbors for the graph than the DTM, getting the distances is a slight waste,
+ # but it looks negligible
+ if need_knn > 0:
+ knn_args = dict(self.params_)
+ knn_args["k"] = need_knn
+ knn = KNearestNeighbors(return_index=need_knn_ngb, return_distance=need_knn_dist, **knn_args).fit_transform(
+ X
+ )
+ if need_knn_ngb:
+ if need_knn_dist:
+ self.neighbors_ = knn[0][:, 0:k_graph]
+ knn_dist = knn[1]
+ else:
+ self.neighbors_ = knn
+ elif need_knn_dist:
+ knn_dist = knn
+ if self.density_type_ in ["DTM", "logDTM"]:
+ dim = self.params_.get("dim")
+ if dim is None:
+ dim = len(X[0]) if metric != "precomputed" else 2
+ q = self.params_.get("q", dim)
+ weights = DTMDensity(k=k_DTM, metric="neighbors", dim=dim, q=q).fit_transform(knn_dist)
+ if self.density_type_ == "logDTM":
+ weights = numpy.log(weights)
+
+ if self.graph_type_ == "radius":
+ if metric in ["minkowski", "euclidean", "manhattan", "chebyshev"]:
+ from scipy.spatial import cKDTree
+
+ tree = cKDTree(X)
+ # TODO: handle "l1" and "l2" aliases?
+ p = self.params_.get("p")
+ if metric == "euclidean":
+ assert p is None or p == 2, "p=" + str(p) + " is not consistent with metric='euclidean'"
+ p = 2
+ elif metric == "manhattan":
+ assert p is None or p == 1, "p=" + str(p) + " is not consistent with metric='manhattan'"
+ p = 1
+ elif metric == "chebyshev":
+ assert p is None or p == numpy.inf, "p=" + str(p) + " is not consistent with metric='chebyshev'"
+ p = numpy.inf
+ elif p is None:
+ p = 2 # the default
+ eps = self.params_.get("eps", 0)
+ self.neighbors_ = tree.query_ball_tree(tree, r=self.params_["r"], p=p, eps=eps)
+
+ # TODO: sklearn's NearestNeighbors.radius_neighbors can handle more metrics efficiently via its BallTree
+ # (don't bother with the _graph variant, it just calls radius_neighbors).
+ elif metric != "precomputed":
+ from sklearn.metrics import pairwise_distances
+
+ X = pairwise_distances(X, metric=metric, n_jobs=self.params_.get("n_jobs"))
+ metric = "precomputed"
+
+ if metric == "precomputed":
+ # TODO: parallelize? May not be worth it.
+ X = numpy.asarray(X)
+ r = self.params_["r"]
+ self.neighbors_ = [numpy.flatnonzero(l <= r) for l in X]
+
+ if self.density_type_ in {"KDE", "logKDE"}:
+ # Slow...
+ assert (
+ self.graph_type_ != "manual" and metric != "precomputed"
+ ), "Scikit-learn's KernelDensity requires point coordinates"
+ kde_params = dict(self.params_.get("kde_params", dict()))
+ kde_params.setdefault("metric", metric)
+ r = self.params_.get("r")
+ if r is not None:
+ kde_params.setdefault("bandwidth", r)
+ # Should we default rtol to eps?
+ from sklearn.neighbors import KernelDensity
+
+ weights = KernelDensity(**kde_params).fit(self.points_).score_samples(self.points_)
+ if self.density_type_ == "KDE":
+ weights = numpy.exp(weights)
+
+ # TODO: do it at the C++ level and/or in parallel if this is too slow?
+ if self.params_.get("symmetrize_graph"):
+ self.neighbors_ = [set(line) for line in self.neighbors_]
+ for i, line in enumerate(self.neighbors_):
+ line.discard(i)
+ for j in line:
+ self.neighbors_[j].add(i)
+
+ self.weights_ = weights
+ # This is where the main computation happens
+ self.leaf_labels_, self.children_, self.diagram_, self.max_weight_per_cc_ = hierarchy(self.neighbors_, weights)
+ self.n_leaves_ = len(self.max_weight_per_cc_) + len(self.children_)
+ assert self.leaf_labels_.max() + 1 == len(self.max_weight_per_cc_) + len(self.children_)
+ # TODO: deduplicate this code with the setters below
+ if self.__merge_threshold:
+ assert not self.__n_clusters
+ self.__n_clusters = numpy.count_nonzero(
+ self.diagram_[:, 0] - self.diagram_[:, 1] > self.__merge_threshold
+ ) + len(self.max_weight_per_cc_)
+ if self.__n_clusters:
+ # TODO: set corresponding merge_threshold?
+ renaming = merge(self.children_, self.n_leaves_, self.__n_clusters)
+ self.labels_ = renaming[self.leaf_labels_]
+ # In case the user asked for something impossible.
+ # TODO: check for impossible situations before calling merge.
+ self.__n_clusters = self.labels_.max() + 1
+ else:
+ self.labels_ = self.leaf_labels_
+ self.__n_clusters = self.n_leaves_
+ return self
+
+ def fit_predict(self, X, y=None, weights=None):
+ """
+ Equivalent to fit(), and returns the `labels_`.
+ """
+ return self.fit(X, y, weights).labels_
+
+ # TODO: add argument k or threshold? Have a version where you can click and it shows the line and the corresponding k?
+ def plot_diagram(self):
+ """
+ """
+ import matplotlib.pyplot as plt
+
+ l = self.max_weight_per_cc_.min()
+ r = self.max_weight_per_cc_.max()
+ if self.diagram_.size > 0:
+ plt.plot(self.diagram_[:, 0], self.diagram_[:, 1], "o", color="red")
+ l = min(l, self.diagram_[:, 1].min())
+ r = max(r, self.diagram_[:, 0].max())
+ if l == r:
+ if l > 0:
+ l, r = 0.9 * l, 1.1 * r
+ elif l < 0:
+ l, r = 1.1 * l, 0.9 * r
+ else:
+ l, r = -1.0, 1.0
+ plt.plot([l, r], [l, r])
+ plt.plot(
+ self.max_weight_per_cc_, numpy.full(self.max_weight_per_cc_.shape, 1.1 * l - 0.1 * r), "o", color="green"
+ )
+ plt.show()
+
+ # Use set_params instead?
+ @property
+ def n_clusters_(self):
+ return self.__n_clusters
+
+ @n_clusters_.setter
+ def n_clusters_(self, n_clusters):
+ if n_clusters == self.__n_clusters:
+ return
+ self.__n_clusters = n_clusters
+ self.__merge_threshold = None
+ if hasattr(self, "leaf_labels_"):
+ renaming = merge(self.children_, self.n_leaves_, self.__n_clusters)
+ self.labels_ = renaming[self.leaf_labels_]
+ # In case the user asked for something impossible
+ self.__n_clusters = self.labels_.max() + 1
+
+ @property
+ def merge_threshold_(self):
+ return self.__merge_threshold
+
+ @merge_threshold_.setter
+ def merge_threshold_(self, merge_threshold):
+ if merge_threshold == self.__merge_threshold:
+ return
+ if hasattr(self, "leaf_labels_"):
+ self.n_clusters_ = numpy.count_nonzero(self.diagram_[:, 0] - self.diagram_[:, 1] > merge_threshold) + len(
+ self.max_weight_per_cc_
+ )
+ else:
+ self.__n_clusters = None
+ self.__merge_threshold = merge_threshold
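An end-to-end sketch of the class above on random toy data (requires SciPy or Scikit-learn for the k-NN and DTM steps, as noted in the docstring):

    import numpy as np
    from gudhi.clustering.tomato import Tomato

    pts = np.random.rand(100, 2)
    t = Tomato(graph_type="knn", density_type="DTM", k=8)
    t.fit(pts)
    print(t.n_leaves_, "leaves,", t.n_clusters_, "clusters")
    t.plot_diagram()           # eyeball a prominence threshold from the diagram
    t.merge_threshold_ = 0.1   # re-merges the hierarchy without refitting
    labels = t.labels_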
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index cbeda014..8e244bb8 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2016 Inria
@@ -7,12 +9,15 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import errno
import os
+import sys
import numpy as np
@@ -22,18 +27,20 @@ __license__ = "MIT"
cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef cppclass Bitmap_cubical_complex_base_interface "Gudhi::Cubical_complex::Cubical_complex_interface<>":
- Bitmap_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells)
- Bitmap_cubical_complex_base_interface(string perseus_file)
- int num_simplices()
- int dimension()
+ Bitmap_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells) nogil
+ Bitmap_cubical_complex_base_interface(string perseus_file) nogil
+ int num_simplices() nogil
+ int dimension() nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
- Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
+ Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except+
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
# CubicalComplex python interface
cdef class CubicalComplex:
@@ -73,7 +80,7 @@ cdef class CubicalComplex:
perseus_file=''):
if ((dimensions is not None) and (top_dimensional_cells is not None)
and (perseus_file == '')):
- self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+ self._construct_from_cells(dimensions, top_dimensional_cells)
elif ((dimensions is None) and (top_dimensional_cells is not None)
and (perseus_file == '')):
top_dimensional_cells = np.array(top_dimensional_cells,
@@ -81,16 +88,26 @@ cdef class CubicalComplex:
order = 'F')
dimensions = top_dimensional_cells.shape
top_dimensional_cells = top_dimensional_cells.ravel(order='F')
- self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+ self._construct_from_cells(dimensions, top_dimensional_cells)
elif ((dimensions is None) and (top_dimensional_cells is None)
and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
+ self._construct_from_file(perseus_file.encode('utf-8'))
else:
- print("file " + perseus_file + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ perseus_file)
else:
print("CubicalComplex can be constructed from dimensions and "
- "top_dimensional_cells or from a Perseus-style file name.")
+ "top_dimensional_cells or from a Perseus-style file name.",
+ file=sys.stderr)
+
+ def _construct_from_cells(self, vector[unsigned] dimensions, vector[double] top_dimensional_cells):
+ with nogil:
+ self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+
+ def _construct_from_file(self, string filename):
+ with nogil:
+ self.thisptr = new Bitmap_cubical_complex_base_interface(filename)
def __dealloc__(self):
if self.thisptr != NULL:
@@ -122,11 +139,37 @@ cdef class CubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+ equivalent to :func:`persistence` when you do not want the list
+ :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11. Max is 46337.
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ cdef int field = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, 1)
+ self.pcohptr.compute_persistence(field, minp)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -136,30 +179,75 @@ cdef class CubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
- if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+ internal datastructure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains 1-dimensional numpy arrays of length number_of_persistence_points.
+ The indices of the arrays in the list correspond to the homological dimensions, and each
+ integer corresponds to the index of the positive top-dimensional cell of an essential feature.
+ """
+
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()"
+
+ cdef vector[vector[int]] persistence_result
+ output = [[],[]]
+ with nogil:
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1 if len(ess) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1 if len(reg) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` function to be
launched first.
:note: betti_numbers function always returns [1, 0, 0, ...] as infinity
filtration cubes are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -174,13 +262,11 @@ cdef class CubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -191,13 +277,12 @@ cdef class CubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` function to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
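A minimal sketch of the new compute_persistence() / cofaces_of_persistence_pairs() workflow documented above, on an invented 2x3 array of top-dimensional cells:

    import numpy as np
    import gudhi

    cells = np.array([[1., 4., 2.], [8., 5., 3.]])
    cc = gudhi.CubicalComplex(top_dimensional_cells=cells)
    cc.compute_persistence(homology_coeff_field=2, min_persistence=0)
    print(cc.betti_numbers())
    regular, essential = cc.cofaces_of_persistence_pairs()
    # regular[d]: (birth cell, death cell) index pairs in dimension d;
    # essential[d]: birth cell indices of the infinite bars. Indices refer to the
    # input top-dimensional cells (raveled in Fortran order for a 2D input).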
diff --git a/src/python/gudhi/datasets/__init__.py b/src/python/gudhi/datasets/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/__init__.py
diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/__init__.py
diff --git a/src/python/gudhi/datasets/generators/_points.cc b/src/python/gudhi/datasets/generators/_points.cc
new file mode 100644
index 00000000..82fea25b
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/_points.cc
@@ -0,0 +1,121 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Hind Montassif
+ *
+ * Copyright (C) 2021 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <gudhi/random_point_generators.h>
+#include <gudhi/Debug_utils.h>
+
+#include <CGAL/Epick_d.h>
+
+namespace py = pybind11;
+
+
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern;
+
+py::array_t<double> generate_points_on_sphere(size_t n_samples, int ambient_dim, double radius, std::string sample) {
+
+ if (sample != "random") {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ py::array_t<double> points({n_samples, (size_t)ambient_dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ GUDHI_CHECK(n_samples == buf.shape[0], "Py array first dimension not matching n_samples on sphere");
+ GUDHI_CHECK(ambient_dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension");
+
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, ambient_dim, radius);
+ }
+
+ for (size_t i = 0; i < n_samples; i++)
+ for (int j = 0; j < ambient_dim; j++)
+ ptr[i*ambient_dim+j] = points_generated[i][j];
+
+ return points;
+}
+
+py::array_t<double> generate_points_on_torus(size_t n_samples, int dim, std::string sample) {
+
+ if ( (sample != "random") && (sample != "grid")) {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_torus_d<Kern>(n_samples, dim, sample);
+ }
+
+ size_t npoints = points_generated.size();
+
+  GUDHI_CHECK(2*dim == points_generated[0].size(), "Py array second dimension not matching twice the torus dimension");
+
+ py::array_t<double> points({npoints, (size_t)2*dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ for (size_t i = 0; i < npoints; i++)
+ for (int j = 0; j < 2*dim; j++)
+ ptr[i*(2*dim)+j] = points_generated[i][j];
+
+ return points;
+}
+
+PYBIND11_MODULE(_points, m) {
+ m.attr("__license__") = "LGPL v3";
+
+ m.def("sphere", &generate_points_on_sphere,
+ py::arg("n_samples"), py::arg("ambient_dim"),
+ py::arg("radius") = 1., py::arg("sample") = "random",
+ R"pbdoc(
+ Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+ :param ambient_dim: The ambient dimension d.
+ :type ambient_dim: integer
+ :param radius: The radius. Default value is `1.`.
+ :type radius: float
+ :param sample: The sample type. Default and only available value is `"random"`.
+ :type sample: string
+ :returns: the generated points on a sphere.
+ )pbdoc");
+
+ m.def("ctorus", &generate_points_on_torus,
+ py::arg("n_samples"), py::arg("dim"), py::arg("sample") = "random",
+ R"pbdoc(
+      Generate points on a flat d-torus embedded in R^2d, either random i.i.d. or on a grid
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+      :param dim: The dimension of the torus on which points would be generated, embedded in R^2*dim.
+ :type dim: integer
+ :param sample: The sample type. Available values are: `"random"` and `"grid"`. Default value is `"random"`.
+ :type sample: string
+ :returns: the generated points on a torus.
+
+ The shape of returned numpy array is:
+
+ If sample is 'random': (n_samples, 2*dim).
+
+ If sample is 'grid': (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ )pbdoc");
+}
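
A minimal usage sketch for the sphere binding above, assuming the extension builds as gudhi.datasets.generators._points and is re-exported by points.py (next file):

    from gudhi.datasets.generators import points

    # 100 points drawn uniformly at random on a sphere of radius 2. in R^3.
    pts = points.sphere(n_samples=100, ambient_dim=3, radius=2., sample="random")
    print(pts.shape)  # (100, 3)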
diff --git a/src/python/gudhi/datasets/generators/points.py b/src/python/gudhi/datasets/generators/points.py
new file mode 100644
index 00000000..9bb2799d
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/points.py
@@ -0,0 +1,59 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+from ._points import ctorus
+from ._points import sphere
+
+def _generate_random_points_on_torus(n_samples, dim):
+
+ # Generate random angles of size n_samples*dim
+ alpha = 2*np.pi*np.random.rand(n_samples*dim)
+
+    # Based on the angles, construct points of size n_samples*dim on a circle and reshape the result into an (n_samples, 2*dim) array
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)]).reshape(-1, 2*dim)
+
+ return array_points
+
+def _generate_grid_points_on_torus(n_samples, dim):
+
+ # Generate points on a dim-torus as a grid
+ n_samples_grid = int((n_samples+.5)**(1./dim)) # add .5 to avoid rounding down with numerical approximations
+ alpha = np.linspace(0, 2*np.pi, n_samples_grid, endpoint=False)
+
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)])
+ array_points_idx = np.empty([n_samples_grid]*dim + [dim], dtype=int)
+ for i, x in enumerate(np.ix_(*([np.arange(n_samples_grid)]*dim))):
+ array_points_idx[...,i] = x
+ return array_points[array_points_idx].reshape(-1, 2*dim)
+
+def torus(n_samples, dim, sample='random'):
+ """
+    Generate points on a flat dim-torus embedded in R^2dim, either randomly or on a grid
+
+ :param n_samples: The number of points to be generated.
+    :param dim: The dimension of the torus on which points would be generated, embedded in R^2*dim.
+ :param sample: The sample type of the generated points. Can be 'random' or 'grid'.
+ :returns: numpy array containing the generated points on a torus.
+
+ The shape of returned numpy array is:
+
+ If sample is 'random': (n_samples, 2*dim).
+
+ If sample is 'grid': (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ """
+ if sample == 'random':
+ # Generate points randomly
+ return _generate_random_points_on_torus(n_samples, dim)
+ elif sample == 'grid':
+ # Generate points on a grid
+ return _generate_grid_points_on_torus(n_samples, dim)
+ else:
+ raise ValueError("Sample type '{}' is not supported".format(sample))
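
A short sketch of the two sampling modes of torus(); the point counts are illustrative:

    from gudhi.datasets.generators.points import torus

    rand_pts = torus(n_samples=50, dim=3, sample='random')
    print(rand_pts.shape)  # (50, 6): n_samples points in R^(2*dim)

    grid_pts = torus(n_samples=50, dim=3, sample='grid')
    # 50**(1/3) rounds down to 3, so the grid holds 3**3 = 27 points.
    print(grid_pts.shape)  # (27, 6)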
diff --git a/src/python/gudhi/datasets/remote.py b/src/python/gudhi/datasets/remote.py
new file mode 100644
index 00000000..f6d3fe56
--- /dev/null
+++ b/src/python/gudhi/datasets/remote.py
@@ -0,0 +1,223 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from os.path import join, split, exists, expanduser
+from os import makedirs, remove, environ
+
+from urllib.request import urlretrieve
+import hashlib
+import shutil
+
+import numpy as np
+
+def _get_data_home(data_home = None):
+ """
+ Return the path of the remote datasets directory.
+ This folder is used to store remotely fetched datasets.
+ By default the datasets directory is set to a folder named 'gudhi_data' in the user home folder.
+ Alternatively, it can be set by the 'GUDHI_DATA' environment variable.
+ The '~' symbol is expanded to the user home folder.
+ If the folder does not already exist, it is automatically created.
+
+ Parameters
+ ----------
+ data_home : string
+ The path to remote datasets directory.
+ Default is `None`, meaning that the data home directory will be set to "~/gudhi_data",
+ if the 'GUDHI_DATA' environment variable does not exist.
+
+ Returns
+ -------
+ data_home: string
+ The path to remote datasets directory.
+ """
+ if data_home is None:
+ data_home = environ.get("GUDHI_DATA", join("~", "gudhi_data"))
+ data_home = expanduser(data_home)
+ makedirs(data_home, exist_ok=True)
+ return data_home
+
+
+def clear_data_home(data_home = None):
+ """
+ Delete the data home cache directory and all its content.
+
+ Parameters
+ ----------
+ data_home : string, default is None.
+ The path to remote datasets directory.
+ If `None` and the 'GUDHI_DATA' environment variable does not exist,
+ the default directory to be removed is set to "~/gudhi_data".
+ """
+ data_home = _get_data_home(data_home)
+ shutil.rmtree(data_home)
+
+def _checksum_sha256(file_path):
+ """
+ Compute the file checksum using sha256.
+
+ Parameters
+ ----------
+ file_path: string
+ Full path of the created file including filename.
+
+ Returns
+ -------
+ The hex digest of file_path.
+ """
+ sha256_hash = hashlib.sha256()
+ chunk_size = 4096
+ with open(file_path,"rb") as f:
+ # Read and update hash string value in blocks of 4K
+ while True:
+ buffer = f.read(chunk_size)
+ if not buffer:
+ break
+ sha256_hash.update(buffer)
+ return sha256_hash.hexdigest()
+
+def _fetch_remote(url, file_path, file_checksum = None):
+ """
+ Fetch the wanted dataset from the given url and save it in file_path.
+
+ Parameters
+ ----------
+ url : string
+ The url to fetch the dataset from.
+ file_path : string
+ Full path of the downloaded file including filename.
+ file_checksum : string
+ The file checksum using sha256 to check against the one computed on the downloaded file.
+ Default is 'None', which means the checksum is not checked.
+
+ Raises
+ ------
+ IOError
+ If the computed SHA256 checksum of file does not match the one given by the user.
+ """
+
+ # Get the file
+ urlretrieve(url, file_path)
+
+ if file_checksum is not None:
+ checksum = _checksum_sha256(file_path)
+ if file_checksum != checksum:
+ # Remove file and raise error
+ remove(file_path)
+            raise IOError("{} has a SHA256 checksum: {}, "
+                          "different from the expected one: {}. "
+                          "The file may be corrupted or the given url may be wrong!".format(file_path, checksum, file_checksum))
+
+def _get_archive_path(file_path, label):
+ """
+ Get archive path based on file_path given by user and label.
+
+ Parameters
+ ----------
+ file_path: string
+ Full path of the file to get including filename, or None.
+ label: string
+ Label used along with 'data_home' to get archive path, in case 'file_path' is None.
+
+ Returns
+ -------
+ Full path of archive including filename.
+ """
+    if file_path is None:
+        archive_path = join(_get_data_home(), label)
+    else:
+        archive_path = file_path
+
+    dirname = split(archive_path)[0]
+    makedirs(dirname, exist_ok=True)
+
+ return archive_path
+
+def fetch_spiral_2d(file_path = None):
+ """
+ Load the spiral_2d dataset.
+
+ Note that if the dataset already exists in the target location, it is not downloaded again,
+ and the corresponding array is returned from cache.
+
+ Parameters
+ ----------
+ file_path : string
+ Full path of the downloaded file including filename.
+
+ Default is None, meaning that it's set to "data_home/points/spiral_2d/spiral_2d.npy".
+
+ The "data_home" directory is set by default to "~/gudhi_data",
+ unless the 'GUDHI_DATA' environment variable is set.
+
+ Returns
+ -------
+ points: numpy array
+ Array of shape (114562, 2).
+ """
+ file_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/spiral_2d/spiral_2d.npy"
+ file_checksum = '2226024da76c073dd2f24b884baefbfd14928b52296df41ad2d9b9dc170f2401'
+
+ archive_path = _get_archive_path(file_path, "points/spiral_2d/spiral_2d.npy")
+
+ if not exists(archive_path):
+ _fetch_remote(file_url, archive_path, file_checksum)
+
+ return np.load(archive_path, mmap_mode='r')
+
+def fetch_bunny(file_path = None, accept_license = False):
+ """
+ Load the Stanford bunny dataset.
+
+ This dataset contains 35947 vertices.
+
+ Note that if the dataset already exists in the target location, it is not downloaded again,
+ and the corresponding array is returned from cache.
+
+ Parameters
+ ----------
+ file_path : string
+ Full path of the downloaded file including filename.
+
+ Default is None, meaning that it's set to "data_home/points/bunny/bunny.npy".
+ In this case, the LICENSE file would be downloaded as "data_home/points/bunny/bunny.LICENSE".
+
+ The "data_home" directory is set by default to "~/gudhi_data",
+ unless the 'GUDHI_DATA' environment variable is set.
+
+ accept_license : boolean
+        Flag to specify that the user accepts the file LICENSE; when set, the corresponding license terms are not printed.
+
+ Default is False.
+
+ Returns
+ -------
+ points: numpy array
+ Array of shape (35947, 3).
+ """
+
+ file_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/bunny/bunny.npy"
+ file_checksum = 'f382482fd89df8d6444152dc8fd454444fe597581b193fd139725a85af4a6c6e'
+ license_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/bunny/bunny.LICENSE"
+ license_checksum = 'b763dbe1b2fc6015d05cbf7bcc686412a2eb100a1f2220296e3b4a644c69633a'
+
+ archive_path = _get_archive_path(file_path, "points/bunny/bunny.npy")
+
+ if not exists(archive_path):
+ _fetch_remote(file_url, archive_path, file_checksum)
+ license_path = join(split(archive_path)[0], "bunny.LICENSE")
+ _fetch_remote(license_url, license_path, license_checksum)
+ # Print license terms unless accept_license is set to True
+ if not accept_license:
+ if exists(license_path):
+ with open(license_path, 'r') as f:
+ print(f.read())
+
+ return np.load(archive_path, mmap_mode='r')
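
A minimal sketch of the fetch helpers above (network access assumed; files land in ~/gudhi_data unless GUDHI_DATA is set):

    from gudhi.datasets import remote

    spiral = remote.fetch_spiral_2d()                 # (114562, 2), memory-mapped
    bunny = remote.fetch_bunny(accept_license=True)   # (35947, 3), license not printed
    print(spiral.shape, bunny.shape)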
diff --git a/src/python/gudhi/dtm_rips_complex.py b/src/python/gudhi/dtm_rips_complex.py
new file mode 100644
index 00000000..63c9b138
--- /dev/null
+++ b/src/python/gudhi/dtm_rips_complex.py
@@ -0,0 +1,51 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Yuichi Ike, Raphaël Tinarrage
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+
+from gudhi.weighted_rips_complex import WeightedRipsComplex
+from gudhi.point_cloud.dtm import DistanceToMeasure
+from scipy.spatial.distance import cdist
+
+class DTMRipsComplex(WeightedRipsComplex):
+ """
+ Class to generate a DTM Rips complex from a distance matrix or a point set,
+ in the way described in :cite:`dtmfiltrations`.
+    Note that all the filtration values are doubled compared to the definition in the paper,
+    for consistency with RipsComplex.
+ :Requires: `SciPy <installation.html#scipy>`_
+ """
+ def __init__(self,
+ points=None,
+ distance_matrix=None,
+ k=1,
+ q=2,
+ max_filtration=float('inf')):
+ """
+ Args:
+ points (numpy.ndarray): array of points.
+ distance_matrix (numpy.ndarray): full distance matrix.
+ k (int): number of neighbors for the computation of DTM. Defaults to 1, which is equivalent to the usual Rips complex.
+ q (float): order used to compute the distance to measure. Defaults to 2.
+ max_filtration (float): specifies the maximal filtration value to be considered.
+ """
+ if distance_matrix is None:
+ if points is None:
+ # Empty Rips construction
+ points=[]
+ distance_matrix = cdist(points,points)
+ self.distance_matrix = distance_matrix
+
+ # TODO: address the error when k is too large
+ if k <= 1:
+ self.weights = [0] * len(distance_matrix)
+ else:
+ dtm = DistanceToMeasure(k, q=q, metric="precomputed")
+ self.weights = dtm.fit_transform(distance_matrix)
+ self.max_filtration = max_filtration
+
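
A usage sketch for DTMRipsComplex; create_simplex_tree comes from WeightedRipsComplex, whose max_dimension parameter is assumed here:

    import numpy as np
    from gudhi.dtm_rips_complex import DTMRipsComplex

    pts = np.random.rand(30, 2)
    dtm_rips = DTMRipsComplex(points=pts, k=5)          # DTM with 5 neighbors
    st = dtm_rips.create_simplex_tree(max_dimension=2)
    st.compute_persistence()
    print(st.persistence_intervals_in_dimension(1))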
diff --git a/src/python/gudhi/hera/__init__.py b/src/python/gudhi/hera/__init__.py
new file mode 100644
index 00000000..f70b92b9
--- /dev/null
+++ b/src/python/gudhi/hera/__init__.py
@@ -0,0 +1,7 @@
+from .wasserstein import wasserstein_distance
+from .bottleneck import bottleneck_distance
+
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
diff --git a/src/python/gudhi/hera/bottleneck.cc b/src/python/gudhi/hera/bottleneck.cc
new file mode 100644
index 00000000..ec461f7c
--- /dev/null
+++ b/src/python/gudhi/hera/bottleneck.cc
@@ -0,0 +1,54 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11_diagram_utils.h>
+
+#ifdef _MSC_VER
+// https://github.com/grey-narn/hera/issues/3
+// ssize_t is a non-standard type (well, posix)
+using py::ssize_t;
+#endif
+
+#include <hera/bottleneck.h> // Hera
+
+double bottleneck_distance(Dgm d1, Dgm d2, double delta)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ if (delta == 0)
+ return hera::bottleneckDistExact(diag1, diag2);
+ else
+ return hera::bottleneckDistApprox(diag1, diag2, delta);
+}
+
+PYBIND11_MODULE(bottleneck, m) {
+ m.def("bottleneck_distance", &bottleneck_distance,
+ py::arg("X"), py::arg("Y"),
+ py::arg("delta") = .01,
+ R"pbdoc(
+ Compute the Bottleneck distance between two diagrams.
+ Points at infinity are supported.
+
+ .. note::
+ Points on the diagonal are not supported and must be filtered out before calling this function.
+
+ Parameters:
+ X (n x 2 numpy array): First diagram
+ Y (n x 2 numpy array): Second diagram
+            delta (float): Relative error of the approximation; if 0, the exact distance is computed
+
+ Returns:
+ float: (approximate) bottleneck distance d_B(X,Y)
+ )pbdoc");
+}
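
A quick sketch of calling the binding above from Python; the diagrams are made up:

    import numpy as np
    from gudhi.hera import bottleneck_distance

    X = np.array([[0., 2.], [1., 3.], [0., np.inf]])
    Y = np.array([[0., 2.5], [0., np.inf]])
    print(bottleneck_distance(X, Y))             # approximate, default delta=0.01
    print(bottleneck_distance(X, Y, delta=0.))   # exact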
diff --git a/src/python/gudhi/hera/wasserstein.cc b/src/python/gudhi/hera/wasserstein.cc
new file mode 100644
index 00000000..3516352e
--- /dev/null
+++ b/src/python/gudhi/hera/wasserstein.cc
@@ -0,0 +1,62 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11_diagram_utils.h>
+
+#ifdef _MSC_VER
+// https://github.com/grey-narn/hera/issues/3
+// ssize_t is a non-standard type (well, posix)
+using py::ssize_t;
+#endif
+
+#include <hera/wasserstein.h> // Hera
+
+double wasserstein_distance(
+ Dgm d1, Dgm d2,
+ double wasserstein_power, double internal_p,
+ double delta)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ hera::AuctionParams<double> params;
+ params.wasserstein_power = wasserstein_power;
+ // hera encodes infinity as -1...
+ if(std::isinf(internal_p)) internal_p = hera::get_infinity<double>();
+ params.internal_p = internal_p;
+ params.delta = delta;
+ // The extra parameters are purposely not exposed for now.
+ return hera::wasserstein_dist(diag1, diag2, params);
+}
+
+PYBIND11_MODULE(wasserstein, m) {
+ m.def("wasserstein_distance", &wasserstein_distance,
+ py::arg("X"), py::arg("Y"),
+ py::arg("order") = 1,
+ py::arg("internal_p") = std::numeric_limits<double>::infinity(),
+ py::arg("delta") = .01,
+ R"pbdoc(
+ Compute the Wasserstein distance between two diagrams.
+ Points at infinity are supported.
+
+ Parameters:
+ X (n x 2 numpy array): First diagram
+ Y (n x 2 numpy array): Second diagram
+ order (float): Wasserstein exponent W_q
+ internal_p (float): Internal Minkowski norm L^p in R^2
+            delta (float): Relative error of the approximation
+
+ Returns:
+ float: Approximate Wasserstein distance W_q(X,Y)
+ )pbdoc");
+}
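
The matching sketch for the Wasserstein binding, with the same kind of made-up diagrams:

    import numpy as np
    from gudhi.hera import wasserstein_distance

    X = np.array([[0., 2.], [1., 3.]])
    Y = np.array([[0., 2.5]])
    # 1-Wasserstein with internal L^inf norm, relative error delta=0.01.
    print(wasserstein_distance(X, Y, order=1, internal_p=np.inf, delta=0.01))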
diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx
index 382e71c5..9c89b239 100644
--- a/src/python/gudhi/nerve_gic.pyx
+++ b/src/python/gudhi/nerve_gic.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2018 Inria
@@ -7,11 +9,13 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import errno
import os
from libc.stdint cimport intptr_t
@@ -96,7 +100,8 @@ cdef class CoverComplex:
return self.thisptr != NULL
def set_point_cloud_from_range(self, cloud):
- """ Reads and stores the input point cloud from a vector stored in memory.
+ """ Reads and stores the input point cloud from a vector stored in
+ memory.
:param cloud: Input vector containing the point cloud.
:type cloud: vector[vector[double]]
@@ -104,7 +109,8 @@ cdef class CoverComplex:
return self.thisptr.set_point_cloud_from_range(cloud)
def set_distances_from_range(self, distance_matrix):
- """ Reads and stores the input distance matrix from a vector stored in memory.
+ """ Reads and stores the input distance matrix from a vector stored in
+ memory.
:param distance_matrix: Input vector containing the distance matrix.
:type distance_matrix: vector[vector[double]]
@@ -163,7 +169,8 @@ cdef class CoverComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr)
+ self.thisptr.create_simplex_tree(
+ <Simplex_tree_interface_full_featured*>stree_int_ptr)
return stree
def find_simplices(self):
@@ -182,12 +189,12 @@ cdef class CoverComplex:
if os.path.isfile(off_file):
return self.thisptr.read_point_cloud(off_file.encode('utf-8'))
else:
- print("file " + off_file + " not found.")
- return False
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ off_file)
def set_automatic_resolution(self):
"""Computes the optimal length of intervals (i.e. the smallest interval
- length avoiding discretization artifacts—see :cite:`Carriere17c`) for a
+ length avoiding discretization artifacts - see :cite:`Carriere17c`) for a
functional cover.
:rtype: double
@@ -214,7 +221,8 @@ cdef class CoverComplex:
if os.path.isfile(color_file_name):
self.thisptr.set_color_from_file(color_file_name.encode('utf-8'))
else:
- print("file " + color_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ color_file_name)
def set_color_from_range(self, color):
"""Computes the function used to color the nodes of the simplicial
@@ -235,7 +243,8 @@ cdef class CoverComplex:
if os.path.isfile(cover_file_name):
self.thisptr.set_cover_from_file(cover_file_name.encode('utf-8'))
else:
- print("file " + cover_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ cover_file_name)
def set_cover_from_function(self):
"""Creates a cover C from the preimages of the function f.
@@ -268,7 +277,8 @@ cdef class CoverComplex:
if os.path.isfile(func_file_name):
self.thisptr.set_function_from_file(func_file_name.encode('utf-8'))
else:
- print("file " + func_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ func_file_name)
def set_function_from_range(self, function):
"""Creates the function f from a vector stored in memory.
@@ -288,7 +298,7 @@ cdef class CoverComplex:
def set_graph_from_automatic_rips(self, N=100):
"""Creates a graph G from a Rips complex whose threshold value is
- automatically tuned with subsampling—see.
+ automatically tuned with subsampling - see :cite:`Carriere17c`.
:param N: Number of subsampling iteration (the default reasonable value
is 100, but there is no guarantee on how to choose it).
@@ -302,14 +312,15 @@ cdef class CoverComplex:
"""Creates a graph G from a file containing the edges.
:param graph_file_name: Name of the input graph file. The graph file
- contains one edge per line, each edge being represented by the IDs of
- its two nodes.
+ contains one edge per line, each edge being represented by the IDs
+ of its two nodes.
:type graph_file_name: string
"""
if os.path.isfile(graph_file_name):
self.thisptr.set_graph_from_file(graph_file_name.encode('utf-8'))
else:
- print("file " + graph_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ graph_file_name)
def set_graph_from_OFF(self):
"""Creates a graph G from the triangulation given by the input OFF
diff --git a/src/python/gudhi/off_reader.pyx b/src/python/gudhi/off_reader.pyx
deleted file mode 100644
index 7e6d9d80..00000000
--- a/src/python/gudhi/off_reader.pyx
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Vincent Rouvreau
-#
-# Copyright (C) 2016 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-from cython cimport numeric
-from libcpp.vector cimport vector
-from libcpp.string cimport string
-import os
-
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "MIT"
-
-cdef extern from "Off_reader_interface.h" namespace "Gudhi":
- vector[vector[double]] read_points_from_OFF_file(string off_file)
-
-def read_points_from_off_file(off_file=''):
- """Read points from OFF file.
-
- :param off_file: An OFF file style name.
- :type off_file: string
-
- :returns: The point set.
- :rtype: List[List[float]]
- """
- if off_file:
- if os.path.isfile(off_file):
- return read_points_from_OFF_file(off_file.encode('utf-8'))
- else:
- print("file " + off_file + " not found.")
- return []
-
diff --git a/src/python/gudhi/off_utils.pyx b/src/python/gudhi/off_utils.pyx
new file mode 100644
index 00000000..9276c7b0
--- /dev/null
+++ b/src/python/gudhi/off_utils.pyx
@@ -0,0 +1,62 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from __future__ import print_function
+from cython cimport numeric
+from libcpp.vector cimport vector
+from libcpp.string cimport string
+cimport cython
+import errno
+import os
+import numpy as np
+
+__author__ = "Vincent Rouvreau"
+__copyright__ = "Copyright (C) 2016 Inria"
+__license__ = "MIT"
+
+cdef extern from "Off_reader_interface.h" namespace "Gudhi":
+ vector[vector[double]] read_points_from_OFF_file(string off_file)
+
+def read_points_from_off_file(off_file=''):
+ """Read points from an `OFF file <fileformats.html#off-file-format>`_.
+
+ :param off_file: An OFF file style name.
+ :type off_file: string
+
+ :returns: The point set.
+ :rtype: List[List[float]]
+ """
+ if off_file:
+ if os.path.isfile(off_file):
+ return read_points_from_OFF_file(off_file.encode('utf-8'))
+ else:
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ off_file)
+
+@cython.embedsignature(True)
+def write_points_to_off_file(fname, points):
+ """Write points to an `OFF file <fileformats.html#off-file-format>`_.
+
+ A simple wrapper for `numpy.savetxt`.
+
+ :param fname: Name of the OFF file.
+ :type fname: str or file handle
+ :param points: Point coordinates.
+ :type points: numpy array of shape (n, dim)
+ """
+ points = np.array(points, copy=False)
+ assert len(points.shape) == 2
+ dim = points.shape[1]
+ if dim == 3:
+ head = 'OFF\n{} 0 0'.format(points.shape[0])
+ else:
+ head = 'nOFF\n{} {} 0 0'.format(dim, points.shape[0])
+ np.savetxt(fname, points, header=head, comments='')
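
A round-trip sketch for the two helpers above, assuming both are re-exported at the gudhi top level; it writes a small OFF file in the current directory:

    import numpy as np
    from gudhi import read_points_from_off_file, write_points_to_off_file

    pts = np.random.rand(5, 3)                    # dim == 3 -> plain 'OFF' header
    write_points_to_off_file('example.off', pts)
    back = read_points_from_off_file('example.off')
    print(np.allclose(pts, np.array(back)))       # True: savetxt round-trips float64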
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index 37f76201..6c21e902 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -7,11 +7,13 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import sys
import os
import numpy as np
@@ -22,18 +24,20 @@ __license__ = "MIT"
cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_base_interface "Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>":
- Periodic_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions)
- Periodic_cubical_complex_base_interface(string perseus_file)
- int num_simplices()
- int dimension()
+ Periodic_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions) nogil
+ Periodic_cubical_complex_base_interface(string perseus_file) nogil
+ int num_simplices() nogil
+ int dimension() nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>>":
- Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
+ Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
# PeriodicCubicalComplex python interface
cdef class PeriodicCubicalComplex:
@@ -77,9 +81,7 @@ cdef class PeriodicCubicalComplex:
periodic_dimensions=None, perseus_file=''):
if ((dimensions is not None) and (top_dimensional_cells is not None)
and (periodic_dimensions is not None) and (perseus_file == '')):
- self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
- top_dimensional_cells,
- periodic_dimensions)
+ self._construct_from_cells(dimensions, top_dimensional_cells, periodic_dimensions)
elif ((dimensions is None) and (top_dimensional_cells is not None)
and (periodic_dimensions is not None) and (perseus_file == '')):
top_dimensional_cells = np.array(top_dimensional_cells,
@@ -87,20 +89,26 @@ cdef class PeriodicCubicalComplex:
order = 'F')
dimensions = top_dimensional_cells.shape
top_dimensional_cells = top_dimensional_cells.ravel(order='F')
- self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
- top_dimensional_cells,
- periodic_dimensions)
+ self._construct_from_cells(dimensions, top_dimensional_cells, periodic_dimensions)
elif ((dimensions is None) and (top_dimensional_cells is None)
and (periodic_dimensions is None) and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Periodic_cubical_complex_base_interface(perseus_file.encode('utf-8'))
+ self._construct_from_file(perseus_file.encode('utf-8'))
else:
- print("file " + perseus_file + " not found.")
+ print("file " + perseus_file + " not found.", file=sys.stderr)
else:
print("CubicalComplex can be constructed from dimensions, "
"top_dimensional_cells and periodic_dimensions, or from "
"top_dimensional_cells and periodic_dimensions or from "
- "a Perseus-style file name.")
+ "a Perseus-style file name.", file=sys.stderr)
+
+ def _construct_from_cells(self, vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions):
+ with nogil:
+ self.thisptr = new Periodic_cubical_complex_base_interface(dimensions, top_dimensional_cells, periodic_dimensions)
+
+ def _construct_from_file(self, string filename):
+ with nogil:
+ self.thisptr = new Periodic_cubical_complex_base_interface(filename)
def __dealloc__(self):
if self.thisptr != NULL:
@@ -132,11 +140,37 @@ cdef class PeriodicCubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+        equivalent to :func:`persistence` when you do not need the list
+        that :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11. Max is 46337.
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+            Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ cdef int field = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, 1)
+ self.pcohptr.compute_persistence(field, minp)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -146,30 +180,73 @@ cdef class PeriodicCubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
- if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+        internal data structure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 1].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell).
+ """
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()"
+ cdef vector[vector[int]] persistence_result
+
+ output = [[],[]]
+ with nogil:
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1 if len(ess) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1 if len(reg) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` function to be
launched first.
- :note: betti_numbers function always returns [1, 0, 0, ...] as infinity
+        :note: This function always returns the Betti numbers of a torus, as infinite
filtration cubes are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -184,13 +261,11 @@ cdef class PeriodicCubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -201,13 +276,12 @@ cdef class PeriodicCubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` function to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
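
A compact sketch of the new workflow for the periodic case; cell values are illustrative:

    import numpy as np
    import gudhi

    pcc = gudhi.PeriodicCubicalComplex(top_dimensional_cells=np.random.rand(4, 4),
                                       periodic_dimensions=[True, True])
    pcc.compute_persistence(homology_coeff_field=2)
    regular, essential = pcc.cofaces_of_persistence_pairs()
    # regular[d]: rows of (birth cell index, death cell index) in dimension d;
    # essential[d]: birth cell indices of the essential classes.
    print(pcc.betti_numbers())  # fully periodic 2D complex: [1, 2, 1]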
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 246280de..e438aa66 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -5,19 +5,26 @@
# Copyright (C) 2016 Inria
#
# Modification(s):
+# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility.
# - YYYY/MM Author: Description of the modification
from os import path
from math import isfinite
import numpy as np
+from functools import lru_cache
+import warnings
+import errno
+import os
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
-__author__ = "Vincent Rouvreau, Bertrand Michel"
+__author__ = "Vincent Rouvreau, Bertrand Michel, Theo Lacombe"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
+_gudhi_matplotlib_use_tex = True
+
def __min_birth_max_death(persistence, band=0.0):
"""This function returns (min_birth, max_death) from the persistence.
@@ -41,25 +48,78 @@ def __min_birth_max_death(persistence, band=0.0):
min_birth = float(interval[1][0])
if band > 0.0:
max_death += band
+ # can happen if only points at inf death
+ if min_birth == max_death:
+ max_death = max_death + 1.0
return (min_birth, max_death)
+
+def _array_handler(a):
+ """
+    :param a: if array, assumes it is a (n x 2) np.array and returns a
+ persistence-compatible list (padding with 0), so that the
+ plot can be performed seamlessly.
+ """
+ if isinstance(a[0][1], (np.floating, float)):
+ return [[0, x] for x in a]
+ else:
+ return a
+
+
+def _limit_to_max_intervals(persistence, max_intervals, key):
+ """This function returns truncated persistence if length is bigger than max_intervals.
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param max_intervals: maximal number of intervals to display.
+ Selected intervals are those with the longest life time. Set it
+        to 0 to see all.
+ :type max_intervals: int.
+ :param key: key function for sort algorithm.
+ :type key: function or lambda.
+ """
+ if max_intervals > 0 and max_intervals < len(persistence):
+ warnings.warn(
+ "There are %s intervals given as input, whereas max_intervals is set to %s."
+ % (len(persistence), max_intervals)
+ )
+ # Sort by life time, then takes only the max_intervals elements
+ return sorted(persistence, key=key, reverse=True)[:max_intervals]
+ else:
+ return persistence
+
+
+@lru_cache(maxsize=1)
+def _matplotlib_can_use_tex():
+ """This function returns True if matplotlib can deal with LaTeX, False otherwise.
+ The returned value is cached.
+ """
+ try:
+ from matplotlib import checkdep_usetex
+
+ return checkdep_usetex(True)
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
+
+
def plot_persistence_barcode(
persistence=[],
persistence_file="",
alpha=0.6,
- max_intervals=1000,
- max_barcodes=1000,
+ max_intervals=20000,
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
):
"""This function plots the persistence bar code from persistence values list
- or from a :doc:`persistence file <fileformats>`.
+    or a np.array of shape (N x 2) (representing a diagram
+ in a single homology dimension),
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: barcode transparency value (0.0 transparent through 1.0
@@ -67,7 +127,7 @@ def plot_persistence_barcode(
:type alpha: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 20000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -81,96 +141,78 @@ def plot_persistence_barcode(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Fontsize to use in axis.
+ :type fontsize: int
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+
+ if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
+ else:
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- if max_barcodes != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_barcodes
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
-
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ (min_birth, max_death) = __min_birth_max_death(persistence)
+ persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ except IndexError:
+        min_birth, max_death = 0.0, 1.0
- (min_birth, max_death) = __min_birth_max_death(persistence)
- ind = 0
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for bar code to be more
# readable
infinity = max_death + delta
axis_start = min_birth - delta
- # Draw horizontal bars in loop
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.barh(
- ind,
- (interval[1][1] - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- else:
- # Infinite death case for diagram to be nicer
- axes.barh(
- ind,
- (infinity - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- ind = ind + 1
+
+ if axes == None:
+ _, axes = plt.subplots(1, 1)
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
+
+ x=[birth for (dim,(birth,death)) in persistence]
+ y=[(death - birth) if death != float("inf") else (infinity - birth) for (dim,(birth,death)) in persistence]
+ c=[colormap[dim] for (dim,(birth,death)) in persistence]
+
+ axes.barh(range(len(x)), y, left=x, alpha=alpha, color=c, linewidth=0)
if legend:
- dimensions = list(set(item[0] for item in persistence))
+ dimensions = set(item[0] for item in persistence)
axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ],
- loc="lower right",
+ handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions], loc="lower right",
)
- axes.set_title("Persistence barcode")
+ axes.set_title("Persistence barcode", fontsize=fontsize)
+ axes.set_yticks([])
+ axes.invert_yaxis()
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, 0, ind])
+ if len(x) != 0:
+ axes.set_xlim((axis_start, infinity))
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_diagram(
@@ -178,19 +220,21 @@ def plot_persistence_diagram(
persistence_file="",
alpha=0.6,
band=0.0,
- max_intervals=1000,
- max_plots=1000,
+ max_intervals=1000000,
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=True,
):
"""This function plots the persistence diagram from persistence values
- list or from a :doc:`persistence file <fileformats>`.
+ list, a np.array of shape (N x 2) representing a diagram in a single
+    homology dimension, or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: plot transparency value (0.0 transparent through 1.0
@@ -200,7 +244,7 @@ def plot_persistence_diagram(
:type band: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 1000000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -214,94 +258,102 @@ def plot_persistence_diagram(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Fontsize to use in axis.
+ :type fontsize: int
+ :param greyblock: if we want to plot a grey patch on the lower half plane for nicer rendering. Default True.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+
+ if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
+ else:
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- if max_plots != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_plots
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ min_birth, max_death = __min_birth_max_death(persistence, band)
+ except IndexError:
+        min_birth, max_death = 0.0, 1.0
- (min_birth, max_death) = __min_birth_max_death(persistence, band)
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for diagram to be more
# readable
infinity = max_death + delta
+ axis_end = max_death + delta / 2
axis_start = min_birth - delta
- # line display of equation : birth = death
- x = np.linspace(axis_start, infinity, 1000)
- # infinity line and text
- axes.plot(x, x, color="k", linewidth=1.0)
- axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha)
- axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha)
+ if axes == None:
+ _, axes = plt.subplots(1, 1)
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
# bootstrap band
if band > 0.0:
+ x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
-
- # Draw points in loop
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.scatter(
- interval[1][0],
- interval[1][1],
- alpha=alpha,
- color=colormap[interval[0]],
- )
- else:
- # Infinite death case for diagram to be nicer
- axes.scatter(
- interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
+ # lower diag patch
+ if greyblock:
+ axes.add_patch(
+ mpatches.Polygon(
+ [[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]],
+ fill=True,
+ color="lightgrey",
)
+ )
+ # line display of equation : birth = death
+ axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
+
+ x=[birth for (dim,(birth,death)) in persistence]
+ y=[death if death != float("inf") else infinity for (dim,(birth,death)) in persistence]
+ c=[colormap[dim] for (dim,(birth,death)) in persistence]
+
+ axes.scatter(x,y,alpha=alpha,color=c)
+ if float("inf") in (death for (dim,(birth,death)) in persistence):
+ # infinity line and text
+ axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha)
+ # Infinity label
+ yt = axes.get_yticks()
+ yt = yt[np.where(yt < axis_end)] # to avoid plotting ticklabel higher than infinity
+ yt = np.append(yt, infinity)
+ ytl = ["%.3f" % e for e in yt] # to avoid float precision error
+ ytl[-1] = r"$+\infty$"
+ axes.set_yticks(yt)
+ axes.set_yticklabels(ytl)
if legend:
dimensions = list(set(item[0] for item in persistence))
- axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ]
- )
+ axes.legend(handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions])
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, axis_start, infinity + delta])
- axes.set_title("Persistence diagram")
+ axes.axis([axis_start, axis_end, axis_start, infinity + delta / 2])
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_density(
@@ -313,18 +365,26 @@ def plot_persistence_density(
dimension=None,
cmap=None,
legend=False,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=False,
):
"""This function plots the persistence density from persistence
- values list or from a :doc:`persistence file <fileformats>`. Be
- aware that this function does not distinguish the dimension, it is
+ values list, np.array of shape (N x 2) representing a diagram
+ in a single homology dimension,
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
+ Be aware that this function does not distinguish the dimension, it is
up to you to select the required one. This function also does not handle
degenerate data set (scipy correlation matrix inversion can fail).
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>`
- style name (reset persistence if both are set).
+ :Requires: `SciPy <installation.html#scipy>`_
+
+ :param persistence: Persistence intervals values list.
+ Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death))
+ or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_
+ file style name (reset persistence if both are set).
:type persistence_file: string
:param nbins: Evaluate a gaussian kde on a regular grid of nbins x
nbins over data extents (default is 300)
@@ -355,11 +415,25 @@ def plot_persistence_density(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Font size to use for the axis labels and the title.
+ :type fontsize: int
+ :param greyblock: If True, plot a grey patch on the lower half-plane
+ (below the diagonal) for nicer rendering. Default False.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
+ import matplotlib.patches as mpatches
from scipy.stats import kde
+
+ if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
+ else:
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if dimension is None:
@@ -370,10 +444,17 @@ def plot_persistence_density(
persistence_file=persistence_file, only_this_dim=dimension
)
else:
- print("file " + persistence_file + " not found.")
- return None
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- if len(persistence) > 0:
+ # the default cmap value cannot be set at argument definition level, as matplotlib is not imported yet.
+ if cmap is None:
+ cmap = plt.cm.hot_r
+ if axes is None:
+ _, axes = plt.subplots(1, 1)
+
+ try:
+ # if not read from file but given by an argument
+ persistence = _array_handler(persistence)
persistence_dim = np.array(
[
(dim_interval[1][0], dim_interval[1][1])
@@ -381,52 +462,61 @@ def plot_persistence_density(
if (dim_interval[0] == dimension) or (dimension is None)
]
)
-
- persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
- if max_intervals > 0 and max_intervals < len(persistence_dim):
- # Sort by life time, then takes only the max_intervals elements
+ persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
persistence_dim = np.array(
- sorted(
- persistence_dim,
- key=lambda life_time: life_time[1] - life_time[0],
- reverse=True,
- )[:max_intervals]
+ _limit_to_max_intervals(
+ persistence_dim, max_intervals, key=lambda life_time: life_time[1] - life_time[0]
+ )
)
- # Set as numpy array birth and death (remove undefined values - inf and NaN)
- birth = persistence_dim[:, 0]
- death = persistence_dim[:, 1]
-
- # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
- if cmap is None:
- cmap = plt.cm.hot_r
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ # Set as numpy array birth and death (remove undefined values - inf and NaN)
+ birth = persistence_dim[:, 0]
+ death = persistence_dim[:, 1]
+ birth_min = birth.min()
+ birth_max = birth.max()
+ death_min = death.min()
+ death_max = death.max()
+
+ # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
+ k = kde.gaussian_kde([birth, death], bw_method=bw_method)
+ xi, yi = np.mgrid[
+ birth_min : birth_max : nbins * 1j, death_min : death_max : nbins * 1j,
+ ]
+ zi = k(np.vstack([xi.flatten(), yi.flatten()]))
+ # Make the plot
+ img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap, shading="auto")
+ plot_success = True
+
+ # IndexError on empty diagrams, ValueError on only inf death values
+ except (IndexError, ValueError):
+ birth_min = 0.0
+ birth_max = 1.0
+ death_min = 0.0
+ death_max = 1.0
+ plot_success = False
# line display of equation : birth = death
- x = np.linspace(death.min(), birth.max(), 1000)
+ x = np.linspace(death_min, birth_max, 1000)
axes.plot(x, x, color="k", linewidth=1.0)
- # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
- k = kde.gaussian_kde([birth, death], bw_method=bw_method)
- xi, yi = np.mgrid[
- birth.min() : birth.max() : nbins * 1j,
- death.min() : death.max() : nbins * 1j,
- ]
- zi = k(np.vstack([xi.flatten(), yi.flatten()]))
-
- # Make the plot
- img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
+ if greyblock:
+ axes.add_patch(
+ mpatches.Polygon(
+ [[birth_min, birth_min], [death_max, birth_min], [death_max, death_max]],
+ fill=True,
+ color="lightgrey",
+ )
+ )
- if legend:
+ if plot_success and legend:
plt.colorbar(img, ax=axes)
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
- axes.set_title("Persistence density")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence density", fontsize=fontsize)
+
return axes
- except ImportError:
- print(
- "This function is not available, you may be missing matplotlib and/or scipy."
- )
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
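
A minimal usage sketch of the two plotting functions with the new arguments introduced above (the toy diagram and the random density data are purely illustrative; matplotlib is required, plus SciPy for the density plot):

    import numpy as np
    import gudhi

    # A toy persistence list of (dimension, (birth, death)) pairs, with one essential class.
    diag = [(0, (0.0, float("inf"))), (0, (0.0, 0.3)), (1, (0.2, 0.8))]
    # greyblock shades the lower half-plane; fontsize applies to the labels and the title.
    axes = gudhi.plot_persistence_diagram(diag, legend=True, greyblock=True, fontsize=16)

    # The density plot needs several finite points; a flat (N x 2) array is accepted too.
    births = np.random.uniform(0.0, 1.0, 100)
    pd = np.stack([births, births + np.random.uniform(0.1, 0.5, 100)], axis=-1)
    axes = gudhi.plot_persistence_density(pd, legend=True)
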
diff --git a/src/python/gudhi/point_cloud/__init__.py b/src/python/gudhi/point_cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/point_cloud/__init__.py
diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py
new file mode 100644
index 00000000..55ac58e6
--- /dev/null
+++ b/src/python/gudhi/point_cloud/dtm.py
@@ -0,0 +1,179 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .knn import KNearestNeighbors
+import numpy as np
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class DistanceToMeasure:
+ """
+ Class to compute the distance to the empirical measure defined by a point set, as introduced in :cite:`dtm`.
+ """
+
+ def __init__(self, k, q=2, **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ q (float): order used to compute the distance to measure. Defaults to 2.
+ kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that
+ metric="neighbors" means that :func:`transform` expects an array with the distances
+ to the k nearest neighbors.
+ """
+ self.k = k
+ self.q = q
+ self.params = kwargs
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for mass points.
+ """
+ if self.params.setdefault("metric", "euclidean") != "neighbors":
+ self.knn = KNearestNeighbors(
+ self.k, return_index=False, return_distance=True, sort_results=False, **self.params
+ )
+ self.knn.fit(X)
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed",
+ or distances to the k nearest neighbors if metric is "neighbors" (if the array has more
+ than k columns, the remaining ones are ignored).
+
+ Returns:
+ numpy.array: a 1-d array with, for each point of X, its distance to the measure defined
+ by the argument of :func:`fit`.
+ """
+ if self.params["metric"] == "neighbors":
+ distances = X[:, : self.k]
+ else:
+ distances = self.knn.transform(X)
+ distances = distances ** self.q
+ dtm = distances.sum(-1) / self.k
+ dtm = dtm ** (1.0 / self.q)
+ # We compute too many powers, 1/p in knn then q in dtm, 1/q in dtm then q or some log in the caller.
+ # Add option to skip the final root?
+ return dtm
+
+
+class DTMDensity:
+ """
+ Density estimator based on the distance to the empirical measure defined by a point set, as defined
+ in :cite:`dtmdensity`. Note that this implementation only renormalizes when asked, and the renormalization
+ only works for a Euclidean metric, so in other cases the total measure may not be 1.
+
+ .. note:: When the dimension is high, using the dimension as an exponent can quickly lead to under- or overflows.
+ We recommend using a small fixed value instead in those cases, even if it won't have the same nice
+ theoretical properties as the dimension.
+ """
+
+ def __init__(self, k=None, weights=None, q=None, dim=None, normalize=False, n_samples=None, **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself). Optional if it can be guessed
+ from weights or metric="neighbors".
+ weights (numpy.array): weights of each of the k neighbors, optional. They are supposed to sum to 1.
+ q (float): order used to compute the distance to measure. Defaults to dim.
+ dim (float): final exponent representing the dimension. Defaults to the dimension, and must be specified
+ when the dimension cannot be read from the input (metric is "neighbors" or "precomputed").
+ normalize (bool): normalize the density so it corresponds to a probability measure on ℝᵈ.
+ Only available for the Euclidean metric, defaults to False.
+ n_samples (int): number of sample points used for fitting. Only needed if `normalize` is True and
+ metric is "neighbors".
+ kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that
+ metric="neighbors" means that :func:`transform` expects an array with the distances to
+ the k nearest neighbors.
+ """
+ if weights is None:
+ self.k = k
+ if k is None:
+ assert kwargs.get("metric") == "neighbors", 'Must specify k or weights, unless metric is "neighbors"'
+ self.weights = None
+ else:
+ self.weights = np.full(k, 1.0 / k)
+ else:
+ self.weights = weights
+ self.k = len(weights)
+ assert k is None or k == self.k, "k differs from the length of weights"
+ self.q = q
+ self.dim = dim
+ self.params = kwargs
+ self.normalize = normalize
+ self.n_samples = n_samples
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for mass points.
+ """
+ if self.params.setdefault("metric", "euclidean") != "neighbors":
+ self.knn = KNearestNeighbors(
+ self.k, return_index=False, return_distance=True, sort_results=False, **self.params
+ )
+ self.knn.fit(X)
+ if self.params["metric"] != "precomputed":
+ self.n_samples = len(X)
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed",
+ or distances to the k nearest neighbors if metric is "neighbors" (if the array has more
+ than k columns, the remaining ones are ignored).
+ """
+ q = self.q
+ dim = self.dim
+ if dim is None:
+ assert self.params["metric"] not in {
+ "neighbors",
+ "precomputed",
+ }, "dim not specified and cannot guess the dimension"
+ dim = len(X[0])
+ if q is None:
+ q = dim
+ k = self.k
+ weights = self.weights
+ if self.params["metric"] == "neighbors":
+ distances = np.asarray(X)
+ if weights is None:
+ k = distances.shape[1]
+ weights = np.full(k, 1.0 / k)
+ else:
+ distances = distances[:, :k]
+ else:
+ distances = self.knn.transform(X)
+ distances = distances ** q
+ dtm = (distances * weights).sum(-1)
+ if self.normalize:
+ dtm /= (np.arange(1, k + 1) ** (q / dim) * weights).sum()
+ density = dtm ** (-dim / q)
+ if self.normalize:
+ import math
+
+ if self.params["metric"] == "precomputed":
+ self.n_samples = len(X[0])
+ # Volume of d-ball
+ Vd = math.pi ** (dim / 2) / math.gamma(dim / 2 + 1)
+ density /= self.n_samples * Vd
+ return density
+ # We compute too many powers, 1/p in knn then q in dtm, d/q in dtm then whatever in the caller.
+ # Add option to skip the final root?
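
A minimal sketch of the new DTM classes on a random point set (the data is illustrative; SciPy is assumed available for the default cKDTree backend):

    import numpy as np
    from gudhi.point_cloud.dtm import DistanceToMeasure, DTMDensity
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.rand(100, 2)

    # Distance to the empirical measure, averaging over k = 10 neighbors.
    dtm = DistanceToMeasure(k=10).fit_transform(pts)

    # Same result from precomputed neighbor distances, via metric="neighbors".
    dists = KNearestNeighbors(k=10, return_index=False, return_distance=True).fit_transform(pts)
    dtm2 = DistanceToMeasure(k=10, metric="neighbors").fit_transform(dists)
    assert np.allclose(dtm, dtm2)

    # Density estimate renormalized to a probability measure (Euclidean metric only).
    density = DTMDensity(k=10, normalize=True).fit_transform(pts)
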
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
new file mode 100644
index 00000000..7dc83817
--- /dev/null
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -0,0 +1,344 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy
+import warnings
+
+# TODO: https://github.com/facebookresearch/faiss
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class KNearestNeighbors:
+ """
+ Class wrapping several implementations for computing the k nearest neighbors in a point set.
+
+ :Requires: `PyKeOps <installation.html#pykeops>`_, `SciPy <installation.html#scipy>`_,
+ `Scikit-learn <installation.html#scikit-learn>`_, and/or `Hnswlib <installation.html#hnswlib>`_
+ depending on the selected `implementation`.
+ """
+
+ def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ return_index (bool): if True, return the index of each neighbor.
+ return_distance (bool): if True, return the distance to each neighbor.
+ implementation (str): choice of the library that does the real work.
+
+ * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes large (10+) but the number of points remains low (less than a million). Only "minkowski" and its aliases are supported.
+ * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported.
+ * 'sklearn' for scikit-learn's NearestNeighbors. Note that this provides in particular an option algorithm="brute".
+ * 'hnsw' for hnswlib.Index. It can be very fast but does not provide guarantees. Only supports "euclidean" for now.
+ * None will try to select a sensible one (scipy if possible, scikit-learn otherwise).
+ metric (str): see `sklearn.neighbors.NearestNeighbors`.
+ eps (float): relative error when computing nearest neighbors with the cKDTree.
+ p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2.
+ n_jobs (int): number of jobs to schedule for parallel processing of nearest neighbors on the CPU.
+ If -1 is given all processors are used. Default: 1.
+ sort_results (bool): if True, then distances and indices of each point are
+ sorted on return, so that the first column contains the closest points.
+ Otherwise, neighbors are returned in an arbitrary order. Defaults to True.
+ enable_autodiff (bool): if the input is a torch.tensor or tensorflow.Tensor, this
+ instructs the function to compute distances in a way that works with automatic differentiation.
+ This is experimental, not supported for all metrics, and requires the package EagerPy.
+ Defaults to False.
+ kwargs: additional parameters are forwarded to the backends.
+ """
+ self.k = k
+ self.return_index = return_index
+ self.return_distance = return_distance
+ self.metric = metric
+ self.params = kwargs
+ # canonicalize
+ if metric == "euclidean":
+ self.params["p"] = 2
+ self.metric = "minkowski"
+ elif metric == "manhattan":
+ self.params["p"] = 1
+ self.metric = "minkowski"
+ elif metric == "chebyshev":
+ self.params["p"] = numpy.inf
+ self.metric = "minkowski"
+ elif metric == "minkowski":
+ self.params["p"] = kwargs.get("p", 2)
+ if self.params.get("implementation") in {"keops", "ckdtree"}:
+ assert self.metric == "minkowski"
+ if self.params.get("implementation") == "hnsw":
+ assert self.metric == "minkowski" and self.params["p"] == 2
+ if not self.params.get("implementation"):
+ if self.metric == "minkowski":
+ self.params["implementation"] = "ckdtree"
+ else:
+ self.params["implementation"] = "sklearn"
+ if not return_distance:
+ self.params["enable_autodiff"] = False
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for reference points.
+ """
+ self.ref_points = X
+ if self.params.get("enable_autodiff", False):
+ import eagerpy as ep
+
+ X = ep.astensor(X)
+ if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor):
+ # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch
+ # without copying to/from the CPU.
+ X = X.numpy()
+ if self.params["implementation"] == "ckdtree":
+ # sklearn could handle this, but it is much slower
+ from scipy.spatial import cKDTree
+
+ self.kdtree = cKDTree(X)
+
+ if self.params["implementation"] == "sklearn" and self.metric != "precomputed":
+ # FIXME: sklearn badly handles "precomputed"
+ from sklearn.neighbors import NearestNeighbors
+
+ nargs = {
+ k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}
+ }
+ self.nn = NearestNeighbors(n_neighbors=self.k, metric=self.metric, **nargs)
+ self.nn.fit(X)
+
+ if self.params["implementation"] == "hnsw":
+ import hnswlib
+
+ self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances
+ self.graph.init_index(
+ len(X), **{k: v for k, v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}
+ )
+ n = self.params.get("num_threads")
+ if n is None:
+ n = self.params.get("n_jobs", 1)
+ self.params["num_threads"] = n
+ self.graph.add_items(X, num_threads=n)
+
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed".
+
+ Returns:
+ numpy.array: if return_index, an array of shape (len(X), k) with the indices (in the argument
+ of :func:`fit`) of the k nearest neighbors to the points of X. If return_distance, an array of the
+ same shape with the distances to those neighbors. If both, a tuple with the two arrays, in this order.
+ """
+ if self.params.get("enable_autodiff", False):
+ # pykeops does not support autodiff for kmin yet, but when it does in the future,
+ # we may want a special path.
+ import eagerpy as ep
+
+ save_return_index = self.return_index
+ self.return_index = True
+ self.return_distance = False
+ self.params["enable_autodiff"] = False
+ try:
+ newX = ep.astensor(X)
+ if self.params["implementation"] != "keops" or (
+ not isinstance(newX, ep.PyTorchTensor) and not isinstance(newX, ep.NumPyTensor)
+ ):
+ newX = newX.numpy()
+ else:
+ newX = newX.raw
+ neighbors = self.transform(newX)
+ finally:
+ self.return_index = save_return_index
+ self.return_distance = True
+ self.params["enable_autodiff"] = True
+ # We can implement more later as needed
+ assert self.metric == "minkowski"
+ p = self.params["p"]
+ Y = ep.astensor(self.ref_points)
+ neighbor_pts = Y[neighbors]
+ diff = neighbor_pts - X[:, None, :]
+ if isinstance(diff, ep.PyTorchTensor):
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ distances = ep.astensor(diff.raw.norm(p, -1))
+ else:
+ distances = diff.norms.lp(p, -1)
+ if self.return_index:
+ return neighbors, distances.raw
+ else:
+ return distances.raw
+
+ metric = self.metric
+ k = self.k
+
+ if metric == "precomputed":
+ # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural.
+ if self.return_index:
+ n_jobs = self.params.get("n_jobs", 1)
+ # Supposedly numpy can be compiled with OpenMP and handle this, but nobody does that?!
+ if n_jobs == 1:
+ neighbors = numpy.argpartition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results", True):
+ X = numpy.take_along_axis(X, neighbors, axis=-1)
+ ngb_order = numpy.argsort(X, axis=-1)
+ neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+ else:
+ ngb_order = neighbors
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, ngb_order, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ if self.params.get("sort_results", True):
+
+ def func(M):
+ neighbors = numpy.argpartition(M, k - 1)[:, 0:k]
+ Y = numpy.take_along_axis(M, neighbors, axis=-1)
+ ngb_order = numpy.argsort(Y, axis=-1)
+ return numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+
+ else:
+
+ def func(M):
+ return numpy.argpartition(M, k - 1)[:, 0:k]
+
+ neighbors = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, neighbors, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ n_jobs = self.params.get("n_jobs", 1)
+ if n_jobs == 1:
+ distances = numpy.partition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results"):
+ # partition is not guaranteed to sort the lower half, although it often does
+ distances.sort(axis=-1)
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ if self.params.get("sort_results"):
+
+ def func(M):
+ # Not partitioning in place, because we should not modify the user's array?
+ r = numpy.partition(M, k - 1)[:, 0:k]
+ r.sort(axis=-1)
+ return r
+
+ else:
+ func = lambda M: numpy.partition(M, k - 1)[:, 0:k]
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ return distances
+ return None
+
+ if self.params["implementation"] == "hnsw":
+ ef = self.params.get("ef")
+ if ef is not None:
+ self.graph.set_ef(ef)
+ neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"])
+ with warnings.catch_warnings():
+ if not numpy.all(numpy.isfinite(distances)):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
+ # The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn,
+ # which returns a priority_queue, and then fills the return array backwards with top/pop on the queue.
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, numpy.sqrt(distances)
+ else:
+ return neighbors
+ if self.return_distance:
+ return numpy.sqrt(distances)
+ return None
+
+ if self.params["implementation"] == "keops":
+ import torch
+ from pykeops.torch import LazyTensor
+
+ # 'float64' is slow except on super expensive GPUs. Allow it with some param?
+ XX = torch.as_tensor(X, dtype=torch.float32)
+ if X is self.ref_points:
+ YY = XX
+ else:
+ YY = torch.as_tensor(self.ref_points, dtype=torch.float32)
+ p = self.params["p"]
+ if p == numpy.inf:
+ # Requires pykeops 1.4 or later
+ mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1)
+ elif p == 2: # Any even integer?
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1)
+ else:
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1)
+
+ if self.return_index:
+ if self.return_distance:
+ distances, neighbors = mat.Kmin_argKmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not torch.isfinite(distances).all():
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return neighbors, distances
+ else:
+ neighbors = mat.argKmin(k, dim=1)
+ return neighbors
+ if self.return_distance:
+ distances = mat.Kmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not torch.isfinite(distances).all():
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return distances
+ return None
+
+ if self.params["implementation"] == "ckdtree":
+ qargs = {key: val for key, val in self.params.items() if key in {"p", "eps"}}
+ # SciPy renamed n_jobs to workers
+ qargs["workers"] = self.params.get("workers") or self.params.get("n_jobs") or 1
+ distances, neighbors = self.kdtree.query(X, k=self.k, **qargs)
+ if k == 1:
+ # SciPy decided to squeeze the last dimension for k=1
+ distances = distances[:, None]
+ neighbors = neighbors[:, None]
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ return distances
+ return None
+
+ assert self.params["implementation"] == "sklearn"
+ if self.return_distance:
+ distances, neighbors = self.nn.kneighbors(X, return_distance=True)
+ if self.return_index:
+ return neighbors, distances
+ else:
+ return distances
+ if self.return_index:
+ neighbors = self.nn.kneighbors(X, return_distance=False)
+ return neighbors
+ return None
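
A minimal sketch of KNearestNeighbors with two of the backends described above (illustrative data; the default implementation for Minkowski metrics is scipy's cKDTree):

    import numpy as np
    from scipy.spatial.distance import cdist
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.rand(1000, 3)

    # Default backend (cKDTree); one (n, k) array per requested output.
    knn = KNearestNeighbors(k=5, return_index=True, return_distance=True)
    neighbors, distances = knn.fit_transform(pts)

    # With metric="precomputed", transform() takes a distance matrix directly.
    dm = cdist(pts, pts)
    knn_pre = KNearestNeighbors(k=5, metric="precomputed", return_index=True, return_distance=True)
    neighbors2, distances2 = knn_pre.fit(dm).transform(dm)
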
diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py
new file mode 100644
index 00000000..5292e752
--- /dev/null
+++ b/src/python/gudhi/point_cloud/timedelay.py
@@ -0,0 +1,94 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Martin Royer, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+
+class TimeDelayEmbedding:
+ """Point cloud transformation class. Embeds time-series data in the R^d according to
+ `Takens' Embedding Theorem <https://en.wikipedia.org/wiki/Takens%27s_theorem>`_ and obtains the
+ coordinates of each point.
+
+ Parameters
+ ----------
+ dim : int, optional (default=3)
+ Dimension `d` of the embedding space R^d.
+ delay : int, optional (default=1)
+ Time delay between two consecutive coordinates of an embedded point.
+ skip : int, optional (default=1)
+ Stride between the starting indices of two consecutive embedded points.
+
+ Example
+ -------
+
+ Given delay=3 and skip=2, the point cloud obtained by embedding
+ a scalar time series into R^3 is as follows::
+
+ time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ point cloud = [[1, 4, 7],
+ [3, 6, 9]]
+
+ Given delay=1 and skip=1, the point cloud obtained by embedding
+ a 2D vector time series into R^4 is as follows::
+
+ time-series = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
+ point cloud = [[0, 1, 2, 3],
+ [2, 3, 4, 5],
+ [4, 5, 6, 7],
+ [6, 7, 8, 9]]
+ """
+
+ def __init__(self, dim=3, delay=1, skip=1):
+ self._dim = dim
+ self._delay = delay
+ self._skip = skip
+
+ def __call__(self, ts):
+ """Transform method for single time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[float] or Iterable[Iterable[float]]
+ A single time series, with scalar or vector values.
+
+ Returns
+ -------
+ point cloud : n x dim numpy array
+ The point cloud obtained from the single time series.
+ """
+ return self._transform(np.array(ts))
+
+ def fit(self, ts, y=None):
+ return self
+
+ def _transform(self, ts):
+ """Guts of transform method."""
+ if ts.ndim == 1:
+ repeat = self._dim
+ else:
+ assert self._dim % ts.shape[1] == 0
+ repeat = self._dim // ts.shape[1]
+ end = len(ts) - self._delay * (repeat - 1)
+ short = np.arange(0, end, self._skip)
+ vertical = np.arange(0, repeat * self._delay, self._delay)
+ return ts[np.add.outer(short, vertical)].reshape(len(short), -1)
+
+ def transform(self, ts):
+ """Transform method for multiple time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[Iterable[float]] or Iterable[Iterable[Iterable[float]]]
+ Multiple time series, with scalar or vector values.
+
+ Returns
+ -------
+ point clouds : list of n x dim numpy arrays
+ The point clouds obtained from each time series.
+ """
+ return [self._transform(np.array(s)) for s in ts]
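
A minimal sketch reproducing the first docstring example of TimeDelayEmbedding:

    from gudhi.point_cloud.timedelay import TimeDelayEmbedding

    ts = list(range(1, 11))  # the scalar series [1, 2, ..., 10]

    # dim=3, delay=3, skip=2: coordinates are taken 3 indices apart,
    # and embedded points start every 2 samples.
    embedder = TimeDelayEmbedding(dim=3, delay=3, skip=2)
    print(embedder(ts))
    # [[1 4 7]
    #  [3 6 9]]

    # transform() handles a batch of time series, one point cloud per series.
    print(embedder.transform([ts, ts]))
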
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
index bfc83aff..23fd23c7 100644
--- a/src/python/gudhi/representations/kernel_methods.py
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -9,27 +9,100 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.metrics import pairwise_distances
-from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+from sklearn.metrics import pairwise_distances, pairwise_kernels
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, _pairwise, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance
+from .preprocessing import Padding
#############################################
# Kernel methods ############################
#############################################
+def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ weight: weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence weighted Gaussian kernel value between persistence diagrams.
+ """
+ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))])
+ ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))])
+ if kernel_approx is not None:
+ approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0)
+ approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0)
+ return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T)
+ else:
+ W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth))
+ return np.sum(np.multiply(W, E))
+
+def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence scale space kernel value between persistence diagrams.
+ """
+ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0)
+ DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0)
+ weight_pss = lambda x: 1 if x[1] >= x[0] else -1
+ return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth)
+
+def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", n_jobs=None, **kwargs):
+ """
+ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only.
+ kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric.
+ n_jobs (int): number of jobs to use for the computation. This uses joblib.Parallel(prefer="threads"), so kernels that do not release the GIL may not scale unless run inside a `joblib.parallel_backend <https://joblib.readthedocs.io/en/latest/parallel.html#joblib.parallel_backend>`_ block.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): kernel matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None or Y is X else np.reshape(np.arange(len(Y)), [-1,1])
+ if kernel == "sliced_wasserstein":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"], n_jobs=n_jobs) / kwargs["bandwidth"])
+ elif kernel == "persistence_fisher":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"], n_jobs=n_jobs) / kwargs["bandwidth_fisher"])
+ elif kernel == "persistence_scale_space":
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs), n_jobs=n_jobs)
+ elif kernel == "persistence_weighted_gaussian":
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs), n_jobs=n_jobs)
+ else:
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(kernel, X, Y, **kwargs), n_jobs=n_jobs)
+
class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
"""
- def __init__(self, num_directions=10, bandwidth=1.0):
+ def __init__(self, num_directions=10, bandwidth=1.0, n_jobs=None):
"""
Constructor for the SlicedWassersteinKernel class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel applied to the sliced Wasserstein distance (default 1.).
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth = bandwidth
- self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+ self.num_directions = num_directions
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -39,7 +112,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sw_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -52,13 +125,26 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
"""
- return np.exp(-self.sw_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein kernel value.
+ """
+ return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) / self.bandwidth)
class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence weighted Gaussian kernel matrix from a list of persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
"""
- def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None):
+ def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceWeightedGaussianKernel class.
@@ -66,9 +152,11 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
weight (function): weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y].
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth, self.weight = bandwidth, weight
self.kernel_approx = kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -78,10 +166,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
- if self.kernel_approx is not None:
- self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -94,45 +179,36 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
"""
- Xp = list(X)
- Xfit = np.zeros((len(Xp), len(self.diagrams_)))
- if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
- if self.kernel_approx is not None:
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
- else:
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
- Xfit[j,i] = Xfit[i,j]
- else:
- ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
- if self.kernel_approx is not None:
- approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
- else:
- for i in range(len(Xp)):
- for j in range(len(self.diagrams_)):
- W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
-
- return Xfit
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceWeightedGaussianKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence weighted Gaussian kernel value.
+ """
+ return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth)
class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence scale space kernel matrix from a list of persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
"""
- def __init__(self, bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceScaleSpaceKernel class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
- self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -142,11 +218,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- for i in range(len(self.diagrams_)):
- op_D = self.diagrams_[i][:,[1,0]]
- self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
- self.pwg_.fit(X)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -159,17 +231,26 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
"""
- Xp = list(X)
- for i in range(len(Xp)):
- op_X = Xp[i][:,[1,0]]
- Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
- return self.pwg_.transform(Xp)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceScaleSpaceKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence scale space kernel value.
+ """
+ return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence Fisher kernel matrix from a list of persistence diagrams. The persistence Fisher kernel is computed by exponentiating the corresponding persistence Fisher distance with a Gaussian kernel. See papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
"""
- def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceFisherKernel class.
@@ -177,9 +258,11 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel applied to the persistence Fisher distance (default 1.).
bandwidth_fisher (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions by PersistenceFisherDistance class (default 1.).
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth = bandwidth
- self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+ self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -189,7 +272,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.pf_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -202,5 +285,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
"""
- return np.exp(-self.pf_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher kernel value.
+ """
+ return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth_fisher, kernel_approx=self.kernel_approx) / self.bandwidth)
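
A minimal sketch of the reworked kernel classes, covering the new n_jobs parameter and the per-pair __call__ (the diagrams below are illustrative; essential points with infinite death must be removed beforehand):

    import numpy as np
    from gudhi.representations.kernel_methods import (
        SlicedWassersteinKernel,
        PersistenceScaleSpaceKernel,
    )

    D1 = np.array([[0.0, 1.0], [0.2, 0.5]])
    D2 = np.array([[0.1, 0.8]])

    # Kernel matrix between a fitted list and a query list, on 2 threads.
    swk = SlicedWassersteinKernel(num_directions=10, bandwidth=1.0, n_jobs=2)
    K = swk.fit([D1, D2]).transform([D1])  # shape (1, 2)

    # __call__ evaluates the kernel on a single pair of diagrams.
    value = PersistenceScaleSpaceKernel(bandwidth=1.0)(D1, D2)
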
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
index 5f9ec6ab..142ddef1 100644
--- a/src/python/gudhi/representations/metrics.py
+++ b/src/python/gudhi/representations/metrics.py
@@ -10,31 +10,198 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import pairwise_distances
-try:
- from .. import bottleneck_distance
- USE_GUDHI = True
-except ImportError:
- USE_GUDHI = False
- print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+from gudhi.hera import wasserstein_distance as hera_wasserstein_distance
+from .preprocessing import Padding
+from joblib import Parallel, delayed
#############################################
# Metrics ###################################
#############################################
+def _sliced_wasserstein_distance(D1, D2, num_directions):
+ """
+ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ float: the sliced Wasserstein distance between persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ approx1 = np.matmul(D1, lines)
+ approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines)
+ approx2 = np.matmul(D2, lines)
+ approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines)
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _compute_persistence_diagram_projections(X, num_directions):
+ """
+ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): list of persistence diagrams.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X]
+ return XX
+
+def _sliced_wasserstein_distance_on_projections(D1, D2):
+ """
+ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections.
+ D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections.
+
+ Returns:
+ float: the sliced Wasserstein distance between the projected persistence diagrams.
+ """
+ lim1, lim2 = int(len(D1)/2), int(len(D2)/2)
+ approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:]
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions.
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence Fisher distance between persistence diagrams.
+ """
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections1 = np.matmul(D1, projection)
+ diagonal_projections2 = np.matmul(D2, projection)
+ if kernel_approx is not None:
+ approx1 = kernel_approx.transform(D1)
+ approx_diagonal1 = kernel_approx.transform(diagonal_projections1)
+ approx2 = kernel_approx.transform(D2)
+ approx_diagonal2 = kernel_approx.transform(diagonal_projections2)
+ Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0)
+ U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0)
+ U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+
+def _pairwise(fallback, skipdiag, X, Y, metric, n_jobs):
+ if Y is not None:
+ return fallback(X, Y, metric=metric, n_jobs=n_jobs)
+ triu = np.triu_indices(len(X), k=skipdiag)
+ tril = (triu[1], triu[0])
+ par = Parallel(n_jobs=n_jobs, prefer="threads")
+ d = par(delayed(metric)([triu[0][i]], [triu[1][i]]) for i in range(len(triu[0])))
+ m = np.empty((len(X), len(X)))
+ m[triu] = d
+ m[tril] = d
+ if skipdiag:
+ np.fill_diagonal(m, 0)
+ return m
+
+def _sklearn_wrapper(metric, X, Y, **kwargs):
+ """
+ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments.
+ """
+ if Y is None:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], X[int(b[0])], **kwargs)
+ else:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], Y[int(b[0])], **kwargs)
+ return flat_metric
+
+PAIRWISE_DISTANCE_FUNCTIONS = {
+ "wasserstein": hera_wasserstein_distance,
+ "hera_wasserstein": hera_wasserstein_distance,
+ "persistence_fisher": _persistence_fisher_distance,
+}
+
+def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", n_jobs=None, **kwargs):
+ """
+ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only.
+ metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays.
+ n_jobs (int): number of jobs to use for the computation. This uses joblib.Parallel(prefer="threads"), so metrics that do not release the GIL may not scale unless run inside a `joblib.parallel_backend <https://joblib.readthedocs.io/en/latest/parallel.html#joblib.parallel_backend>`_ block.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): distance matrix
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None or Y is X else np.reshape(np.arange(len(Y)), [-1,1])
+ if metric == "bottleneck":
+ try:
+ from .. import bottleneck_distance
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs), n_jobs=n_jobs)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
+ elif metric == "pot_wasserstein":
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs), n_jobs=n_jobs)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
+ elif metric == "sliced_wasserstein":
+ Xproj = _compute_persistence_diagram_projections(X, **kwargs)
+ Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs)
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj), n_jobs=n_jobs)
+ elif type(metric) == str:
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs), n_jobs=n_jobs)
+ else:
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs), n_jobs=n_jobs)
+
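A hedged usage sketch of the function above, with toy diagrams; the import path is an assumption based on this file's location:

    import numpy as np
    from gudhi.representations.metrics import pairwise_persistence_diagram_distances  # assumed path

    D1 = np.array([[0., 1.], [0.5, 2.]])
    D2 = np.array([[0., 1.5]])
    # Square matrix of pairwise distances within a single list (Y=None).
    M = pairwise_persistence_diagram_distances([D1, D2], metric="hera_wasserstein", n_jobs=1)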
class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
"""
    This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
"""
- def __init__(self, num_directions=10):
+ def __init__(self, num_directions=10, n_jobs=None):
"""
Constructor for the SlicedWassersteinDistance class.
Parameters:
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.num_directions = num_directions
- thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
- self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -45,9 +212,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- diag_proj = (1./2) * np.ones((2,2))
- self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
return self
def transform(self, X):
@@ -60,40 +224,37 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
"""
- Xfit = np.zeros((len(X), len(self.approx_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.approx_)):
- for j in range(i+1, len(self.approx_)):
- A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
- Xfit[j,i] = Xfit[i,j]
- else:
- diag_proj = (1./2) * np.ones((2,2))
- approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
- for i in range(len(approx)):
- for j in range(len(self.approx_)):
- A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions, n_jobs=self.n_jobs)
- return Xfit
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein distance.
+ """
+ return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)
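A short fit/transform sketch for this estimator, on toy diagrams:

    import numpy as np

    SW = SlicedWassersteinDistance(num_directions=20)
    diag1 = np.array([[0., 1.], [0.5, 2.]])
    diag2 = np.array([[0., 1.5]])
    SW.fit([diag1])             # stores the reference diagrams
    M = SW.transform([diag2])   # 1 x 1 matrix of sliced Wasserstein distances
    d = SW(diag1, diag2)        # same metric on a single pair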
class BottleneckDistance(BaseEstimator, TransformerMixin):
"""
- This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+
+ :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
"""
- def __init__(self, epsilon=None):
+ def __init__(self, epsilon=None, n_jobs=None):
"""
Constructor for the BottleneckDistance class.
Parameters:
epsilon (double): absolute (additive) error tolerated on the distance (default is the smallest positive float), see :func:`gudhi.bottleneck_distance`.
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.epsilon = epsilon
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -116,48 +277,42 @@ class BottleneckDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
"""
- num_diag1 = len(X)
-
- #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- if X is self.diagrams_:
- matrix = np.zeros((num_diag1, num_diag1))
-
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(i+1, num_diag1):
- matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
- matrix[j,i] = matrix[i,j]
- else:
- print("Gudhi built without CGAL: returning a null matrix")
-
- else:
- num_diag2 = len(self.diagrams_)
- matrix = np.zeros((num_diag1, num_diag2))
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon, n_jobs=self.n_jobs)
+ return Xfit
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(num_diag2):
- matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
- else:
- print("Gudhi built without CGAL: returning a null matrix")
+ def __call__(self, diag1, diag2):
+ """
+ Apply BottleneckDistance on a single pair of persistence diagrams and output the result.
- Xfit = matrix
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
- return Xfit
+ Returns:
+ float: bottleneck distance.
+ """
+ try:
+ from .. import bottleneck_distance
+ return bottleneck_distance(diag1, diag2, e=self.epsilon)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
"""
- def __init__(self, bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceFisherDistance class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions (default 1.).
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -168,11 +323,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- projection = (1./2) * np.ones((2,2))
- self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
return self
def transform(self, X):
@@ -185,60 +335,92 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
"""
- Xfit = np.zeros((len(X), len(self.diagrams_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
- else:
- Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher distance.
+ """
+ return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+
+class WassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams.
+ """
+
+ def __init__(self, order=1, internal_p=np.inf, mode="hera", delta=0.01, n_jobs=None):
+ """
+ Constructor for the WassersteinDistance class.
+
+ Parameters:
+ order (float): exponent for the Wasserstein metric, default value is 1, see :func:`gudhi.wasserstein.wasserstein_distance`.
+ internal_p (float): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is `np.inf`, see :func:`gudhi.wasserstein.wasserstein_distance`.
+ mode (str): method for computing Wasserstein distance. Either "pot" or "hera". Default set to "hera".
+ delta (float): relative error 1+delta. Used only if mode == "hera".
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
+ """
+ self.order, self.internal_p, self.mode = order, internal_p, mode
+ if mode == "pot":
+ self.metric = "pot_wasserstein"
+ elif mode == "hera":
+ self.metric = "hera_wasserstein"
+ else:
+ raise NameError("Unknown mode. Current available values for mode are 'hera' and 'pot'")
+ self.delta = delta
+ self.n_jobs = n_jobs
+
+ def fit(self, X, y=None):
+ """
+ Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances.
+ """
+ if self.metric == "hera_wasserstein":
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta, n_jobs=self.n_jobs)
else:
- projection = (1./2) * np.ones((2,2))
- diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
- for i in range(len(X)):
- for j in range(len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- else:
- Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False, n_jobs=self.n_jobs)
return Xfit
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply WassersteinDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: Wasserstein distance.
+ """
+ if self.metric == "hera_wasserstein":
+ return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta)
+ else:
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
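The two backends trade exactness for speed: Hera returns a (1+delta)-approximation, while "pot" relies on the POT optimal-transport solver and requires POT to be installed. A toy sketch of using the class:

    import numpy as np

    diag1, diag2 = np.array([[0., 1.]]), np.array([[0., 1.2]])
    wd = WassersteinDistance(order=1, internal_p=2, mode="hera", delta=0.01)
    d = wd(diag1, diag2)                    # single-pair distance
    M = wd.fit([diag1]).transform([diag2])  # 1 x 1 distance matrix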
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a39b00e4..8722e162 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -1,10 +1,11 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Vincent Rouvreau
#
# Copyright (C) 2018-2019 Inria
#
# Modification(s):
+# - 2021/10 Vincent Rouvreau: Add DimensionSelector
# - YYYY/MM Author: Description of the modification
import numpy as np
@@ -54,6 +55,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
Xfit.append(new_diag)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BirthPersistenceTransform on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
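As a quick illustration of the `__call__` shorthand above: BirthPersistenceTransform maps each diagram point (birth, death) to (birth, death - birth). A minimal sketch on a toy diagram:

    import numpy as np

    bpt = BirthPersistenceTransform()
    diag = np.array([[1., 3.], [2., 5.]])
    # Each (birth, death) becomes (birth, persistence) = (birth, death - birth).
    print(bpt(diag))   # expected: [[1., 2.], [2., 3.]]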
class Clamping(BaseEstimator, TransformerMixin):
"""
This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
@@ -63,7 +76,7 @@ class Clamping(BaseEstimator, TransformerMixin):
Constructor for the Clamping class.
Parameters:
- limit (double): clamping value (default np.inf).
+ limit (float): clamping value (default np.inf).
"""
self.minimum = minimum
self.maximum = maximum
@@ -142,6 +155,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin):
Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
return Xfit
+ def __call__(self, diag):
+ """
+ Apply DiagramScaler on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Padding(BaseEstimator, TransformerMixin):
"""
This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
@@ -186,6 +211,18 @@ class Padding(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Padding on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: padded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class ProminentPoints(BaseEstimator, TransformerMixin):
"""
This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
@@ -198,7 +235,7 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
use (bool): whether to use the class or not (default False).
location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
- threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ threshold (float): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
"""
self.num_pts = num_pts
self.threshold = threshold
@@ -259,6 +296,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply ProminentPoints on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: thresholded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class DiagramSelector(BaseEstimator, TransformerMixin):
"""
This is a class for extracting finite or essential points in persistence diagrams.
@@ -269,7 +318,7 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
Parameters:
use (bool): whether to use the class or not (default False).
- limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ limit (float): second coordinate value that is the criterion for being an essential point (default numpy.inf).
point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
"""
self.use, self.limit, self.point_type = use, limit, point_type
@@ -303,3 +352,63 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
else:
Xfit = X
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply DiagramSelector on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: extracted persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>DimensionSelector: fit_transform(<br/>[[array( Hi(X0) ), array( Hj(X0) ), ...],<br/> [array( Hi(X1) ), array( Hj(X1) ), ...],<br/> ...])
+# DimensionSelector->>thread1: _transform([array( Hi(X0) ), array( Hj(X0) )], ...)
+# DimensionSelector->>thread2: _transform([array( Hi(X1) ), array( Hj(X1) )], ...)
+# Note right of DimensionSelector: ...
+# thread1->>DimensionSelector: array( Hn(X0) )
+# thread2->>DimensionSelector: array( Hn(X1) )
+# Note right of DimensionSelector: ...
+# DimensionSelector->>USER: [array( Hn(X0) ), <br/> array( Hn(X1) ), <br/> ...]
+
+class DimensionSelector(BaseEstimator, TransformerMixin):
+ """
+ This is a class to select persistence diagrams in a specific dimension from its index.
+ """
+
+ def __init__(self, index=0):
+ """
+ Constructor for the DimensionSelector class.
+
+ Parameters:
+ index (int): The dimension index of the persistence diagrams to return. Default value is `0`.
+ """
+ self.index = index
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def transform(self, X, Y=None):
+ """
+ Select persistence diagrams by their dimension index.
+
+ Parameters:
+ X (list of list of tuple): List of list of persistence pairs, i.e.
+ `[[array( Hi(X0) ), array( Hj(X0) ), ...], [array( Hi(X1) ), array( Hj(X1) ), ...], ...]`
+
+ Returns:
+ list of tuple:
+ Persistence diagrams in a specific dimension, i.e. if `index` was set to `m` and `Hn` is at index `m` of
+ the input, it returns `[array( Hn(X0) ), array( Hn(X1) ), ...]`
+ """
+
+ return [persistence[self.index] for persistence in X]
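A usage sketch for DimensionSelector on toy data with two samples, each carrying an H0 and an H1 diagram:

    import numpy as np

    H0_X0, H1_X0 = np.array([[0., 1.]]), np.array([[0.5, 2.]])
    H0_X1, H1_X1 = np.array([[0., 3.]]), np.array([[1., 4.]])
    ds = DimensionSelector(index=1)
    # Selects the diagram at index 1 (here H1) for every sample.
    diags_H1 = ds.fit_transform([[H0_X0, H1_X0], [H0_X1, H1_X1]])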
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index fe26dbe2..ce74aee5 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -1,16 +1,25 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Martin Royer, Gard Spreemann
#
-# Copyright (C) 2018-2019 Inria
+# Copyright (C) 2018-2020 Inria
#
# Modification(s):
-# - YYYY/MM Author: Description of the modification
+# - 2020/06 Martin: ATOL integration
+# - 2020/12 Gard: A more flexible Betti curve class capable of computing exact curves.
+# - 2021/11 Vincent Rouvreau: factorize _automatic_sample_range
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
-from sklearn.neighbors import DistanceMetric
+from sklearn.metrics import pairwise
+try:
+ # New location since 1.0
+ from sklearn.metrics import DistanceMetric
+except ImportError:
+ # Will be removed in 1.3
+ from sklearn.neighbors import DistanceMetric
from .preprocessing import DiagramScaler, BirthPersistenceTransform
@@ -44,10 +53,14 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
if np.isnan(np.array(self.im_range)).any():
- new_X = BirthPersistenceTransform().fit_transform(X)
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ try:
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
return self
def transform(self, X):
@@ -77,15 +90,73 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
Xfit.append(image.flatten()[np.newaxis,:])
- Xfit = np.concatenate(Xfit,0)
+ Xfit = np.concatenate(Xfit, 0)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply PersistenceImage on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**): output persistence image.
+ """
+ return self.fit_transform([diag])[0,:]
+
+def _automatic_sample_range(sample_range, X):
+ """
+ Compute and return the sample range from the persistence diagrams if one of the sample_range values is numpy.nan.
+
+ Parameters:
+ sample_range (a numpy array of 2 float): minimum and maximum of all piecewise-linear function domains, of
+ the form [x_min, x_max].
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ """
+ nan_in_range = np.isnan(sample_range)
+ if nan_in_range.any():
+ try:
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X)
+ [mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
+ [Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ return np.where(nan_in_range, np.array([mx, My]), sample_range)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
+ return sample_range
+
+
+def _trim_endpoints(x, are_endpoints_nan):
+ if are_endpoints_nan[0]:
+ x = x[1:]
+ if are_endpoints_nan[1]:
+ x = x[:-1]
+ return x
+
+
+def _grid_from_sample_range(self, X):
+ sample_range = np.array(self.sample_range)
+ self.nan_in_range = np.isnan(sample_range)
+ self.new_resolution = self.resolution
+ if not self.keep_endpoints:
+ self.new_resolution += self.nan_in_range.sum()
+ self.sample_range_fixed = _automatic_sample_range(sample_range, X)
+ self.grid_ = np.linspace(self.sample_range_fixed[0], self.sample_range_fixed[1], self.new_resolution)
+ if not self.keep_endpoints:
+ self.grid_ = _trim_endpoints(self.grid_, self.nan_in_range)
+
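When a `sample_range` endpoint is numpy.nan and `keep_endpoints` is False, the helper above adds one extra sample per estimated endpoint and then trims those endpoint samples (where the piecewise-linear functions vanish), so the grid keeps the requested interior resolution. A toy illustration of the trimming step:

    import numpy as np

    grid = np.linspace(0., 1., 102)             # resolution 100 plus 2 nan endpoints
    grid = _trim_endpoints(grid, [True, True])  # drop both estimated endpoints
    assert len(grid) == 100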
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the landscapes are computed.
"""
- def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Landscape class.
@@ -93,10 +164,10 @@ class Landscape(BaseEstimator, TransformerMixin):
num_landscapes (int): number of piecewise-linear functions to output (default 5).
        resolution (int): number of samples for all piecewise-linear functions (default 100).
sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -106,10 +177,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if self.nan_in_range.any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(self.nan_in_range, np.array([mx, My]), np.array(self.sample_range))
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -122,59 +190,47 @@ class Landscape(BaseEstimator, TransformerMixin):
Returns:
numpy array with shape (number of diagrams) x (number of samples = **num_landscapes** x **resolution**): output persistence landscapes.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- step_x = x_values[1] - x_values[0]
- for i in range(num_diag):
-
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
-
- ls = np.zeros([self.num_landscapes, self.new_resolution])
-
- events = []
- for j in range(self.new_resolution):
- events.append([])
-
- for j in range(num_pts_in_diag):
- [px,py] = diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
- mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
-
- if min_idx < self.new_resolution and max_idx > 0:
-
- landscape_value = self.sample_range[0] + min_idx * step_x - px
- for k in range(min_idx, mid_idx):
- events[k].append(landscape_value)
- landscape_value += step_x
+ Xfit = []
+ x_values = self.grid_
+ for diag in X:
+ midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
+ tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
+ n_points = diag.shape[0]
+ # Complete the array with zeros to get the right number of landscapes
+ if self.num_landscapes > n_points:
+ tent_functions = np.concatenate(
+ [tent_functions, np.zeros((tent_functions.shape[0], self.num_landscapes-n_points))],
+ axis=1
+ )
+ tent_functions.partition(tent_functions.shape[1]-self.num_landscapes, axis=1)
+ landscapes = np.sort(tent_functions[:, -self.num_landscapes:], axis=1)[:, ::-1].T
- landscape_value = py - self.sample_range[0] - mid_idx * step_x
- for k in range(mid_idx, max_idx):
- events[k].append(landscape_value)
- landscape_value -= step_x
+ landscapes = np.sqrt(2) * np.ravel(landscapes)
+ Xfit.append(landscapes)
- for j in range(self.new_resolution):
- events[j].sort(reverse=True)
- for k in range( min(self.num_landscapes, len(events[j])) ):
- ls[k,j] = events[j][k]
+ return np.stack(Xfit, axis=0)
- if self.nan_in_range[0]:
- ls = ls[:,1:]
- if self.nan_in_range[1]:
- ls = ls[:,:-1]
- ls = np.sqrt(2)*np.reshape(ls,[1,-1])
- Xfit.append(ls)
+ def __call__(self, diag):
+ """
+ Apply Landscape on a single persistence diagram and output the result.
- Xfit = np.concatenate(Xfit,0)
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
- return Xfit
+ Returns:
+ numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape.
+ """
+ return self.fit_transform([diag])[0, :]
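The vectorised transform above evaluates, for every diagram point (b, d), the tent function max(h - |x - m|, 0) with midpoint m = (b+d)/2 and height h = (d-b)/2, then keeps the num_landscapes largest values at each grid point. A standalone sketch of the tent evaluation on toy data:

    import numpy as np

    diag = np.array([[0., 2.], [1., 3.]])
    x_values = np.linspace(0., 3., 7)
    midpoints = (diag[:, 0] + diag[:, 1]) / 2.
    heights = (diag[:, 1] - diag[:, 0]) / 2.
    # tents[i, j] is the tent of diagram point j evaluated at grid value i.
    tents = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)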
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the silhouette is computed.
"""
- def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Silhouette class.
@@ -182,8 +238,10 @@ class Silhouette(BaseEstimator, TransformerMixin):
        weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. on lists or numpy arrays of the form [p_x,p_y].
resolution (int): number of samples for the weighted average (default 100).
sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -193,10 +251,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -209,110 +264,200 @@ class Silhouette(BaseEstimator, TransformerMixin):
Returns:
numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
+ Xfit = []
+ x_values = self.grid_
- for i in range(num_diag):
+ for diag in X:
+ midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
+ weights = np.array([self.weight(pt) for pt in diag])
+ total_weight = np.sum(weights)
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
+ silhouette = np.sum(weights[None, :] / total_weight * tent_functions, axis=1)
+ Xfit.append(silhouette * np.sqrt(2))
- sh, weights = np.zeros(self.resolution), np.zeros(num_pts_in_diag)
- for j in range(num_pts_in_diag):
- weights[j] = self.weight(diagram[j,:])
- total_weight = np.sum(weights)
+ return np.stack(Xfit, axis=0)
- for j in range(num_pts_in_diag):
+ def __call__(self, diag):
+ """
+ Apply Silhouette on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output persistence silhouette.
+ """
+ return self.fit_transform([diag])[0,:]
- [px,py] = diagram[j,:2]
- weight = weights[j] / total_weight
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- if min_idx < self.resolution and max_idx > 0:
+class BettiCurve(BaseEstimator, TransformerMixin):
+ """
+ Compute Betti curves from persistence diagrams. There are several modes of operation: with a given resolution (with or without a sample_range), with a predefined grid, and with none of the previous. With a predefined grid, the class computes the Betti numbers at those grid points. Without a predefined grid, if the resolution is set to None, the class can be fitted to a list of persistence diagrams and produce a grid that consists of (at least) the filtration values at which at least one of those persistence diagrams changes Betti numbers; the Betti numbers are then computed at those grid points, and in this mode the exact Betti curve is obtained for the entire real line. Otherwise, if the resolution is given, the Betti curve is obtained by sampling evenly, using either the given sample_range or a range computed from the persistence diagrams.
- silhouette_value = self.sample_range[0] + min_idx * step_x - px
- for k in range(min_idx, mid_idx):
- sh[k] += weight * silhouette_value
- silhouette_value += step_x
+ Examples
+ --------
+ If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
- silhouette_value = py - self.sample_range[0] - mid_idx * step_x
- for k in range(mid_idx, max_idx):
- sh[k] += weight * silhouette_value
- silhouette_value -= step_x
+ >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
+ >>> result = bc(pd) # doctest: +SKIP
- Xfit.append(np.reshape(np.sqrt(2) * sh, [1,-1]))
+ and
- Xfit = np.concatenate(Xfit, 0)
+ >>> from scipy.interpolate import interp1d # doctest: +SKIP
+ >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
+ >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
+ >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
+ >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
- return Xfit
+ are the same.
-class BettiCurve(BaseEstimator, TransformerMixin):
+ Attributes
+ ----------
+ grid_ : 1d array
+ The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not and resolution is None, the grid is fitted to capture all filtration values at which the Betti numbers change.
"""
- This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
- """
- def __init__(self, resolution=100, sample_range=[np.nan, np.nan]):
+
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None, *, keep_endpoints=False):
"""
Constructor for the BettiCurve class.
Parameters:
- resolution (int): number of sample for the piecewise-constant function (default 100).
+ resolution (int): number of samples for the piecewise-constant function (default 100), or None for the exact curve.
sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ keep_endpoints (bool): when computing `sample_range` (fixed `resolution`, no `predefined_grid`), use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
- self.resolution, self.sample_range = resolution, sample_range
- def fit(self, X, y=None):
+ if (predefined_grid is not None) and (not isinstance(predefined_grid, np.ndarray)):
+ raise ValueError("Expected predefined_grid as array or None.")
+
+ self.predefined_grid = predefined_grid
+ self.resolution = resolution
+ self.sample_range = sample_range
+ self.keep_endpoints = keep_endpoints
+
+ def is_fitted(self):
+ return hasattr(self, "grid_")
+
+ def fit(self, X, y=None):
"""
- Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+ Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams. When no predefined grid is provided and resolution is set to None, compute a filtration grid that captures all changes in Betti numbers for all the given persistence diagrams.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
- y (n x 1 array): persistence diagram labels (unused).
+ X (list of 2d arrays): Persistence diagrams.
+ y (None): Ignored.
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+
+ if self.predefined_grid is None:
+ if self.resolution is None: # Flexible/exact version
+ events = np.unique(np.concatenate([pd.flatten() for pd in X] + [[-np.inf]], axis=0))
+ self.grid_ = np.array(events)
+ else:
+ _grid_from_sample_range(self, X)
+ else:
+ self.grid_ = self.predefined_grid # Get the predefined grid from user
+
return self
def transform(self, X):
"""
- Compute the Betti curve for each persistence diagram individually and concatenate the results.
+ Compute Betti curves.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
-
+ X (list of 2d arrays): Persistence diagrams.
+
Returns:
- numpy array with shape (number of diagrams) x (**resolution**): output Betti curves.
+ `len(X) x len(self.grid_)` array of ints: Betti numbers of the given persistence diagrams at the grid points given in `self.grid_`.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
- for i in range(num_diag):
+ if not self.is_fitted():
+ raise NotFittedError("Not fitted.")
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ if not X:
+ X = [np.zeros((0, 2))]
+
+ N = len(X)
- bc = np.zeros(self.resolution)
- for j in range(num_pts_in_diag):
- [px,py] = diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- for k in range(min_idx, max_idx):
- bc[k] += 1
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
- Xfit.append(np.reshape(bc,[1,-1]))
+ bettis = [[0] for i in range(0, N)]
+
+ i = 0
+ for x in self.grid_:
+ while i < len(sorting) and events[sorting[i]] <= x:
+ j = np.searchsorted(ends, sorting[i])
+ delta = 1 if sorting[i] - starts[j] < len(X[j]) else -1
+ bettis[j][-1] += delta
+ i += 1
+ for k in range(0, N):
+ bettis[k].append(bettis[k][-1])
+
+ return np.array(bettis, dtype=int)[:, 0:-1]
+
+ def fit_transform(self, X):
+ """
+ The result is the same as fit(X) followed by transform(X), but potentially faster.
+ """
+
+ if self.predefined_grid is None and self.resolution is None:
+ if not X:
+ X = [np.zeros((0, 2))]
+
+ N = len(X)
+
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
+
+ xs = [-np.inf]
+ bettis = [[0] for i in range(0, N)]
+
+ for i in sorting:
+ j = np.searchsorted(ends, i)
+ delta = 1 if i - starts[j] < len(X[j]) else -1
+ if events[i] == xs[-1]:
+ bettis[j][-1] += delta
+ else:
+ xs.append(events[i])
+ for k in range(0, j):
+ bettis[k].append(bettis[k][-1])
+ bettis[j].append(bettis[j][-1] + delta)
+ for k in range(j+1, N):
+ bettis[k].append(bettis[k][-1])
+
+ self.grid_ = np.array(xs)
+ return np.array(bettis, dtype=int)
+
+ else:
+ return self.fit(X).transform(X)
+
+ def __call__(self, diag):
+ """
+ Shorthand for transform on a single persistence diagram.
+ """
+ return self.fit_transform([diag])[0, :]
- Xfit = np.concatenate(Xfit, 0)
- return Xfit
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+
+ Attributes:
+ grid_ (1d array): In vector mode, the grid on which the entropy summary function is computed.
"""
- def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Entropy class.
@@ -321,8 +466,10 @@ class Entropy(BaseEstimator, TransformerMixin):
normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
        resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -332,10 +479,9 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ if self.mode == "vector":
+ _grid_from_sample_range(self, X)
+ self.step_ = self.grid_[1] - self.grid_[0]
return self
def transform(self, X):
@@ -349,34 +495,41 @@ class Entropy(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
"""
num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
-
- orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
- new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
-
+ orig_diagram, new_diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
+
+ p = new_diagram[:,1]
+ p = p/np.sum(p)
if self.mode == "scalar":
- ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
+ ent = -np.dot(p, np.log(p))
Xfit.append(np.array([[ent]]))
-
else:
ent = np.zeros(self.resolution)
for j in range(num_pts_in_diag):
[px,py] = orig_diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- for k in range(min_idx, max_idx):
- ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
- if self.normalized:
- ent = ent / np.linalg.norm(ent, ord=1)
- Xfit.append(np.reshape(ent,[1,-1]))
+ min_idx = np.clip(np.ceil((px - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
+ ent[min_idx:max_idx] -= p[j] * np.log(p[j])
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, axis=0)
+ return Xfit
- Xfit = np.concatenate(Xfit, 0)
+ def __call__(self, diag):
+ """
+ Apply Entropy on a single persistence diagram and output the result.
- return Xfit
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ return self.fit_transform([diag])[0,:]
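In scalar mode, the value computed above is the Shannon entropy of the normalised persistences p_j = (d_j - b_j) / sum_k (d_k - b_k). A toy check, which should agree with Entropy(mode="scalar"):

    import numpy as np

    diag = np.array([[0., 1.], [0., 3.]])
    pers = diag[:, 1] - diag[:, 0]
    p = pers / np.sum(pers)
    ent = -np.dot(p, np.log(p))   # expected to match Entropy(mode="scalar")(diag)[0]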
class TopologicalVector(BaseEstimator, TransformerMixin):
"""
@@ -424,13 +577,31 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
diagram, num_pts_in_diag = X[i], X[i].shape[0]
pers = 0.5 * (diagram[:,1]-diagram[:,0])
min_pers = np.minimum(pers,np.transpose(pers))
- distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ # Works fine with sklearn 1.0, but a ValueError exception is thrown on older versions
+ try:
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ distances = np.empty(shape = [0, 0])
vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
dim = min(len(vect), thresh)
Xfit[i, :dim] = vect[:dim]
return Xfit
+ def __call__(self, diag):
+ """
+ Apply TopologicalVector on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output topological vector.
+ """
+ return self.fit_transform([diag])[0,:]
+
class ComplexPolynomial(BaseEstimator, TransformerMixin):
"""
This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
@@ -490,3 +661,155 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin):
coeff = np.array(coeff[::-1])[1:]
Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply ComplexPolynomial on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output complex vector of coefficients.
+ """
+ return self.fit_transform([diag])[0,:]
+
+def _lapl_contrast(measure, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ return np.exp(-pairwise.pairwise_distances(measure, Y=centers) / inertias)
+
+def _gaus_contrast(measure, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ return np.exp(-pairwise.pairwise_distances(measure, Y=centers, squared=True) / inertias**2)
+
+def _indicator_contrast(diags, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ robe_curve = np.clip(2-pairwise.pairwise_distances(diags, Y=centers)/inertias, 0, 1)
+ return robe_curve
+
+def _cloud_weighting(measure):
+ """automatic uniform weighting with mass 1 for `measure` in ATOL"""
+ return np.ones(shape=measure.shape[0])
+
+def _iidproba_weighting(measure):
+ """automatic uniform weighting with mass 1/N for `measure` in ATOL"""
+ return np.ones(shape=measure.shape[0]) / measure.shape[0]
+
+class Atol(BaseEstimator, TransformerMixin):
+ """
+ This class allows one to vectorise measures (e.g. point clouds, persistence diagrams, etc.) after a quantisation step.
+
+ ATOL paper: :cite:`royer2019atol`
+
+ Example
+ --------
+ >>> from sklearn.cluster import KMeans
+ >>> from gudhi.representations.vector_methods import Atol
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ >>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ >>> c = np.array([[3, 2, -1], [1, 2, -1]])
+ >>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers
+ array([[ 2.6 , 2.8 , -0.4 ],
+ [ 2. , 0.66666667, 3.33333333]])
+ >>> atol_vectoriser(a)
+ array([0.42375966, 1.18168665])
+ >>> atol_vectoriser(c)
+ array([1.25157463, 0.02062512])
+ >>> atol_vectoriser.transform(X=[a, b, c])
+ array([[0.42375966, 1.18168665],
+ [1.06330156, 0.29861028],
+ [1.25157463, 0.02062512]])
+ """
+ # Note the example above must be up to date with the one in tests called test_atol_doc
+ def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
+ """
+ Constructor for the Atol measure vectorisation class.
+
+ Parameters:
+ quantiser (Object): Object with a `fit` method (sklearn API consistent) and `cluster_centers_` and `n_clusters`
+ attributes, e.g. sklearn.cluster.KMeans. It will be fitted when the Atol object function `fit` is called.
+ weighting_method (string): constant generic function for weighting the measure points;
+ choose from {"cloud", "iidproba"}
+ (default: constant function, i.e. the measure is seen as a point cloud by default).
+ This has no impact if weights are provided along with the measures in both `fit` and `transform`.
+ contrast (string): constant function for evaluating proximity of a measure with respect to centers
+ choose from {"gaussian", "laplacian", "indicator"}
+ (default: gaussian contrast function, see page 3 in the ATOL paper).
+ """
+ self.quantiser = quantiser
+ self.contrast = {
+ "gaussian": _gaus_contrast,
+ "laplacian": _lapl_contrast,
+ "indicator": _indicator_contrast,
+ }.get(contrast, _gaus_contrast)
+ self.weighting_method = {
+ "cloud" : _cloud_weighting,
+ "iidproba": _iidproba_weighting,
+ }.get(weighting_method, _cloud_weighting)
+
+ def fit(self, X, y=None, sample_weight=None):
+ """
+ Calibration step: fit centers to the sample measures and derive inertias between centers.
+
+ Parameters:
+ X (list N x d numpy arrays): input measures in R^d from which to learn center locations and inertias
+ (measures can have different N).
+ y: Ignored, present for API consistency by convention.
+ sample_weight (list of numpy arrays): weights for each measure point in X, optional.
+ If None, the object's weighting_method will be used.
+
+ Returns:
+ self
+ """
+ if not hasattr(self.quantiser, 'fit'):
+ raise TypeError("quantiser %s has no `fit` attribute." % (self.quantiser))
+ if sample_weight is None:
+ sample_weight = np.concatenate([self.weighting_method(measure) for measure in X])
+ else:
+ # The docstring allows a list of per-measure weight arrays; the quantiser expects one flat array.
+ sample_weight = np.concatenate(sample_weight)
+
+ measures_concat = np.concatenate(X)
+ self.quantiser.fit(X=measures_concat, sample_weight=sample_weight)
+ self.centers = self.quantiser.cluster_centers_
+ # Hack, but some people are unhappy if the order depends on the version of sklearn
+ self.centers = self.centers[np.lexsort(self.centers.T)]
+ if self.quantiser.n_clusters == 1:
+ dist_centers = pairwise.pairwise_distances(measures_concat)
+ np.fill_diagonal(dist_centers, 0)
+ self.inertias = np.array([np.max(dist_centers)/2])
+ else:
+ dist_centers = pairwise.pairwise_distances(self.centers)
+ dist_centers[dist_centers == 0] = np.inf
+ self.inertias = np.min(dist_centers, axis=0)/2
+ return self
+
+ def __call__(self, measure, sample_weight=None):
+ """
+ Apply measure vectorisation on a single measure.
+
+ Parameters:
+ measure (n x d numpy array): input measure in R^d.
+
+ Returns:
+ numpy array in R^self.quantiser.n_clusters.
+ """
+ if sample_weight is None:
+ sample_weight = self.weighting_method(measure)
+ return np.sum(sample_weight * self.contrast(measure, self.centers, self.inertias.T).T, axis=1)
+
+ def transform(self, X, sample_weight=None):
+ """
+ Apply measure vectorisation on a list of measures.
+
+ Parameters:
+ X (list N x d numpy arrays): input measures in R^d to vectorise
+ (measures can have different N).
+ sample_weight (list of numpy arrays): weights for each measure point in X, optional.
+ If None, the object's weighting_method will be used.
+
+ Returns:
+ numpy array with shape (number of measures) x (self.quantiser.n_clusters).
+ """
+ if sample_weight is None:
+ sample_weight = [self.weighting_method(measure) for measure in X]
+ return np.stack([self(measure, sample_weight=weight) for measure, weight in zip(X, sample_weight)])
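As a usage sketch of the non-default options above (reusing the toy measures from the class doctest; the option values are illustrative):

    from sklearn.cluster import KMeans
    from gudhi.representations.vector_methods import Atol
    import numpy as np

    a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
    b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
    c = np.array([[3, 2, -1], [1, 2, -1]])

    # "indicator" contrast and mass-1/N weighting instead of the defaults
    vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006),
                      weighting_method="iidproba", contrast="indicator")
    vectoriser.fit([a, b, c])
    vectors = vectoriser.transform([a, b, c])   # shape (3, 2): one vector per measure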
diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx
index deb8057a..d748f91e 100644
--- a/src/python/gudhi/rips_complex.pyx
+++ b/src/python/gudhi/rips_complex.pyx
@@ -23,12 +23,12 @@ __license__ = "MIT"
cdef extern from "Rips_complex_interface.h" namespace "Gudhi":
cdef cppclass Rips_complex_interface "Gudhi::rips_complex::Rips_complex_interface":
- Rips_complex_interface()
- void init_points(vector[vector[double]] values, double threshold)
- void init_matrix(vector[vector[double]] values, double threshold)
- void init_points_sparse(vector[vector[double]] values, double threshold, double sparse)
- void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse)
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) except +
+ Rips_complex_interface() nogil
+ void init_points(vector[vector[double]] values, double threshold) nogil
+ void init_matrix(vector[vector[double]] values, double threshold) nogil
+ void init_points_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) nogil except +
# RipsComplex python interface
cdef class RipsComplex:
@@ -41,31 +41,30 @@ cdef class RipsComplex:
cdef Rips_complex_interface thisref
# Fake constructor that does nothing but documenting the constructor
- def __init__(self, points=None, distance_matrix=None,
+ def __init__(self, *, points=None, distance_matrix=None,
max_edge_length=float('inf'), sparse=None):
"""RipsComplex constructor.
- :param max_edge_length: Rips value.
- :type max_edge_length: float
-
:param points: A list of points in d-Dimension.
- :type points: list of list of double
+ :type points: List[List[float]]
Or
:param distance_matrix: A distance matrix (full square or lower
triangular).
- :type points: list of list of double
+ :type distance_matrix: List[List[float]]
And in both cases
+ :param max_edge_length: Rips value.
+ :type max_edge_length: float
:param sparse: If this is not None, it switches to building a sparse
Rips and represents the approximation parameter epsilon.
:type sparse: float
"""
# The real cython constructor
- def __cinit__(self, points=None, distance_matrix=None,
+ def __cinit__(self, *, points=None, distance_matrix=None,
max_edge_length=float('inf'), sparse=None):
if sparse is not None:
if distance_matrix is not None:
@@ -89,14 +88,15 @@ cdef class RipsComplex:
def create_simplex_tree(self, max_dimension=1):
"""
- :param max_dimension: graph expansion for rips until this given maximal
+ :param max_dimension: the Rips graph is expanded until this given maximal
dimension.
:type max_dimension: int
- :returns: A simplex tree created from the Delaunay Triangulation.
+ :returns: A simplex tree encoding the Vietoris–Rips filtration.
:rtype: SimplexTree
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
- max_dimension)
+ cdef int maxdim = max_dimension
+ with nogil:
+ self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, maxdim)
return stree
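A minimal usage sketch of the keyword-only constructor and `create_simplex_tree` above (the point set is illustrative):

    import gudhi

    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    # Arguments are keyword-only, as enforced by the `*` in the signatures above
    rips = gudhi.RipsComplex(points=points, max_edge_length=2.0)
    stree = rips.create_simplex_tree(max_dimension=2)

    # Sparse (approximate) Rips: `sparse` is the approximation parameter epsilon
    sparse_rips = gudhi.RipsComplex(points=points, sparse=0.5)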
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 96d14079..5309c6fa 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -21,35 +21,76 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_options_full_featured:
pass
+ cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Simplex_handle":
+ pass
+
+ cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Complex_simplex_iterator":
+ Simplex_tree_simplices_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_simplices_iterator operator++() nogil
+ bint operator!=(Simplex_tree_simplices_iterator) nogil
+
+ cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Skeleton_simplex_iterator":
+ Simplex_tree_skeleton_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_skeleton_iterator operator++() nogil
+ bint operator!=(Simplex_tree_skeleton_iterator) nogil
+
+ cdef cppclass Simplex_tree_boundary_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Boundary_simplex_iterator":
+ Simplex_tree_boundary_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_boundary_iterator operator++() nogil
+ bint operator!=(Simplex_tree_boundary_iterator) nogil
+
+
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
- Simplex_tree()
- double simplex_filtration(vector[int] simplex)
- void assign_simplex_filtration(vector[int] simplex, double filtration)
- void initialize_filtration()
- int num_vertices()
- int num_simplices()
- void set_dimension(int dimension)
- int dimension()
- int upper_bound_dimension()
- bool find_simplex(vector[int] simplex)
- bool insert_simplex_and_subfaces(vector[int] simplex,
- double filtration)
- vector[pair[vector[int], double]] get_filtration()
- vector[pair[vector[int], double]] get_skeleton(int dimension)
- vector[pair[vector[int], double]] get_star(vector[int] simplex)
- vector[pair[vector[int], double]] get_cofaces(vector[int] simplex,
- int dimension)
- void expansion(int max_dim) except +
- void remove_maximal_simplex(vector[int] simplex)
- bool prune_above_filtration(double filtration)
- bool make_filtration_non_decreasing()
+ Simplex_tree_interface_full_featured() nogil
+ Simplex_tree_interface_full_featured(Simplex_tree_interface_full_featured&) nogil
+ double simplex_filtration(vector[int] simplex) nogil
+ void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
+ void initialize_filtration() nogil
+ int num_vertices() nogil
+ int num_simplices() nogil
+ void set_dimension(int dimension) nogil
+ int dimension() nogil
+ int upper_bound_dimension() nogil
+ bool find_simplex(vector[int] simplex) nogil
+ bool insert(vector[int] simplex, double filtration) nogil
+ void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) nogil except +
+ void insert_batch_vertices(vector[int] v, double f) nogil except +
+ vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil
+ vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil
+ void expansion(int max_dim) nogil except +
+ void remove_maximal_simplex(vector[int] simplex) nogil
+ bool prune_above_filtration(double filtration) nogil
+ bool make_filtration_non_decreasing() nogil
+ void compute_extended_filtration() nogil
+ Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil except +
+ void reset_filtration(double filtration, int dimension) nogil
+ bint operator==(Simplex_tree_interface_full_featured) nogil
+ # Iterators over Simplex tree
+ pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_end() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) nogil
+ pair[Simplex_tree_boundary_iterator, Simplex_tree_boundary_iterator] get_boundary_iterators(vector[int] simplex) nogil except +
+ # Expansion with blockers
+ ctypedef bool (*blocker_func_t)(vector[int], void *user_data)
+ void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data)
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
- cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
- Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
- void write_output_diagram(string diagram_file_name)
- vector[pair[vector[int], vector[int]]] persistence_pairs()
+ cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>>":
+ Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
+ void write_output_diagram(string diagram_file_name) nogil except +
+ vector[pair[vector[int], vector[int]]] persistence_pairs() nogil
+ pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() nogil
+ pair[vector[vector[int]], vector[vector[int]]] flag_generators() nogil
+ vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(double min_persistence) nogil
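Since the declarations above mark the persistence interface `nogil`, independent simplex trees can be processed from plain Python threads. A sketch under that assumption (`clouds` stands for illustrative user data, a list of point sets):

    from concurrent.futures import ThreadPoolExecutor
    import gudhi

    def h1_diagram(points):
        stree = gudhi.RipsComplex(points=points, max_edge_length=1.0).create_simplex_tree(max_dimension=2)
        stree.compute_persistence()          # releases the GIL while it runs
        return stree.persistence_intervals_in_dimension(1)

    with ThreadPoolExecutor(max_workers=4) as pool:
        diagrams = list(pool.map(h1_diagram, clouds))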
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index b18627c4..4cf176f5 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -7,14 +7,28 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
-from libc.stdint cimport intptr_t
-from numpy import array as np_array
-cimport simplex_tree
+from cython.operator import dereference, preincrement
+from libc.stdint cimport intptr_t, int32_t, int64_t
+import numpy as np
+cimport gudhi.simplex_tree
+cimport cython
+from numpy.math cimport INFINITY
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
+ctypedef fused some_int:
+ int32_t
+ int64_t
+
+ctypedef fused some_float:
+ float
+ double
+
+cdef bool callback(vector[int] simplex, void *blocker_func):
+ return (<object>blocker_func)(simplex)
+
# SimplexTree python interface
cdef class SimplexTree:
"""The simplex tree is an efficient and flexible data structure for
@@ -31,19 +45,35 @@ cdef class SimplexTree:
cdef public intptr_t thisptr
# Get the pointer casted as it should be
- cdef Simplex_tree_interface_full_featured* get_ptr(self):
+ cdef Simplex_tree_interface_full_featured* get_ptr(self) nogil:
return <Simplex_tree_interface_full_featured*>(self.thisptr)
cdef Simplex_tree_persistence_interface * pcohptr
# Fake constructor that does nothing but documenting the constructor
- def __init__(self):
+ def __init__(self, other = None):
"""SimplexTree constructor.
+
+ :param other: If `other` is `None` (default value), an empty `SimplexTree` is created.
+ If `other` is a `SimplexTree`, the `SimplexTree` is constructed from a deep copy of `other`.
+ :type other: SimplexTree (Optional)
+ :returns: An empty or a copy simplex tree.
+ :rtype: SimplexTree
+
+ :raises TypeError: In case `other` is neither `None`, nor a `SimplexTree`.
+ :note: If the `SimplexTree` is a copy, the persistence information is not copied. If you need it in the clone,
+ you have to call :func:`compute_persistence` on it even if you had already computed it in the original.
"""
# The real cython constructor
- def __cinit__(self):
- self.thisptr = <intptr_t>(new Simplex_tree_interface_full_featured())
+ def __cinit__(self, other = None):
+ if other:
+ if isinstance(other, SimplexTree):
+ self.thisptr = _get_copy_intptr(other)
+ else:
+ raise TypeError("`other` argument must be of type `SimplexTree`, or `None`.")
+ else:
+ self.thisptr = <intptr_t>(new Simplex_tree_interface_full_featured())
def __dealloc__(self):
cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
@@ -62,12 +92,27 @@ cdef class SimplexTree:
"""
return self.pcohptr != NULL
+ def copy(self):
+ """
+ :returns: A simplex tree that is a deep copy of itself.
+ :rtype: SimplexTree
+
+ :note: The persistence information is not copied. If you need it in the clone, you have to call
+ :func:`compute_persistence` on it even if you had already computed it in the original.
+ """
+ stree = SimplexTree()
+ stree.thisptr = _get_copy_intptr(self)
+ return stree
+
+ def __deepcopy__(self, memo):
+ return self.copy()
+
def filtration(self, simplex):
"""This function returns the filtration value for a given N-simplex in
this simplicial complex, or +infinity if it is not in the complex.
:param simplex: The N-simplex, represented by a list of vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:returns: The simplicial complex filtration value.
:rtype: float
"""
@@ -78,7 +123,7 @@ cdef class SimplexTree:
given N-simplex.
:param simplex: The N-simplex, represented by a list of vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:param filtration: The new filtration value.
:type filtration: float
@@ -89,7 +134,7 @@ cdef class SimplexTree:
(with more :meth:`assign_filtration` or
:meth:`make_filtration_non_decreasing` for instance) before calling
any function that relies on the filtration property, like
- :meth:`initialize_filtration`.
+ :meth:`persistence`.
"""
self.get_ptr().assign_simplex_filtration(simplex, filtration)
@@ -97,17 +142,10 @@ cdef class SimplexTree:
"""This function initializes and sorts the simplicial complex
filtration vector.
- .. note::
-
- This function must be launched before
- :func:`persistence()<gudhi.SimplexTree.persistence>`,
- :func:`betti_numbers()<gudhi.SimplexTree.betti_numbers>`,
- :func:`persistent_betti_numbers()<gudhi.SimplexTree.persistent_betti_numbers>`,
- or :func:`get_filtration()<gudhi.SimplexTree.get_filtration>`
- after :func:`inserting<gudhi.SimplexTree.insert>` or
- :func:`removing<gudhi.SimplexTree.remove_maximal_simplex>`
- simplices.
+ .. deprecated:: 3.2.0
"""
+ import warnings
+ warnings.warn("Since Gudhi 3.2, calling SimplexTree.initialize_filtration is unnecessary.", DeprecationWarning)
self.get_ptr().initialize_filtration()
def num_vertices(self):
@@ -138,9 +176,9 @@ cdef class SimplexTree:
This function is not constant time because it can recompute
dimension if required (can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
methods).
"""
return self.get_ptr().dimension()
@@ -158,16 +196,16 @@ cdef class SimplexTree:
"""This function sets the dimension of the simplicial complex.
:param dimension: The new dimension value.
- :type dimension: int.
+ :type dimension: int
.. note::
This function must be used with caution because it disables
dimension recomputation when required
(this recomputation can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
).
"""
self.get_ptr().set_dimension(<int>dimension)
@@ -177,14 +215,11 @@ cdef class SimplexTree:
complex or not.
:param simplex: The N-simplex to find, represented by a list of vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:returns: true if the simplex was found, false otherwise.
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().find_simplex(csimplex)
+ return self.get_ptr().find_simplex(simplex)
def insert(self, simplex, filtration=0.0):
"""This function inserts the given N-simplex and its subfaces with the
@@ -194,60 +229,149 @@ cdef class SimplexTree:
:param simplex: The N-simplex to insert, represented by a list of
vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:param filtration: The filtration value of the simplex.
- :type filtration: float.
+ :type filtration: float
:returns: true if the simplex was not yet in the complex, false
otherwise (whatever its original filtration value).
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().insert_simplex_and_subfaces(csimplex,
- <double>filtration)
+ return self.get_ptr().insert(simplex, <double>filtration)
- def get_filtration(self):
- """This function returns a list of all simplices with their given
+ @staticmethod
+ @cython.boundscheck(False)
+ def create_from_array(filtrations, double max_filtration=INFINITY):
+ """Creates a new, empty complex and inserts vertices and edges. The vertices are numbered from 0 to n-1, and
+ the filtration values are encoded in the array, with the diagonal representing the vertices. It is the
+ caller's responsibility to ensure that this defines a filtration, which can be achieved with either::
+
+ filtrations[np.diag_indices_from(filtrations)] = filtrations.min(axis=1)
+
+ or::
+
+ diag = filtrations.diagonal()
+ filtrations = np.fmax(np.fmax(filtrations, diag[:, None]), diag[None, :])
+
+ :param filtrations: the filtration values of the vertices and edges to insert. The matrix is assumed to be symmetric.
+ :type filtrations: numpy.ndarray of shape (n,n)
+ :param max_filtration: only insert vertices and edges with filtration values no larger than max_filtration
+ :type max_filtration: float
+ :returns: the new complex
+ :rtype: SimplexTree
+ """
+ # TODO: document which half of the matrix is actually read?
+ filtrations = np.asanyarray(filtrations, dtype=float)
+ cdef double[:,:] F = filtrations
+ ret = SimplexTree()
+ cdef int n = F.shape[0]
+ assert n == F.shape[1], 'create_from_array() expects a square array'
+ with nogil:
+ ret.get_ptr().insert_matrix(&F[0,0], n, F.strides[0], F.strides[1], max_filtration)
+ return ret
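A sketch of `create_from_array`, using a zero diagonal so that every vertex value is no larger than its incident edges (one way to satisfy the filtration requirement stated above):

    import numpy as np
    import gudhi

    rng = np.random.default_rng(0)
    D = rng.random((5, 5))
    D = (D + D.T) / 2                # the matrix is assumed symmetric
    np.fill_diagonal(D, 0.0)         # vertex values <= incident edge values
    stree = gudhi.SimplexTree.create_from_array(D, max_filtration=0.8)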
+
+ def insert_edges_from_coo_matrix(self, edges):
+ """Inserts edges given by a sparse matrix in `COOrdinate format
+ <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html>`_.
+ If an edge is repeated, the smallest filtration value is used. Missing entries are not inserted.
+ Diagonal entries are currently interpreted as vertices, although we do not guarantee this behavior
+ in the future. This is only useful if you want to insert a vertex with a smaller filtration value
+ than the smallest edge containing it, since vertices are implicitly inserted together with the edges.
+
+ :param edges: the edges to insert and their filtration values.
+ :type edges: scipy.sparse.coo_matrix of shape (n,n)
+
+ .. seealso:: :func:`insert_batch`
+ """
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ self.get_ptr().insert_batch_vertices(np.unique(np.stack((edges.row, edges.col))), INFINITY)
+ # TODO: optimize this?
+ for edge in zip(edges.row, edges.col, edges.data):
+ self.get_ptr().insert((edge[0], edge[1]), edge[2])
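A sketch of `insert_edges_from_coo_matrix` on a three-vertex graph (data illustrative):

    import numpy as np
    from scipy.sparse import coo_matrix
    import gudhi

    rows = np.array([0, 0, 1])
    cols = np.array([1, 2, 2])
    vals = np.array([0.1, 0.2, 0.3])          # one filtration value per edge
    stree = gudhi.SimplexTree()
    stree.insert_edges_from_coo_matrix(coo_matrix((vals, (rows, cols)), shape=(3, 3)))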
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def insert_batch(self, some_int[:,:] vertex_array, some_float[:] filtrations):
+ """Inserts k-simplices given by a sparse array in a format similar
+ to `torch.sparse <https://pytorch.org/docs/stable/sparse.html>`_.
+ The n-th simplex has vertices `vertex_array[0,n]`, ...,
+ `vertex_array[k,n]` and filtration value `filtrations[n]`.
+ If a simplex is repeated, the smallest filtration value is used.
+ Simplices with a repeated vertex are currently interpreted as lower
+ dimensional simplices, but we do not guarantee this behavior in the
+ future. Any time a simplex is inserted, its faces are inserted as well
+ if needed to preserve a simplicial complex.
+
+ :param vertex_array: the k-simplices to insert.
+ :type vertex_array: numpy.array of shape (k+1,n)
+ :param filtrations: the filtration values.
+ :type filtrations: numpy.array of shape (n,)
+ """
+ cdef vector[int] vertices = np.unique(vertex_array)
+ cdef Py_ssize_t k = vertex_array.shape[0]
+ cdef Py_ssize_t n = vertex_array.shape[1]
+ assert filtrations.shape[0] == n, 'inconsistent sizes for vertex_array and filtrations'
+ cdef Py_ssize_t i
+ cdef Py_ssize_t j
+ cdef vector[int] v
+ with nogil:
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ # NaN currently does the wrong thing
+ self.get_ptr().insert_batch_vertices(vertices, INFINITY)
+ for i in range(n):
+ for j in range(k):
+ v.push_back(vertex_array[j, i])
+ self.get_ptr().insert(v, filtrations[i])
+ v.clear()
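A sketch of `insert_batch` for n = 2 triangles, so `vertex_array` has shape (k+1, n) = (3, 2):

    import numpy as np
    import gudhi

    vertex_array = np.array([[0, 1],
                             [1, 2],
                             [2, 3]])          # columns: simplices (0,1,2) and (1,2,3)
    filtrations = np.array([0.5, 0.7])
    stree = gudhi.SimplexTree()
    stree.insert_batch(vertex_array, filtrations)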
+
+ def get_simplices(self):
+ """This function returns a generator with simplices and their given
filtration values.
+ :returns: The simplices.
+ :rtype: generator with tuples(simplex, filtration)
+ """
+ cdef Simplex_tree_simplices_iterator it = self.get_ptr().get_simplices_iterator_begin()
+ cdef Simplex_tree_simplices_iterator end = self.get_ptr().get_simplices_iterator_end()
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
+
+ def get_filtration(self):
+ """This function returns a generator with simplices and their given
+ filtration values sorted by increasing filtration values.
+
:returns: The simplices sorted by increasing filtration values.
- :rtype: list of tuples(simplex, filtration)
+ :rtype: generator with tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] filtration \
- = self.get_ptr().get_filtration()
- ct = []
- for filtered_complex in filtration:
- v = []
- for vertex in filtered_complex.first:
- v.append(vertex)
- ct.append((v, filtered_complex.second))
- return ct
+ cdef vector[Simplex_tree_simplex_handle].const_iterator it = self.get_ptr().get_filtration_iterator_begin()
+ cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end()
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
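Both methods above now return lazy generators rather than lists; a short sketch:

    import gudhi

    stree = gudhi.SimplexTree()
    stree.insert([0, 1, 2], filtration=1.0)
    for simplex, filt in stree.get_filtration():   # sorted by filtration value
        print(simplex, filt)
    simplices = list(stree.get_simplices())        # materialise if a list is needed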
def get_skeleton(self, dimension):
- """This function returns the (simplices of the) skeleton of a maximum
- given dimension.
+ """This function returns a generator with the (simplices of the) skeleton of a maximum given dimension.
:param dimension: The skeleton dimension value.
- :type dimension: int.
+ :type dimension: int
:returns: The (simplices of the) skeleton of a maximum dimension.
- :rtype: list of tuples(simplex, filtration)
+ :rtype: generator with tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] skeleton \
- = self.get_ptr().get_skeleton(<int>dimension)
- ct = []
- for filtered_simplex in skeleton:
- v = []
- for vertex in filtered_simplex.first:
- v.append(vertex)
- ct.append((v, filtered_simplex.second))
- return ct
+ cdef Simplex_tree_skeleton_iterator it = self.get_ptr().get_skeleton_iterator_begin(dimension)
+ cdef Simplex_tree_skeleton_iterator end = self.get_ptr().get_skeleton_iterator_end(dimension)
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
def get_star(self, simplex):
"""This function returns the star of a given N-simplex.
:param simplex: The N-simplex, represented by a list of vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:returns: The (simplices of the) star of a simplex.
:rtype: list of tuples(simplex, filtration)
"""
@@ -269,10 +393,10 @@ cdef class SimplexTree:
given codimension.
:param simplex: The N-simplex, represented by a list of vertex.
- :type simplex: list of int.
+ :type simplex: list of int
:param codimension: The codimension. If codimension = 0, all cofaces
are returned (equivalent of get_star function)
- :type codimension: int.
+ :type codimension: int
:returns: The (simplices of the) cofaces of a simplex
:rtype: list of tuples(simplex, filtration)
"""
@@ -289,26 +413,37 @@ cdef class SimplexTree:
ct.append((v, filtered_simplex.second))
return ct
- def remove_maximal_simplex(self, simplex):
- """This function removes a given maximal N-simplex from the simplicial
- complex.
+ def get_boundaries(self, simplex):
+ """This function returns a generator with the boundaries of a given N-simplex.
+ If you do not need the filtration values, the boundary can also be obtained as
+ :code:`itertools.combinations(simplex,len(simplex)-1)`.
:param simplex: The N-simplex, represented by a list of vertex.
:type simplex: list of int.
+ :returns: The (simplices of the) boundary of a simplex
+ :rtype: generator with tuples(simplex, filtration)
+ """
+ cdef pair[Simplex_tree_boundary_iterator, Simplex_tree_boundary_iterator] it = self.get_ptr().get_boundary_iterators(simplex)
- .. note::
+ while it.first != it.second:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it.first))
+ preincrement(it.first)
+
+ def remove_maximal_simplex(self, simplex):
+ """This function removes a given maximal N-simplex from the simplicial
+ complex.
- Be aware that removing is shifting data in a flat_map
- (:func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>` to be done).
+ :param simplex: The N-simplex, represented by a list of vertex.
+ :type simplex: list of int
.. note::
The dimension of the simplicial complex may be lower after calling
remove_maximal_simplex than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
method will return the old value, which
remains a valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
to recompute the exact dimension.
"""
self.get_ptr().remove_maximal_simplex(simplex)
@@ -317,37 +452,27 @@ cdef class SimplexTree:
"""Prune above filtration value given as parameter.
:param filtration: Maximum threshold value.
- :type filtration: float.
+ :type filtration: float
:returns: The filtration modification information.
:rtype: bool
.. note::
- Some simplex tree functions require the filtration to be valid.
- prune_above_filtration function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
-
- .. note::
-
Note that the dimension of the simplicial complex may be lower
after calling
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
will return the old value, which remains a
valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
method to recompute the exact dimension.
"""
return self.get_ptr().prune_above_filtration(filtration)
- def expansion(self, max_dim):
- """Expands the Simplex_tree containing only its one skeleton
+ def expansion(self, max_dimension):
+ """Expands the simplex tree containing only its one skeleton
- until dimension max_dim.
+ until dimension max_dimension.
The expanded simplicial complex until dimension :math:`d`
@@ -357,13 +482,15 @@ cdef class SimplexTree:
The filtration value assigned to a simplex is the maximal filtration
value of one of its edges.
- The Simplex_tree must contain no simplex of dimension bigger than
+ The simplex tree must contain no simplex of dimension bigger than
1 when calling the method.
- :param max_dim: The maximal dimension.
- :type max_dim: int.
+ :param max_dimension: The maximal dimension.
+ :type max_dimension: int
"""
- self.get_ptr().expansion(max_dim)
+ cdef int maxdim = max_dimension
+ with nogil:
+ self.get_ptr().expansion(maxdim)
def make_filtration_non_decreasing(self):
"""This function ensures that each simplex has a higher filtration
@@ -372,31 +499,111 @@ cdef class SimplexTree:
:returns: True if any filtration value was modified,
False if the filtration was already non-decreasing.
:rtype: bool
+ """
+ return self.get_ptr().make_filtration_non_decreasing()
+
+ def reset_filtration(self, filtration, min_dim = 0):
+ """This function resets the filtration value of all the simplices of dimension at least min_dim. Resets all the
+ simplex tree when `min_dim = 0`.
+ `reset_filtration` may break the filtration property with `min_dim > 0`, and it is the user's responsibility to
+ make it a valid filtration (using a large enough `filt_value`, or calling `make_filtration_non_decreasing`
+ afterwards for instance).
+
+ :param filtration: New threshold value.
+ :type filtration: float.
+ :param min_dim: The minimal dimension. Default value is 0.
+ :type min_dim: int.
+ """
+ self.get_ptr().reset_filtration(filtration, min_dim)
+
+ def extend_filtration(self):
+ """ Extend filtration for computing extended persistence. This function only uses the filtration values at the
+ 0-dimensional simplices, and computes the extended persistence diagram induced by the lower-star filtration
+ computed with these values.
+
+ .. note::
+ Note that after calling this function, the filtration values are actually modified within the simplex tree.
+ The function :func:`extended_persistence` retrieves the original values.
.. note::
- Some simplex tree functions require the filtration to be valid.
- make_filtration_non_decreasing function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
+ Note that this code creates an extra vertex internally, so you should make sure that the simplex tree does
+ not contain a vertex with the largest possible value (i.e., 4294967295).
+
+ This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-extended-persistence.ipynb>`_
+ explains how to compute an extension of persistence called extended persistence.
"""
- return self.get_ptr().make_filtration_non_decreasing()
+ self.get_ptr().compute_extended_filtration()
+
+ def extended_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function retrieves good values for extended persistence, and separate the diagrams into the Ordinary,
+ Relative, Extended+ and Extended- subdiagrams.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11. Max is 46337.
+ :type homology_coeff_field: int
+ :param min_persistence: The minimum persistence value (i.e., the absolute value of the difference between the
+ persistence diagram point coordinates) to take into account (strictly greater than min_persistence).
+ Default value is 0.0. Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float
+ :returns: A list of four persistence diagrams in the format described in :func:`persistence`. The first one is
+ Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-.
+ See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in
+ https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes.
+
+ .. note::
+
+ This function should be called only if :func:`extend_filtration` has been called first!
+
+ .. note::
+
+ The coordinates of the persistence diagram points may differ slightly from the
+ original filtration values due to the internal transformation (scaling to [-2,-1]) that is
+ performed on these values during the computation of extended persistence.
+
+ This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-extended-persistence.ipynb>`_
+ explains how to compute an extension of persistence called extended persistence.
+ """
+ cdef vector[pair[int, pair[double, double]]] persistence_result
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False)
+ self.pcohptr.compute_persistence(homology_coeff_field, -1.)
+ return self.pcohptr.compute_extended_persistence_subdiagrams(min_persistence)
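A sketch of the two-step workflow (`extend_filtration` then `extended_persistence`) on a lower-star filtered path graph:

    import gudhi

    stree = gudhi.SimplexTree()
    stree.insert([0, 1], filtration=0.0)
    stree.insert([1, 2], filtration=0.0)
    for vertex, value in zip([0, 1, 2], [0.0, 1.0, 0.5]):
        stree.assign_filtration([vertex], value)   # only vertex values are used

    stree.extend_filtration()                      # mandatory first step
    ordinary, relative, ext_plus, ext_minus = stree.extended_persistence()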
+
+ def expansion_with_blocker(self, max_dim, blocker_func):
+ """Expands the Simplex_tree containing only a graph. Simplices corresponding to cliques in the graph are added
+ incrementally, faces before cofaces, unless the simplex has dimension larger than `max_dim` or `blocker_func`
+ returns `True` for this simplex.
+
+ The function identifies a candidate simplex whose faces are all already in the complex, inserts it with a
+ filtration value corresponding to the maximum of the filtration values of the faces, then calls `blocker_func`
+ with this new simplex (represented as a list of int). If `blocker_func` returns `True`, the simplex is removed,
+ otherwise it is kept. The algorithm then proceeds with the next candidate.
+
+ .. warning::
+ Several candidates of the same dimension may be inserted simultaneously before calling `blocker_func`, so
+ if you examine the complex in `blocker_func`, you may hit a few simplices of the same dimension that have
+ not been vetted by `blocker_func` yet, or have already been rejected but not yet removed.
+
+ :param max_dim: Expansion maximal dimension value.
+ :type max_dim: int
+ :param blocker_func: Blocker oracle.
+ :type blocker_func: Callable[[List[int]], bool]
+ """
+ self.get_ptr().expansion_with_blockers_callback(max_dim, callback, <void*>blocker_func)
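A sketch of `expansion_with_blocker`, rejecting any candidate simplex whose filtration value exceeds a threshold (threshold illustrative):

    import gudhi

    stree = gudhi.SimplexTree()
    stree.insert([0, 1], 0.5)
    stree.insert([1, 2], 0.8)
    stree.insert([0, 2], 1.2)

    def blocker(simplex):
        # The candidate has already been inserted when this is called;
        # returning True removes it again.
        return stree.filtration(simplex) > 1.0

    stree.expansion_with_blocker(2, blocker)       # the triangle [0,1,2] is blocked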
def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
- """This function returns the persistence of the simplicial complex.
+ """This function computes and returns the persistence of the simplicial complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number. Default value is 11.
- :type homology_coeff_field: int.
+ prime number. Default value is 11. Max is 46337.
+ :type homology_coeff_field: int
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
0.0.
- Sets min_persistence to -1.0 to see all values.
- :type min_persistence: float.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float
:param persistence_dim_max: If true, the persistent homology for the
maximal dimension in the complex is computed. If false, it is
ignored. Default is false.
@@ -404,13 +611,36 @@ cdef class SimplexTree:
:returns: The persistence of the simplicial complex.
:rtype: list of pairs(dimension, pair(birth, death))
"""
+ self.compute_persistence(homology_coeff_field, min_persistence, persistence_dim_max)
+ return self.pcohptr.get_persistence()
+
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+ """This function computes the persistence of the simplicial complex, so it can be accessed through
+ :func:`persistent_betti_numbers`, :func:`persistence_pairs`, etc. This function is equivalent to :func:`persistence`
+ when you do not want the list :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11. Max is 46337.
+ :type homology_coeff_field: int
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float
+ :param persistence_dim_max: If true, the persistent homology for the
+ maximal dimension in the complex is computed. If false, it is
+ ignored. Default is false.
+ :type persistence_dim_max: bool
+ :returns: Nothing.
+ """
if self.pcohptr != NULL:
del self.pcohptr
- self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ cdef bool pdm = persistence_dim_max
+ cdef int coef = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), pdm)
+ self.pcohptr.compute_persistence(coef, minp)
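A sketch contrasting the two entry points: `compute_persistence` returns nothing but unlocks the query methods below:

    import gudhi

    stree = gudhi.SimplexTree()
    stree.insert([0, 1, 2], filtration=1.0)
    stree.compute_persistence()        # no list built, cheaper than persistence()
    print(stree.betti_numbers())       # e.g. [1, 0, 0] for a filled triangle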
def betti_numbers(self):
"""This function returns the Betti numbers of the simplicial complex.
@@ -419,16 +649,11 @@ cdef class SimplexTree:
:rtype: list of int
:note: betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- else:
- print("betti_numbers function requires persistence function"
- " to be launched first.")
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the
@@ -436,46 +661,40 @@ cdef class SimplexTree:
:param from_value: The persistence birth limit to be added in the
numbers (persistent birth <= from_value).
- :type from_value: float.
+ :type from_value: float
:param to_value: The persistence death limit to be added in the
numbers (persistent death > to_value).
- :type to_value: float.
+ :type to_value: float
:returns: The persistent Betti numbers ([B0, B1, ..., Bn]).
:rtype: list of int
:note: persistent_betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- else:
- print("persistent_betti_numbers function requires persistence function"
- " to be launched first.")
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the simplicial
complex in a specific dimension.
:param dimension: The specific dimension.
- :type dimension: int.
+ :type dimension: int
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np_array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
def persistence_pairs(self):
"""This function returns a list of persistence birth and death simplices pairs.
@@ -484,33 +703,100 @@ cdef class SimplexTree:
:rtype: list of pair of list of int
:note: persistence_pairs function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[vector[int],vector[int]]] persistence_pairs_result
- if self.pcohptr != NULL:
- persistence_pairs_result = self.pcohptr.persistence_pairs()
- else:
- print("persistence_pairs function requires persistence function"
- " to be launched first.")
- return persistence_pairs_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_pairs()"
+ return self.pcohptr.persistence_pairs()
- def write_persistence_diagram(self, persistence_file=''):
+ def write_persistence_diagram(self, persistence_file):
"""This function writes the persistence intervals of the simplicial
complex in a user given file name.
- :param persistence_file: The specific dimension.
- :type persistence_file: string.
+ :param persistence_file: Name of the file.
+ :type persistence_file: string
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- if self.pcohptr != NULL:
- if persistence_file != '':
- self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
- else:
- print("persistence_file must be specified")
+ assert self.pcohptr != NULL, "compute_persistence() must be called before write_persistence_diagram()"
+ self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
+
+ def lower_star_persistence_generators(self):
+ """Assuming this is a lower-star filtration, this function returns the persistence pairs,
+ where each simplex is replaced with the vertex that gave it its filtration value.
+
+ :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity,
+ and second the essential features, grouped by dimension, with one vertex each
+ :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]]
+
+ :note: lower_star_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.lower_star_generators()
+ normal = [np.array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np.array(d) for d in gen.second]
+ return (normal, infinite)
+
+ def flag_persistence_generators(self):
+ """Assuming this is a flag complex, this function returns the persistence pairs,
+ where each simplex is replaced with the vertices of the edges that gave it its filtration value.
+
+ :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death;
+ then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity;
+ then the connected components, with one vertex each;
+ finally the other essential features, grouped by dimension, with 2 vertices for birth.
+ :rtype: Tuple[numpy.array[int] of shape (n,3), List[numpy.array[int] of shape (m,4)], numpy.array[int] of shape (l,), List[numpy.array[int] of shape (k,2)]]
+
+ :note: flag_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.flag_generators()
+ if len(gen.first) == 0:
+ normal0 = np.empty((0,3))
+ normals = []
else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
+ l = iter(gen.first)
+ normal0 = np.array(next(l)).reshape(-1,3)
+ normals = [np.array(d).reshape(-1,4) for d in l]
+ if len(gen.second) == 0:
+ infinite0 = np.empty(0)
+ infinites = []
+ else:
+ l = iter(gen.second)
+ infinite0 = np.array(next(l))
+ infinites = [np.array(d).reshape(-1,2) for d in l]
+ return (normal0, normals, infinite0, infinites)
+
+ def collapse_edges(self, nb_iterations = 1):
+ """Assuming the complex is a graph (simplices of higher dimension are ignored), this method implicitly
+ interprets it as the 1-skeleton of a flag complex, and replaces it with another (smaller) graph whose
+ expansion has the same persistent homology, using a technique known as edge collapses
+ (see :cite:`edgecollapsearxiv`).
+
+ A natural application is to get a simplex tree of dimension 1 from :class:`~gudhi.RipsComplex`,
+ then collapse edges, perform :meth:`expansion()` and finally compute persistence
+ (cf. :download:`rips_complex_edge_collapse_example.py <../example/rips_complex_edge_collapse_example.py>`).
+
+ :param nb_iterations: The number of edge collapse iterations to perform. Default is 1.
+ :type nb_iterations: int
+ """
+ # Backup old pointer
+ cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
+ cdef int nb_iter = nb_iterations
+ with nogil:
+ # New pointer is a new collapsed simplex tree
+ self.thisptr = <intptr_t>(ptr.collapse_edges(nb_iter))
+ # Delete old pointer
+ del ptr
+
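A sketch of the Rips pipeline mentioned above (`point_cloud` stands for illustrative user data):

    import gudhi

    rips = gudhi.RipsComplex(points=point_cloud, max_edge_length=2.0)
    stree = rips.create_simplex_tree(max_dimension=1)   # 1-skeleton only
    stree.collapse_edges(nb_iterations=2)               # shrink the graph
    stree.expansion(3)                                  # then expand the flag complex
    diag = stree.persistence()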
+ def __eq__(self, other:SimplexTree):
+ """Test for structural equality
+ :returns: True if the 2 simplex trees are equal, False otherwise.
+ :rtype: bool
+ """
+ return dereference(self.get_ptr()) == dereference(other.get_ptr())
+
+cdef intptr_t _get_copy_intptr(SimplexTree stree) nogil:
+ return <intptr_t>(new Simplex_tree_interface_full_featured(dereference(stree.get_ptr())))
diff --git a/src/python/gudhi/sklearn/__init__.py b/src/python/gudhi/sklearn/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/sklearn/__init__.py
diff --git a/src/python/gudhi/sklearn/cubical_persistence.py b/src/python/gudhi/sklearn/cubical_persistence.py
new file mode 100644
index 00000000..672af278
--- /dev/null
+++ b/src/python/gudhi/sklearn/cubical_persistence.py
@@ -0,0 +1,110 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .. import CubicalComplex
+from sklearn.base import BaseEstimator, TransformerMixin
+
+import numpy as np
+# joblib is required by scikit-learn
+from joblib import Parallel, delayed
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>CubicalPersistence: fit_transform(X)
+# CubicalPersistence->>thread1: _transform(X[0])
+# CubicalPersistence->>thread2: _transform(X[1])
+# Note right of CubicalPersistence: ...
+# thread1->>CubicalPersistence: [array( H0(X[0]) ), array( H1(X[0]) )]
+# thread2->>CubicalPersistence: [array( H0(X[1]) ), array( H1(X[1]) )]
+# Note right of CubicalPersistence: ...
+# CubicalPersistence->>USER: [[array( H0(X[0]) ), array( H1(X[0]) )],<br/> [array( H0(X[1]) ), array( H1(X[1]) )],<br/> ...]
+
+
+class CubicalPersistence(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence diagrams from a cubical complex.
+ """
+
+ def __init__(
+ self,
+ homology_dimensions,
+ newshape=None,
+ homology_coeff_field=11,
+ min_persistence=0.0,
+ n_jobs=None,
+ ):
+ """
+ Constructor for the CubicalPersistence class.
+
+ Parameters:
+ homology_dimensions (int or list of int): The returned persistence diagrams dimension(s).
+ Short-circuits the use of :class:`~gudhi.representations.preprocessing.DimensionSelector` when only one
+ dimension matters (in other words, when `homology_dimensions` is an int).
+ newshape (tuple of ints): If the cell filtration values need to be reshaped
+ (cf. :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform`), set `newshape`
+ to perform `numpy.reshape(X, newshape, order='C')` in
+ :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform` method.
+ homology_coeff_field (int): The homology coefficient field. Must be a prime number. Default value is 11.
+ min_persistence (float): The minimum persistence value to take into account (strictly greater than
+ `min_persistence`). Default value is `0.0`. Set `min_persistence` to `-1.0` to see all values.
+ n_jobs (int): cf. https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html
+ """
+ self.homology_dimensions = homology_dimensions
+ self.newshape = newshape
+ self.homology_coeff_field = homology_coeff_field
+ self.min_persistence = min_persistence
+ self.n_jobs = n_jobs
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def __transform(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return [
+ cubical_complex.persistence_intervals_in_dimension(dim) for dim in self.homology_dimensions
+ ]
+
+ def __transform_only_this_dim(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return cubical_complex.persistence_intervals_in_dimension(self.homology_dimensions)
+
+ def transform(self, X, Y=None):
+ """Compute all the cubical complexes and their associated persistence diagrams.
+
+ :param X: List of cell filtration values (`numpy.reshape(X, newshape, order='C')` if `newshape` is set with a tuple of ints).
+ :type X: list of list of float OR list of numpy.ndarray
+
+ :return: Persistence diagrams in the format:
+
+ - If `homology_dimensions` was set to `n`: `[array( Hn(X[0]) ), array( Hn(X[1]) ), ...]`
+ - If `homology_dimensions` was set to `[i, j]`: `[[array( Hi(X[0]) ), array( Hj(X[0]) )], [array( Hi(X[1]) ), array( Hj(X[1]) )], ...]`
+ :rtype: list of (,2) array_like or list of list of (,2) array_like
+ """
+ if self.newshape is not None:
+ X = np.reshape(X, self.newshape, order='C')
+
+ # Depends on whether homology_dimensions is an integer or a list of integers (else case)
+ if isinstance(self.homology_dimensions, int):
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(
+ delayed(self.__transform_only_this_dim)(cells) for cells in X
+ )
+ else:
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(delayed(self.__transform)(cells) for cells in X)
+
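A usage sketch of the estimator above on illustrative image data:

    import numpy as np
    from gudhi.sklearn.cubical_persistence import CubicalPersistence

    X = [np.random.rand(10, 10) for _ in range(3)]     # three 10x10 images
    cp = CubicalPersistence(homology_dimensions=0, n_jobs=-1)
    diagrams = cp.fit_transform(X)                     # one H0 diagram per image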
diff --git a/src/python/gudhi/subsampling.pyx b/src/python/gudhi/subsampling.pyx
index f77c6f75..46f32335 100644
--- a/src/python/gudhi/subsampling.pyx
+++ b/src/python/gudhi/subsampling.pyx
@@ -33,7 +33,7 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
The iteration starts with the landmark `starting point`.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -42,14 +42,15 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
And in both cases
- :param nb_points: Number of points of the subsample.
- :type nb_points: unsigned.
+ :param nb_points: Number of points of the subsample (the subsample may be \
+ smaller if there are fewer than nb_points distinct input points)
+ :type nb_points: int
:param starting_point: The iteration starts with the landmark `starting \
- point`,which is the index of the point to start with. If not set, this \
+ point`, which is the index of the point to start with. If not set, this \
index is chosen randomly.
- :type starting_point: unsigned.
+ :type starting_point: int
:returns: The subsample point set.
- :rtype: List[List[float]].
+ :rtype: List[List[float]]
"""
if off_file:
if os.path.isfile(off_file):
@@ -76,7 +77,7 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
"""Subsample a point set by picking random vertices.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -86,7 +87,7 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
And in both cases
:param nb_points: Number of points of the subsample.
- :type nb_points: unsigned.
+ :type nb_points: int
:returns: The subsample point set.
:rtype: List[List[float]]
"""
@@ -104,10 +105,10 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
"""Outputs a subset of the input points so that the squared distance
- between any two points is greater than or equal to min_squared_dist.
+ between any two points is greater than min_squared_dist.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -118,7 +119,7 @@ def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
:param min_squared_dist: Minimum squared distance separating the output \
points.
- :type min_squared_dist: float.
+ :type min_squared_dist: float
:returns: The subsample point set.
:rtype: List[List[float]]
"""
diff --git a/src/python/gudhi/tensorflow/__init__.py b/src/python/gudhi/tensorflow/__init__.py
new file mode 100644
index 00000000..1599cf52
--- /dev/null
+++ b/src/python/gudhi/tensorflow/__init__.py
@@ -0,0 +1,5 @@
+from .cubical_layer import CubicalLayer
+from .lower_star_simplex_tree_layer import LowerStarSimplexTreeLayer
+from .rips_layer import RipsLayer
+
+__all__ = ["LowerStarSimplexTreeLayer", "RipsLayer", "CubicalLayer"]
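A sketch of differentiating through one of these layers, consistent with the `CubicalLayer` docstrings below (image and loss are illustrative):

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import CubicalLayer

    X = tf.Variable(np.random.rand(8, 8), dtype=tf.float32)
    layer = CubicalLayer(homology_dimensions=[0])

    with tf.GradientTape() as tape:
        dgm = layer(X)[0][0]                           # finite part of the H0 diagram
        loss = tf.reduce_sum(dgm[:, 1] - dgm[:, 0])    # total persistence
    grads = tape.gradient(loss, [X])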
diff --git a/src/python/gudhi/tensorflow/cubical_layer.py b/src/python/gudhi/tensorflow/cubical_layer.py
new file mode 100644
index 00000000..5df2c370
--- /dev/null
+++ b/src/python/gudhi/tensorflow/cubical_layer.py
@@ -0,0 +1,82 @@
+import numpy as np
+import tensorflow as tf
+from ..cubical_complex import CubicalComplex
+
+######################
+# Cubical filtration #
+######################
+
+# The parameters of the model are the pixel values.
+
+def _Cubical(Xflat, Xdim, dimensions, homology_coeff_field):
+ # Parameters: Xflat (flattened image),
+ # Xdim (shape of non-flattened image)
+ # dimensions (homology dimensions)
+ # homology_coeff_field (homology field coefficient)
+
+ # Compute the persistence pairs with Gudhi
+ # We reverse the dimensions because CubicalComplex uses Fortran ordering
+ cc = CubicalComplex(dimensions=Xdim[::-1], top_dimensional_cells=Xflat)
+ cc.compute_persistence(homology_coeff_field=homology_coeff_field)
+
+ # Retrieve and output image indices/pixels corresponding to positive and negative simplices
+ cof_pp = cc.cofaces_of_persistence_pairs()
+
+ L_cofs = []
+ for dim in dimensions:
+
+ try:
+ cof = cof_pp[0][dim]
+ except IndexError:
+ cof = np.array([])
+
+ L_cofs.append(np.array(cof, dtype=np.int32))
+
+ return L_cofs
+
+class CubicalLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing the persistent homology of a cubical complex
+ """
+ def __init__(self, homology_dimensions, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the CubicalLayer class
+
+ Parameters:
+ homology_dimensions (List[int]): list of homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = homology_dimensions
+ self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute persistence diagram associated to a cubical complex filtered by some pixel values
+
+ Parameters:
+ X (TensorFlow variable): pixel values of the cubical complex
+
+ Returns:
+ List[Tuple[tf.Tensor,tf.Tensor]]: List of cubical persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively. Note that the essential part is always empty in cubical persistence diagrams, except in homology dimension zero, where the essential part always contains a single point, with abscissa equal to the smallest value in the complex, and infinite ordinate
+ """
+ # Compute pixels associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ Xflat = tf.reshape(X, [-1])
+ Xdim, Xflat_numpy = X.shape, Xflat.numpy()
+ indices_list = _Cubical(Xflat_numpy, Xdim, self.dimensions, self.hcf)
+ index_essential = np.argmin(Xflat_numpy) # index of minimum pixel value for essential persistence diagram
+ # Get persistence diagram by simply picking the corresponding entries in the image
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ finite_dgm = tf.reshape(tf.gather(Xflat, indices_list[idx_dim]), [-1,2])
+ essential_dgm = tf.reshape(tf.gather(Xflat, index_essential), [-1,1]) if dimension == 0 else tf.zeros([0, 1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices), [-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
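A minimal sketch of differentiating through this layer; the loss below (total squared persistence of the dimension-0 diagram) is only an illustration:

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import CubicalLayer

    X = tf.Variable(np.random.rand(10, 10), dtype=tf.float32, trainable=True)
    layer = CubicalLayer(homology_dimensions=[0])
    with tf.GradientTape() as tape:
        dgm = layer.call(X)[0][0]  # finite part of the dimension-0 diagram
        loss = tf.math.reduce_sum(tf.square(dgm[:, 1] - dgm[:, 0]))
    gradients = tape.gradient(loss, [X])  # gradient w.r.t. the pixel values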
diff --git a/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
new file mode 100644
index 00000000..5a8e5b75
--- /dev/null
+++ b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
@@ -0,0 +1,87 @@
+import numpy as np
+import tensorflow as tf
+
+#########################################
+# Lower star filtration on simplex tree #
+#########################################
+
+# The parameters of the model are the vertex function values of the simplex tree.
+
+def _LowerStarSimplexTree(simplextree, filtration, dimensions, homology_coeff_field):
+ # Parameters: simplextree (simplex tree on which to compute persistence)
+ # filtration (function values on the vertices of st),
+ # dimensions (homology dimensions),
+ # homology_coeff_field (homology field coefficient)
+
+ simplextree.reset_filtration(-np.inf, 0)
+
+ # Assign new filtration values
+ for i in range(simplextree.num_vertices()):
+ simplextree.assign_filtration([i], filtration[i])
+ simplextree.make_filtration_non_decreasing()
+
+ # Compute persistence diagram
+ simplextree.compute_persistence(homology_coeff_field=homology_coeff_field)
+
+ # Get vertex pairs for optimization. First, get all simplex pairs
+ pairs = simplextree.lower_star_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ finite_pairs = pairs[0][dimension] if len(pairs[0]) >= dimension+1 else np.empty(shape=[0,2])
+ essential_pairs = pairs[1][dimension] if len(pairs[1]) >= dimension+1 else np.empty(shape=[0,1])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class LowerStarSimplexTreeLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing lower-star persistence out of a simplex tree
+ """
+ def __init__(self, simplextree, homology_dimensions, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the LowerStarSimplexTreeLayer class
+
+ Parameters:
+ simplextree (gudhi.SimplexTree): underlying simplex tree. Its vertices MUST be named with integers from 0 to n-1, where n is its number of vertices. Note that its filtration values are modified in each call of the class.
+ homology_dimensions (List[int]): list of homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = homology_dimensions
+ self.simplextree = simplextree
+ self.min_persistence = min_persistence if min_persistence is not None else [0. for _ in range(len(self.dimensions))]
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, filtration):
+ """
+ Compute lower-star persistence diagram associated to a function defined on the vertices of the simplex tree
+
+ Parameters:
+ filtration (TensorFlow variable): filter function values over the vertices of the simplex tree. The i-th entry of filtration corresponds to vertex i in self.simplextree
+
+ Returns:
+ List[Tuple[tf.Tensor,tf.Tensor]]: List of lower-star persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ """
+ # Don't try to compute gradients for the vertex pairs
+ indices = _LowerStarSimplexTree(self.simplextree, filtration.numpy(), self.dimensions, self.hcf)
+ # Get persistence diagrams
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ finite_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][0]), [-1,2])
+ essential_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][1]), [-1,1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
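A minimal usage sketch on a small simplex tree; the filtration values and the loss are arbitrary illustrations:

    import tensorflow as tf
    import gudhi as gd
    from gudhi.tensorflow import LowerStarSimplexTreeLayer

    st = gd.SimplexTree()  # a path 0 - 1 - 2; vertices named 0..n-1 as required
    for s in ([0], [1], [2], [0, 1], [1, 2]):
        st.insert(s)
    F = tf.Variable([0., 10., 2.], dtype=tf.float32, trainable=True)
    layer = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
    with tf.GradientTape() as tape:
        dgm = layer.call(F)[0][0]  # finite part of the dimension-0 diagram
        loss = tf.math.reduce_sum(dgm[:, 1] - dgm[:, 0])
    gradients = tape.gradient(loss, [F])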
diff --git a/src/python/gudhi/tensorflow/rips_layer.py b/src/python/gudhi/tensorflow/rips_layer.py
new file mode 100644
index 00000000..2a73472c
--- /dev/null
+++ b/src/python/gudhi/tensorflow/rips_layer.py
@@ -0,0 +1,93 @@
+import numpy as np
+import tensorflow as tf
+from ..rips_complex import RipsComplex
+
+############################
+# Vietoris-Rips filtration #
+############################
+
+# The parameters of the model are the point coordinates.
+
+def _Rips(DX, max_edge, dimensions, homology_coeff_field):
+ # Parameters: DX (distance matrix),
+ # max_edge (maximum edge length for Rips filtration),
+ # dimensions (homology dimensions)
+ # homology_coeff_field (homology field coefficient)
+
+ # Compute the persistence pairs with Gudhi
+ rc = RipsComplex(distance_matrix=DX, max_edge_length=max_edge)
+ st = rc.create_simplex_tree(max_dimension=max(dimensions)+1)
+ st.compute_persistence(homology_coeff_field=homology_coeff_field)
+ pairs = st.flag_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ if dimension == 0:
+ finite_pairs = pairs[0]
+ essential_pairs = pairs[2]
+ else:
+ finite_pairs = pairs[1][dimension-1] if len(pairs[1]) >= dimension else np.empty(shape=[0,4])
+ essential_pairs = pairs[3][dimension-1] if len(pairs[3]) >= dimension else np.empty(shape=[0,2])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class RipsLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing Rips persistence out of a point cloud
+ """
+ def __init__(self, homology_dimensions, maximum_edge_length=np.inf, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the RipsLayer class
+
+ Parameters:
+ homology_dimensions (List[int]): list of homology dimensions
+ maximum_edge_length (float): maximum edge length for the Rips complex
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.max_edge = maximum_edge_length
+ self.dimensions = homology_dimensions
+ self.min_persistence = min_persistence if min_persistence is not None else [0. for _ in range(len(self.dimensions))]
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute Rips persistence diagram associated to a point cloud
+
+ Parameters:
+ X (TensorFlow variable): point cloud of shape [number of points, number of dimensions]
+
+ Returns:
+ List[Tuple[tf.Tensor,tf.Tensor]]: List of Rips persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ """
+ # Compute distance matrix
+ DX = tf.norm(tf.expand_dims(X, 1)-tf.expand_dims(X, 0), axis=2)
+ # Compute vertices associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ indices = _Rips(DX.numpy(), self.max_edge, self.dimensions, self.hcf)
+ # Get persistence diagrams by simply picking the corresponding entries in the distance matrix
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ cur_idx = indices[idx_dim]
+ if dimension > 0:
+ finite_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[0], [-1,2])), [-1,2])
+ essential_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[1], [-1,2])), [-1,1])
+ else:
+ reshaped_cur_idx = tf.reshape(cur_idx[0], [-1,3])
+ finite_dgm = tf.concat([tf.zeros([reshaped_cur_idx.shape[0],1]), tf.reshape(tf.gather_nd(DX, reshaped_cur_idx[:,1:]), [-1,1])], axis=1)
+ essential_dgm = tf.zeros([cur_idx[1].shape[0],1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
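A minimal sketch of optimizing point coordinates through this layer; the point cloud is arbitrary and the loss (maximizing total H1 persistence) is only an illustration:

    import tensorflow as tf
    from gudhi.tensorflow import RipsLayer

    X = tf.Variable([[1., 1.], [7., 0.], [4., 6.], [9., 6.], [0., 14.]],
                    dtype=tf.float32, trainable=True)
    layer = RipsLayer(homology_dimensions=[1], maximum_edge_length=12.)
    with tf.GradientTape() as tape:
        dgm = layer.call(X)[0][0]  # finite part of the H1 diagram
        loss = -tf.math.reduce_sum(tf.square(dgm[:, 1] - dgm[:, 0]))
    gradients = tape.gradient(loss, [X])  # gradient w.r.t. the point coordinates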
diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py
deleted file mode 100644
index db5ddff2..00000000
--- a/src/python/gudhi/wasserstein.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Theo Lacombe
-#
-# Copyright (C) 2019 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-import numpy as np
-import scipy.spatial.distance as sc
-try:
- import ot
-except ImportError:
- print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
-
-def _proj_on_diag(X):
- '''
- :param X: (n x 2) array encoding the points of a persistent diagram.
- :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
- '''
- Z = (X[:,0] + X[:,1]) / 2.
- return np.array([Z , Z]).T
-
-
-def _build_dist_matrix(X, Y, order=2., internal_p=2.):
- '''
- :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param internal_p: Ground metric (i.e. norm l_p).
- :param order: exponent for the Wasserstein metric.
- :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
- For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal.
- note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal).
- '''
- Xdiag = _proj_on_diag(X)
- Ydiag = _proj_on_diag(Y)
- if np.isinf(internal_p):
- C = sc.cdist(X,Y, metric='chebyshev')**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
- else:
- C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
- Cf = np.hstack((C, Cxd[:,None]))
- Cdy = np.append(Cdy, 0)
-
- Cf = np.vstack((Cf, Cdy[None,:]))
-
- return Cf
-
-
-def _perstot(X, order, internal_p):
- '''
- :param X: (n x 2) numpy.array (points of a given diagram).
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (Euclidean norm).
- :param order: exponent for Wasserstein. Default value is 2.
- :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
- '''
- Xdiag = _proj_on_diag(X)
- return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
-
-
-def wasserstein_distance(X, Y, order=2., internal_p=2.):
- '''
- :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (euclidean norm).
- :param order: exponent for Wasserstein; Default value is 2.
- :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric.
- :rtype: float
- '''
- n = len(X)
- m = len(Y)
-
- # handle empty diagrams
- if X.size == 0:
- if Y.size == 0:
- return 0.
- else:
- return _perstot(Y, order, internal_p)
- elif Y.size == 0:
- return _perstot(X, order, internal_p)
-
- M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
- a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT
- b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- b[-1] = b[-1] * n # so that we have a probability measure, required by POT
-
- # Comptuation of the otcost using the ot.emd2 library.
- # Note: it is the Wasserstein distance to the power q.
- # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
- ot_cost = (n+m) * ot.emd2(a, b, M, numItermax=2000000)
-
- return ot_cost ** (1./order)
diff --git a/src/python/gudhi/wasserstein/__init__.py b/src/python/gudhi/wasserstein/__init__.py
new file mode 100644
index 00000000..ed225ba4
--- /dev/null
+++ b/src/python/gudhi/wasserstein/__init__.py
@@ -0,0 +1 @@
+from .wasserstein import wasserstein_distance
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
new file mode 100644
index 00000000..bb6e641e
--- /dev/null
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -0,0 +1,146 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+
+import ot
+import numpy as np
+import scipy.spatial.distance as sc
+
+from gudhi.wasserstein import wasserstein_distance
+
+
+def _mean(x, m):
+ '''
+ :param x: a list of 2D-points, off diagonal, x_0... x_{k-1}
+ :param m: total number of points taken into account; that is, we have (m-k) copies of the diagonal
+ :returns: the weighted mean of x with (m-k) copies of the diagonal
+ '''
+ k = len(x)
+ if k > 0:
+ w = np.mean(x, axis=0)
+ w_delta = (w[0] + w[1]) / 2 * np.ones(2)
+ return (k * w + (m-k) * w_delta) / m
+ else:
+ return np.array([0, 0])
+
+
+def lagrangian_barycenter(pdiagset, init=None, verbose=False):
+ '''
+ :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` (`n` can vary), encoding a set of persistence
+ diagrams with only finite coordinates.
+ :param init: The initial value for barycenter estimate.
+ If ``None``, init is made on a random diagram from the dataset.
+ Otherwise, it can be an ``int`` (then initialization is made on ``pdiagset[init]``)
+ or a `(n x 2)` ``numpy.array`` encoding a persistence diagram with `n` points.
+ :type init: ``int``, or (n x 2) ``np.array``
+ :param verbose: if ``True``, returns additional information about the barycenter.
+ :type verbose: boolean
+ :returns: If not verbose (default), a ``numpy.array`` encoding the barycenter estimate of pdiagset
+ (local minimum of the energy function).
+ If ``pdiagset`` is empty, returns ``None``.
+ If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate,
+ and ``log`` is a ``dict`` that contains additional information:
+
+ - `"groupings"`, a list of list of pairs ``(i,j)``. Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that `pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal.
+
+ - `"energy"`, ``float`` representing the Frechet energy value obtained. It is the mean of squared distances of observations to the output.
+
+ - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm.
+ '''
+ X = pdiagset # to shorten notations, not a copy
+ m = len(X) # number of diagrams we are averaging
+ if m == 0:
+ print("Warning: computing barycenter of empty diag set. Returns None")
+ return None
+
+ # store the number of off-diagonal points for each of the X_i
+ nb_off_diag = np.array([len(X_i) for X_i in X])
+ # Initialisation of barycenter
+ if init is None:
+ i0 = np.random.randint(m) # Index of first state for the barycenter
+ Y = X[i0].copy()
+ else:
+ if type(init)==int:
+ Y = X[init].copy()
+ else:
+ Y = init.copy()
+
+ nb_iter = 0
+
+ converged = False # stopping criterion
+ while not converged:
+ nb_iter += 1
+ K = len(Y) # current nb of points in Y (some might be on diagonal)
+ G = np.full((K, m), -1, dtype=int) # will store, for each j, the index of
+ # the point matched in each other diagram
+ # (might be the diagonal).
+ # That is, G[j, i] = k <=> y_j is matched to
+ # x_k in the i-th diagram X[i]
+ updated_points = np.zeros((K, 2)) # will store the new positions of
+ # the points of Y.
+ # If points disappear, they are sent
+ # to [0,0] by default.
+ new_created_points = [] # will store potential new points.
+
+ # Step 1 : compute optimal matching (Y, X_i) for each X_i
+ # and create new points in Y if needed
+ for i in range(m):
+ _, indices = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+ for y_j, x_i_j in indices:
+ if y_j >= 0: # we matched an off-diagonal point to x_i_j...
+ if x_i_j >= 0: # ...which is also an off-diagonal point.
+ G[y_j, i] = x_i_j
+ else: # ...which is a diagonal point
+ G[y_j, i] = -1 # -1 stands for the diagonal (mask)
+ else: # We matched a diagonal point to x_i_j...
+ if x_i_j >= 0: # ...which is an off-diagonal point!
+ # need to create new point in Y
+ new_y = _mean(np.array([X[i][x_i_j]]), m)
+ # Average this point with (m-1) copies of Delta
+ new_created_points.append(new_y)
+
+ # Step 2 : Update current point position thanks to groupings computed
+ to_delete = []
+ for j in range(K):
+ matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1]
+ new_y_j = _mean(matched_points, m)
+ if not np.array_equal(new_y_j, np.array([0,0])):
+ updated_points[j] = new_y_j
+ else: # this point is no longer of any use.
+ to_delete.append(j)
+ # we now remove the points marked for deletion.
+ updated_points = np.delete(updated_points, to_delete, axis=0)
+
+ # we cannot converge if there have been newly created points.
+ if new_created_points:
+ Y = np.concatenate((updated_points, new_created_points))
+ else:
+ # Step 3 : we check convergence
+ if np.array_equal(updated_points, Y):
+ converged = True
+ Y = updated_points
+
+
+ if verbose:
+ groupings = []
+ energy = 0
+ log = {}
+ n_y = len(Y)
+ for i in range(m):
+ cost, edges = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+ groupings.append(edges)
+ energy += cost
+ log["groupings"] = groupings
+ energy = energy/m
+ log["energy"] = energy
+ log["nb_iter"] = nb_iter
+
+ return Y, log
+ else:
+ return Y
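A minimal usage sketch; the diagram values are arbitrary illustrations:

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter

    dg1 = np.array([[0.2, 0.5]])
    dg2 = np.array([[0.2, 0.7]])
    dg3 = np.array([[0.3, 0.6], [0.7, 0.8]])
    # initialize on dg1 (init=0); verbose=True also returns groupings, energy, nb_iter
    bary, log = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3], init=0, verbose=True)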
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
new file mode 100644
index 00000000..dc18806e
--- /dev/null
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -0,0 +1,355 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+import scipy.spatial.distance as sc
+import warnings
+
+try:
+ import ot
+except ImportError:
+ print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
+
+
+# Currently unused, but Théo says it is likely to be used again.
+def _proj_on_diag(X):
+ '''
+ :param X: (n x 2) array encoding the points of a persistence diagram.
+ :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
+ '''
+ Z = (X[:,0] + X[:,1]) / 2.
+ return np.array([Z , Z]).T
+
+
+def _dist_to_diag(X, internal_p):
+ '''
+ :param X: (n x 2) array encoding the points of a persistence diagram.
+ :param internal_p: Ground metric (i.e. norm L^p).
+ :returns: (n) array encoding the (respective orthogonal) distances of the points to the diagonal
+
+ .. note::
+ Assumes that the points are above the diagonal.
+ '''
+ return (X[:, 1] - X[:, 0]) * 2 ** (1.0 / internal_p - 1)
+
+
+def _build_dist_matrix(X, Y, order, internal_p):
+ '''
+ :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
+ :param Y: (m x 2) numpy.array encoding the second diagram.
+ :param order: exponent for the Wasserstein metric.
+ :param internal_p: Ground metric (i.e. norm L^p).
+ :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
+ For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j],
+ while C[i, m] (resp. C[n, j]) encodes the distance (raised to the power `order`) between X[i] (resp. Y[j])
+ and its orthogonal projection onto the diagonal.
+ note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal).
+ '''
+ Cxd = _dist_to_diag(X, internal_p)**order
+ Cdy = _dist_to_diag(Y, internal_p)**order
+ if np.isinf(internal_p):
+ C = sc.cdist(X,Y, metric='chebyshev')**order
+ else:
+ C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
+ Cf = np.hstack((C, Cxd[:,None]))
+ Cdy = np.append(Cdy, 0)
+
+ Cf = np.vstack((Cf, Cdy[None,:]))
+
+ return Cf
+
+
+def _perstot_autodiff(X, order, internal_p):
+ '''
+ Version of _perstot that works on eagerpy tensors.
+ '''
+ return _dist_to_diag(X, internal_p).norms.lp(order)
+
+
+def _perstot(X, order, internal_p, enable_autodiff):
+ '''
+ :param X: (n x 2) numpy.array (points of a given diagram).
+ :param order: exponent for Wasserstein.
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
+ :param enable_autodiff: If X is torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation.
+ :type enable_autodiff: bool
+ :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+
+ .. note::
+ Can be +inf if the diagram has an essential part (points with infinite coordinates).
+ '''
+ if enable_autodiff:
+ import eagerpy as ep
+
+ return _perstot_autodiff(ep.astensor(X), order, internal_p).raw
+ else:
+ return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
+
+
+def _get_essential_parts(a):
+ '''
+ :param a: (n x 2) numpy.array (points of a diagram)
+ :returns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite
+ coordinates that can occur in a diagram, namely:
+ type0 : (-inf, finite)
+ type1 : (finite, +inf)
+ type2 : (-inf, +inf)
+ type3 : (-inf, -inf)
+ type4 : (+inf, +inf)
+ .. note::
+ For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x.
+ Note also that points with (+inf, -inf) are not handled (points (x,y) in dgm satisfy by assumption (y >= x)).
+
+ Finally, we consider that points with coordinates (-inf,-inf) and (+inf, +inf) belong to the diagonal.
+ '''
+ if len(a):
+ first_coord_finite = np.isfinite(a[:,0])
+ second_coord_finite = np.isfinite(a[:,1])
+ first_coord_infinite_positive = (a[:,0] == np.inf)
+ second_coord_infinite_positive = (a[:,1] == np.inf)
+ first_coord_infinite_negative = (a[:,0] == -np.inf)
+ second_coord_infinite_negative = (a[:,1] == -np.inf)
+
+ ess_first_type = np.where(second_coord_finite & first_coord_infinite_negative)[0] # coord (-inf, x)
+ ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0] # coord (x, +inf)
+ ess_third_type = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0] # coord (-inf, +inf)
+
+ ess_fourth_type = np.where(first_coord_infinite_negative & second_coord_infinite_negative)[0] # coord (-inf, -inf)
+ ess_fifth_type = np.where(first_coord_infinite_positive & second_coord_infinite_positive)[0] # coord (+inf, +inf)
+ return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type
+ else:
+ return [], [], [], [], []
+
+
+def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis):
+ '''
+ :param X: (n x 2) numpy.array (dgm points)
+ :param Y: (n x 2) numpy.array (dgm points)
+ :param idX: indices to consider for this one dimensional OT problem (in X)
+ :param idY: indices to consider for this one dimensional OT problem (in Y)
+ :param order: exponent for Wasserstein distance computation
+ :param axis: must be 0 or 1, corresponds to the coordinate that is finite.
+ :returns: cost (float) and match for points with *one* infinite coordinate.
+
+ .. note::
+ Assumes idX and idY are produced by a call to _handle_essential_parts, and thus have the same length.
+ '''
+ u = X[idX, axis]
+ v = Y[idY, axis]
+
+ cost = np.sum(np.abs(np.sort(u) - np.sort(v))**(order)) # OT cost in 1D
+
+ sortidX = idX[np.argsort(u)]
+ sortidY = idY[np.argsort(v)]
+ # We return [i,j] sorted per value
+ match = list(zip(sortidX, sortidY))
+
+ return cost, match
+
+
+def _handle_essential_parts(X, Y, order):
+ '''
+ :param X: (n x 2) numpy array, first diagram.
+ :param Y: (m x 2) numpy array, second diagram.
+ :param order: Wasserstein order for cost computation.
+ :returns: cost and matching due to essential parts. If cost is +inf, matching will be set to None.
+ '''
+ ess_parts_X = _get_essential_parts(X)
+ ess_parts_Y = _get_essential_parts(Y)
+
+ # Treats the case of infinite cost (cardinalities of essential parts differ).
+ for u, v in list(zip(ess_parts_X, ess_parts_Y))[:3]: # ignore types 3 and 4 as they belong to the diagonal
+ if len(u) != len(v):
+ return np.inf, None
+
+ # Now we know each essential part has the same number of points in both diagrams.
+ # Handle type 0 and type 1 essential parts (those with one finite coordinate)
+ c1, m1 = _cost_and_match_essential_parts(X, Y, ess_parts_X[0], ess_parts_Y[0], axis=1, order=order)
+ c2, m2 = _cost_and_match_essential_parts(X, Y, ess_parts_X[1], ess_parts_Y[1], axis=0, order=order)
+
+ c = c1 + c2
+ m = m1 + m2
+
+ # Handle type 2 (coordinates (-inf,+inf), so we just align points)
+ m += list(zip(ess_parts_X[2], ess_parts_Y[2]))
+
+ # Handle types 3 and 4, considered as belonging to the diagonal so matched to (-1) with cost 0.
+ for z in ess_parts_X[3:]:
+ m += [(u, -1) for u in z] # points in X are matched to -1
+ for z in ess_parts_Y[3:]:
+ m += [(-1, v) for v in z] # -1 is matched to points in Y
+
+ return c, np.array(m)
+
+
+def _finite_part(X):
+ '''
+ :param X: (n x 2) numpy array encoding a persistence diagram.
+ :returns: The finite part of a diagram `X` (points with finite coordinates).
+ '''
+ return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
+
+
+def _warn_infty(matching):
+ '''
+ Handle essential parts with different cardinalities. Warn the user about cost being infinite and (if
+ `matching=True`) about the returned matching being `None`.
+ '''
+ if matching:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf, and the returned matching is None.')
+ return np.inf, None
+ else:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf.')
+ return np.inf
+
+
+def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
+ keep_essential_parts=True):
+ '''
+ Compute the Wasserstein distance between persistence diagrams using the Python Optimal Transport backend.
+ Diagrams can contain points with infinity coordinates (essential parts).
+ Points with (-inf,-inf) and (+inf,+inf) coordinates are considered as belonging to the diagonal.
+ If the distance between two diagrams is +inf (which happens if the cardinalities of essential
+ parts differ) and optimal matching is required, it will be set to ``None``.
+
+ :param X: The first diagram.
+ :type X: n x 2 numpy.array
+ :param Y: The second diagram.
+ :type Y: m x 2 numpy.array
+ :param matching: if ``True``, computes and returns the optimal matching between X and Y, encoded as
+ a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
+ the j-th point in Y, with the convention that (-1) represents the diagonal.
+ :param order: Wasserstein exponent q (1 <= q < infinity).
+ :type order: float
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
+ :type internal_p: float
+ :param enable_autodiff: If X and Y are ``torch.tensor`` or ``tensorflow.Tensor``, make the computation
+ transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
+ with ``matching=True`` and with ``keep_essential_parts=True``.
+
+ .. note:: This considers the function defined on the coordinates of the off-diagonal finite points of X and Y
+ and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
+ :type enable_autodiff: bool
+ :param keep_essential_parts: If ``False``, only considers the finite points in the diagrams.
+ Otherwise, include essential parts in cost and matching computation.
+ :type keep_essential_parts: bool
+ :returns: The Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+ respect to the internal_p-norm as ground metric.
+ If matching is set to True, also returns the optimal matching between X and Y.
+ If cost is +inf, any matching is optimal and thus it returns `None` instead.
+ '''
+
+ # First step: handle empty diagrams
+ n = len(X)
+ m = len(Y)
+
+ if n == 0:
+ if m == 0:
+ if not matching:
+ # What if enable_autodiff?
+ return 0.
+ else:
+ return 0., np.array([])
+ else:
+ cost = _perstot(Y, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
+ else:
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[-1, j] for j in range(m)])
+ elif m == 0:
+ cost = _perstot(X, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
+ else:
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[i, -1] for i in range(n)])
+
+
+ # Check essential part and enable autodiff together
+ if enable_autodiff and keep_essential_parts:
+ warnings.warn('''enable_autodiff=True and keep_essential_parts=True are incompatible.
+ keep_essential_parts is set to False: only points with finite coordinates are considered
+ in the following.
+ ''')
+ keep_essential_parts = False
+
+ # Second step: handle essential parts if needed.
+ if keep_essential_parts:
+ essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
+ if (essential_cost == np.inf):
+ return _warn_infty(matching) # Tells the user that cost is infty and matching (if True) is None.
+ # avoid computing transport cost between the finite parts if essential parts
+ # cardinalities do not match (saves time)
+ else:
+ essential_cost = 0
+ essential_matching = None
+
+ # Now the standard pipeline for finite parts
+ if enable_autodiff:
+ import eagerpy as ep
+
+ X_orig = ep.astensor(X)
+ Y_orig = ep.astensor(Y)
+ X = X_orig.numpy()
+ Y = Y_orig.numpy()
+
+ # Extract finite points of the diagrams.
+ X, Y = _finite_part(X), _finite_part(Y)
+ n = len(X)
+ m = len(Y)
+
+ M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
+ a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
+ a[-1] = m
+ b = np.ones(m+1) # weight vector of the input diagram. Uniform here.
+ b[-1] = n
+
+ if matching:
+ assert not enable_autodiff, "matching and enable_autodiff are currently incompatible"
+ P = ot.emd(a=a,b=b,M=M, numItermax=2000000)
+ ot_cost = np.sum(np.multiply(P,M))
+ P[-1, -1] = 0 # Remove matching corresponding to the diagonal
+ match = np.argwhere(P)
+ # Now we turn to -1 points encoding the diagonal
+ match[:,0][match[:,0] >= n] = -1
+ match[:,1][match[:,1] >= m] = -1
+ # Finally incorporate the essential part matching
+ if essential_matching is not None:
+ match = np.concatenate([match, essential_matching]) if essential_matching.size else match
+ return (ot_cost + essential_cost) ** (1./order), match
+
+ if enable_autodiff:
+ P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
+ pairs_X_Y = np.argwhere(P[:-1, :-1])
+ pairs_X_diag = np.nonzero(P[:-1, -1])
+ pairs_Y_diag = np.nonzero(P[-1, :-1])
+ dists = []
+ # empty arrays are not handled properly by the helpers, so we avoid calling them
+ if len(pairs_X_Y):
+ dists.append((Y_orig[pairs_X_Y[:, 1]] - X_orig[pairs_X_Y[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order))
+ if len(pairs_X_diag[0]):
+ dists.append(_perstot_autodiff(X_orig[pairs_X_diag], order, internal_p))
+ if len(pairs_Y_diag[0]):
+ dists.append(_perstot_autodiff(Y_orig[pairs_Y_diag], order, internal_p))
+ dists = [dist.reshape(1) for dist in dists]
+ return ep.concatenate(dists).norms.lp(order).raw
+ # We can also concatenate the 3 vectors to compute just one norm.
+
+ # Computation of the OT cost using the ot.emd2 library.
+ # Note: it is the Wasserstein distance to the power q.
+ # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
+ ot_cost = ot.emd2(a, b, M, numItermax=2000000)
+
+ return (ot_cost + essential_cost) ** (1./order)
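A minimal usage sketch of the matching output; the diagram values are arbitrary:

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    dgm1 = np.array([[2.7, 3.7], [9.6, 14.], [34.2, 34.974]])
    dgm2 = np.array([[2.8, 4.45], [9.5, 14.1]])
    cost, matching = wasserstein_distance(dgm1, dgm2, matching=True, order=1., internal_p=2.)
    # each row [i, j] of matching pairs point i of dgm1 with point j of dgm2;
    # -1 stands for the diagonal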
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
new file mode 100644
index 00000000..16f63c3d
--- /dev/null
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -0,0 +1,61 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Raphaël Tinarrage, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from gudhi import SimplexTree
+
+class WeightedRipsComplex:
+ """
+ Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
+ in the way described in :cite:`dtmfiltrations` with `p=1`. The filtration value of vertex `i` is `2*weights[i]`,
+ and the filtration value of edge `ij` is `distance_matrix[i][j]+weights[i]+weights[j]`,
+ or the maximum of the filtrations of its extremities, whichever is largest.
+ Remark that all the filtration values are doubled compared to the definition in the paper
+ for consistency with RipsComplex.
+ """
+ def __init__(self,
+ distance_matrix,
+ weights=None,
+ max_filtration=float('inf')):
+ """
+ Args:
+ distance_matrix (Sequence[Sequence[float]]): distance matrix (full square or lower triangular).
+ weights (Sequence[float]): (one half of) weight for each vertex.
+ max_filtration (float): specifies the maximal filtration value to be considered.
+ """
+ self.distance_matrix = distance_matrix
+ if weights is not None:
+ self.weights = weights
+ else:
+ self.weights = [0] * len(distance_matrix)
+ self.max_filtration = max_filtration
+
+ def create_simplex_tree(self, max_dimension):
+ """
+ Args:
+ max_dimension (int): graph expansion until this given dimension.
+ """
+ dist = self.distance_matrix
+ F = self.weights
+ num_pts = len(dist)
+
+ st = SimplexTree()
+
+ for i in range(num_pts):
+ if 2*F[i] <= self.max_filtration:
+ st.insert([i], 2*F[i])
+ for i in range(num_pts):
+ for j in range(i):
+ value = max(2*F[i], 2*F[j], dist[i][j] + F[i] + F[j])
+ # max is needed when F is not 1-Lipschitz
+ if value <= self.max_filtration:
+ st.insert([i,j], filtration=value)
+
+ st.expansion(max_dimension)
+ return st
+
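A minimal usage sketch; the points and weights are arbitrary, and scipy's cdist is only used here to build the distance matrix:

    from scipy.spatial.distance import cdist
    from gudhi.weighted_rips_complex import WeightedRipsComplex

    pts = [[2., 2.], [0., 1.], [3., 4.]]
    dist = cdist(pts, pts)    # full square distance matrix
    weights = [1., 100., 0.]  # one half of the weight per vertex
    wrips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
    st = wrips.create_simplex_tree(max_dimension=2)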
diff --git a/src/python/include/Alpha_complex_factory.h b/src/python/include/Alpha_complex_factory.h
new file mode 100644
index 00000000..41eb72c1
--- /dev/null
+++ b/src/python/include/Alpha_complex_factory.h
@@ -0,0 +1,156 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef INCLUDE_ALPHA_COMPLEX_FACTORY_H_
+#define INCLUDE_ALPHA_COMPLEX_FACTORY_H_
+
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Alpha_complex_3d.h>
+#include <gudhi/Alpha_complex_options.h>
+#include <CGAL/Epeck_d.h>
+#include <CGAL/Epick_d.h>
+
+#include <boost/range/adaptor/transformed.hpp>
+
+#include "Simplex_tree_interface.h"
+
+#include <iostream>
+#include <vector>
+#include <string>
+#include <memory> // for std::unique_ptr
+
+namespace Gudhi {
+
+namespace alpha_complex {
+
+// template Functor that transforms a CGAL point to a vector of double as expected by cython
+template<typename CgalPointType, bool Weighted>
+struct Point_cgal_to_cython;
+
+// Specialized Unweighted Functor
+template<typename CgalPointType>
+struct Point_cgal_to_cython<CgalPointType, false> {
+ std::vector<double> operator()(CgalPointType const& point) const
+ {
+ std::vector<double> vd;
+ vd.reserve(point.dimension());
+ for (auto coord = point.cartesian_begin(); coord != point.cartesian_end(); coord++)
+ vd.push_back(CGAL::to_double(*coord));
+ return vd;
+ }
+};
+
+// Specialized Weighted Functor
+template<typename CgalPointType>
+struct Point_cgal_to_cython<CgalPointType, true> {
+ std::vector<double> operator()(CgalPointType const& weighted_point) const
+ {
+ const auto& point = weighted_point.point();
+ return Point_cgal_to_cython<decltype(point), false>()(point);
+ }
+};
+
+// Function that transforms a cython point (aka. a vector of double) to a CGAL point
+template <typename CgalPointType>
+static CgalPointType pt_cython_to_cgal(std::vector<double> const& vec) {
+ return CgalPointType(vec.size(), vec.begin(), vec.end());
+}
+
+class Abstract_alpha_complex {
+ public:
+ virtual std::vector<double> get_point(int vh) = 0;
+
+ virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
+ bool default_filtration_value) = 0;
+
+ virtual std::size_t num_vertices() const = 0;
+
+ virtual ~Abstract_alpha_complex() = default;
+};
+
+template <bool Weighted = false>
+class Exact_alpha_complex_dD final : public Abstract_alpha_complex {
+ private:
+ using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+ using Bare_point = typename Kernel::Point_d;
+ using Point = std::conditional_t<Weighted, typename Kernel::Weighted_point_d,
+ typename Kernel::Point_d>;
+
+ public:
+ Exact_alpha_complex_dD(const std::vector<std::vector<double>>& points, bool exact_version)
+ : exact_version_(exact_version),
+ alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>)) {
+ }
+
+ Exact_alpha_complex_dD(const std::vector<std::vector<double>>& points,
+ const std::vector<double>& weights, bool exact_version)
+ : exact_version_(exact_version),
+ alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>), weights) {
+ }
+
+ virtual std::vector<double> get_point(int vh) override {
+ // Can be a Weighted or a Bare point depending on Weighted
+ return Point_cgal_to_cython<Point, Weighted>()(alpha_complex_.get_point(vh));
+ }
+
+ virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
+ bool default_filtration_value) override {
+ return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, exact_version_, default_filtration_value);
+ }
+
+ virtual std::size_t num_vertices() const override {
+ return alpha_complex_.num_vertices();
+ }
+
+ private:
+ bool exact_version_;
+ Alpha_complex<Kernel, Weighted> alpha_complex_;
+};
+
+template <bool Weighted = false>
+class Inexact_alpha_complex_dD final : public Abstract_alpha_complex {
+ private:
+ using Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
+ using Bare_point = typename Kernel::Point_d;
+ using Point = std::conditional_t<Weighted, typename Kernel::Weighted_point_d,
+ typename Kernel::Point_d>;
+
+ public:
+ Inexact_alpha_complex_dD(const std::vector<std::vector<double>>& points)
+ : alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>)) {
+ }
+
+ Inexact_alpha_complex_dD(const std::vector<std::vector<double>>& points, const std::vector<double>& weights)
+ : alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>), weights) {
+ }
+
+ virtual std::vector<double> get_point(int vh) override {
+ // Can be a Weighted or a Bare point depending on Weighted
+ return Point_cgal_to_cython<Point, Weighted>()(alpha_complex_.get_point(vh));
+ }
+ virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
+ bool default_filtration_value) override {
+ return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, false, default_filtration_value);
+ }
+
+ virtual std::size_t num_vertices() const override {
+ return alpha_complex_.num_vertices();
+ }
+
+ private:
+ Alpha_complex<Kernel, Weighted> alpha_complex_;
+};
+
+} // namespace alpha_complex
+
+} // namespace Gudhi
+
+#endif // INCLUDE_ALPHA_COMPLEX_FACTORY_H_
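On the Python side, the fast/exact/weighted combinations dispatched by this factory are expected to surface roughly as below. This is a sketch, assuming the AlphaComplex constructor exposes the weights and precision options the factory implements:

    import gudhi

    ac = gudhi.AlphaComplex(points=[[1., 1.], [7., 0.], [4., 6.], [9., 6.]],
                            weights=[1., 2., 1., 0.5],  # optional: weighted alpha complex
                            precision='fast')           # 'fast' -> Epick_d, 'safe'/'exact' -> Epeck_d
    st = ac.create_simplex_tree()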
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 8614eee3..469b91ce 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -11,58 +11,64 @@
#ifndef INCLUDE_ALPHA_COMPLEX_INTERFACE_H_
#define INCLUDE_ALPHA_COMPLEX_INTERFACE_H_
-#include <gudhi/Simplex_tree.h>
-#include <gudhi/Alpha_complex.h>
-#include <CGAL/Epeck_d.h>
-#include <CGAL/Epick_d.h>
-
-#include <boost/range/adaptor/transformed.hpp>
+#include "Alpha_complex_factory.h"
+#include <gudhi/Alpha_complex_options.h>
#include "Simplex_tree_interface.h"
#include <iostream>
#include <vector>
#include <string>
+#include <memory> // for std::unique_ptr
namespace Gudhi {
namespace alpha_complex {
class Alpha_complex_interface {
- using Dynamic_kernel = CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >;
- using Point_d = Dynamic_kernel::Point_d;
-
public:
- Alpha_complex_interface(const std::vector<std::vector<double>>& points) {
- auto mkpt = [](std::vector<double> const& vec){
- return Point_d(vec.size(), vec.begin(), vec.end());
- };
- alpha_complex_ = new Alpha_complex<Dynamic_kernel>(boost::adaptors::transform(points, mkpt));
+ Alpha_complex_interface(const std::vector<std::vector<double>>& points,
+ const std::vector<double>& weights,
+ bool fast_version, bool exact_version) {
+ const bool weighted = (weights.size() > 0);
+ if (fast_version) {
+ if (weighted) {
+ alpha_ptr_ = std::make_unique<Inexact_alpha_complex_dD<true>>(points, weights);
+ } else {
+ alpha_ptr_ = std::make_unique<Inexact_alpha_complex_dD<false>>(points);
+ }
+ } else {
+ if (weighted) {
+ alpha_ptr_ = std::make_unique<Exact_alpha_complex_dD<true>>(points, weights, exact_version);
+ } else {
+ alpha_ptr_ = std::make_unique<Exact_alpha_complex_dD<false>>(points, exact_version);
+ }
+ }
}
- Alpha_complex_interface(const std::string& off_file_name, bool from_file = true) {
- alpha_complex_ = new Alpha_complex<Dynamic_kernel>(off_file_name);
+ std::vector<double> get_point(int vh) {
+ return alpha_ptr_->get_point(vh);
}
- ~Alpha_complex_interface() {
- delete alpha_complex_;
+ void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
+ bool default_filtration_value) {
+ // Nothing to be done in case of an empty point set
+ if (alpha_ptr_->num_vertices() > 0)
+ alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value);
}
- std::vector<double> get_point(int vh) {
- std::vector<double> vd;
- Point_d const& ph = alpha_complex_->get_point(vh);
- for (auto coord = ph.cartesian_begin(); coord != ph.cartesian_end(); coord++)
- vd.push_back(CGAL::to_double(*coord));
- return vd;
+ static void set_float_relative_precision(double precision) {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::set_relative_precision_of_to_double(precision);
}
- void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) {
- alpha_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
+ static double get_float_relative_precision() {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ return CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::get_relative_precision_of_to_double();
}
private:
- Alpha_complex<Dynamic_kernel>* alpha_complex_;
+ std::unique_ptr<Abstract_alpha_complex> alpha_ptr_;
};
} // namespace alpha_complex
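The new static accessors should translate to class-level calls in Python along these lines (a sketch; the trade-off is speed versus tightness of the double approximation of the exact filtration values):

    import gudhi

    # illustrative value; CGAL requires a relative precision strictly between 0 and 1
    gudhi.AlphaComplex.set_float_relative_precision(1e-5)
    ac = gudhi.AlphaComplex(points=[[1., 1.], [7., 0.], [4., 6.]], precision='safe')
    st = ac.create_simplex_tree()
    print(gudhi.AlphaComplex.get_float_relative_precision())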
diff --git a/src/python/include/Euclidean_strong_witness_complex_interface.h b/src/python/include/Euclidean_strong_witness_complex_interface.h
index c1c72737..f94c51ef 100644
--- a/src/python/include/Euclidean_strong_witness_complex_interface.h
+++ b/src/python/include/Euclidean_strong_witness_complex_interface.h
@@ -50,12 +50,10 @@ class Euclidean_strong_witness_complex_interface {
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
std::vector<double> get_point(unsigned vh) {
diff --git a/src/python/include/Euclidean_witness_complex_interface.h b/src/python/include/Euclidean_witness_complex_interface.h
index 5d7dbdc2..4411ae79 100644
--- a/src/python/include/Euclidean_witness_complex_interface.h
+++ b/src/python/include/Euclidean_witness_complex_interface.h
@@ -49,12 +49,10 @@ class Euclidean_witness_complex_interface {
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square, std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
std::vector<double> get_point(unsigned vh) {
diff --git a/src/python/include/Nerve_gic_interface.h b/src/python/include/Nerve_gic_interface.h
index 5e7f8ae6..ab14c318 100644
--- a/src/python/include/Nerve_gic_interface.h
+++ b/src/python/include/Nerve_gic_interface.h
@@ -29,7 +29,6 @@ class Nerve_gic_interface : public Cover_complex<std::vector<double>> {
public:
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree) {
create_complex(*simplex_tree);
- simplex_tree->initialize_filtration();
}
void set_cover_from_Euclidean_Voronoi(int m) {
set_cover_from_Voronoi(Gudhi::Euclidean_distance(), m);
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h
index 8c79e6f3..945378a0 100644
--- a/src/python/include/Persistent_cohomology_interface.h
+++ b/src/python/include/Persistent_cohomology_interface.h
@@ -12,10 +12,14 @@
#define INCLUDE_PERSISTENT_COHOMOLOGY_INTERFACE_H_
#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/Simplex_tree.h> // for Extended_simplex_type
+
+#include <cstdlib>
#include <vector>
#include <utility> // for std::pair
#include <algorithm> // for sort
+#include <unordered_map>
namespace Gudhi {
@@ -23,82 +27,242 @@ template<class FilteredComplex>
class Persistent_cohomology_interface : public
persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp> {
private:
+ typedef persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp> Base;
/*
* Compare two intervals by dimension, then by length.
*/
struct cmp_intervals_by_dim_then_length {
- explicit cmp_intervals_by_dim_then_length(FilteredComplex * sc)
- : sc_(sc) { }
-
template<typename Persistent_interval>
bool operator()(const Persistent_interval & p1, const Persistent_interval & p2) {
- if (sc_->dimension(get < 0 > (p1)) == sc_->dimension(get < 0 > (p2)))
- return (sc_->filtration(get < 1 > (p1)) - sc_->filtration(get < 0 > (p1))
- > sc_->filtration(get < 1 > (p2)) - sc_->filtration(get < 0 > (p2)));
+ if (std::get<0>(p1) == std::get<0>(p2)) {
+ auto& i1 = std::get<1>(p1);
+ auto& i2 = std::get<1>(p2);
+ return std::get<1>(i1) - std::get<0>(i1) > std::get<1>(i2) - std::get<0>(i2);
+ }
else
- return (sc_->dimension(get < 0 > (p1)) > sc_->dimension(get < 0 > (p2)));
+ return (std::get<0>(p1) > std::get<0>(p2));
+ // Why does this sort by decreasing dimension?
}
- FilteredComplex* sc_;
};
public:
- Persistent_cohomology_interface(FilteredComplex* stptr)
- : persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp>(*stptr),
- stptr_(stptr) { }
-
- Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max)
- : persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>(*stptr, persistence_dim_max),
+ Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max=false)
+ : Base(*stptr, persistence_dim_max),
stptr_(stptr) { }
- std::vector<std::pair<int, std::pair<double, double>>> get_persistence(int homology_coeff_field,
- double min_persistence) {
- persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::init_coefficients(homology_coeff_field);
- persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::compute_persistent_cohomology(min_persistence);
-
- // Custom sort and output persistence
- cmp_intervals_by_dim_then_length cmp(stptr_);
- auto persistent_pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::get_persistent_pairs();
- std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp);
+ // TODO: move to the constructors?
+ void compute_persistence(int homology_coeff_field, double min_persistence) {
+ Base::init_coefficients(homology_coeff_field);
+ Base::compute_persistent_cohomology(min_persistence);
+ }
+ std::vector<std::pair<int, std::pair<double, double>>> get_persistence() {
std::vector<std::pair<int, std::pair<double, double>>> persistence;
+ auto const& persistent_pairs = Base::get_persistent_pairs();
+ persistence.reserve(persistent_pairs.size());
for (auto pair : persistent_pairs) {
- persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)),
- std::make_pair(stptr_->filtration(get<0>(pair)),
- stptr_->filtration(get<1>(pair)))));
+ persistence.emplace_back(stptr_->dimension(get<0>(pair)),
+ std::make_pair(stptr_->filtration(get<0>(pair)),
+ stptr_->filtration(get<1>(pair))));
}
+ // Custom sort and output persistence
+ cmp_intervals_by_dim_then_length cmp;
+ std::sort(std::begin(persistence), std::end(persistence), cmp);
return persistence;
}
- std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs() {
- auto pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
+ // This function computes the top-dimensional cofaces associated to the positive and negative
+ // simplices of a cubical complex. The output format is a vector of vectors of three integers,
+ // which are [homological dimension, index of top-dimensional coface of positive simplex,
+ // index of top-dimensional coface of negative simplex]. If the topological feature is essential,
+ // then the index of top-dimensional coface of negative simplex is arbitrarily set to -1.
+ std::vector<std::vector<int>> cofaces_of_cubical_persistence_pairs() {
+
+ // Warning: this function is meant to be used with CubicalComplex only!!
+
+ auto&& pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
persistent_cohomology::Field_Zp>::get_persistent_pairs();
+ // Gather all top-dimensional cells and store their simplex handles
+ std::vector<std::size_t> max_splx;
+ for (auto splx : stptr_->top_dimensional_cells_range())
+ max_splx.push_back(splx);
+ // Sort these simplex handles and compute the ordering function
+    // This map lets us go directly from a simplex handle to the position of the
+    // corresponding top-dimensional cell in the input data
+ std::unordered_map<std::size_t, int> order;
+ //std::sort(max_splx.begin(), max_splx.end());
+ for (unsigned int i = 0; i < max_splx.size(); i++) order.emplace(max_splx[i], i);
+
+ std::vector<std::vector<int>> persistence_pairs;
+ for (auto pair : pairs) {
+ int h = stptr_->dimension(get<0>(pair));
+ // Recursively get the top-dimensional cell / coface associated to the persistence generator
+ std::size_t face0 = stptr_->get_top_dimensional_coface_of_a_cell(get<0>(pair));
+ // Retrieve the index of the corresponding top-dimensional cell in the input data
+ int splx0 = order[face0];
+
+ int splx1 = -1;
+ if (get<1>(pair) != stptr_->null_simplex()){
+ // Recursively get the top-dimensional cell / coface associated to the persistence generator
+ std::size_t face1 = stptr_->get_top_dimensional_coface_of_a_cell(get<1>(pair));
+ // Retrieve the index of the corresponding top-dimensional cell in the input data
+ splx1 = order[face1];
+ }
+ persistence_pairs.push_back({ h, splx0, splx1 });
+ }
+ return persistence_pairs;
+ }
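To make the documented output format concrete, here is a hedged Python sketch using the cubical wrapper exactly as the updated tests do; the flattening order used to map indices back to input cells is an assumption, so check the library documentation for the exact convention:

    import numpy as np
    import gudhi

    cells = np.array([[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]])
    cc = gudhi.CubicalComplex(top_dimensional_cells=cells)
    cc.compute_persistence()
    # regular[dim] holds rows [birth cell index, death cell index];
    # essential[dim] holds birth cell indices of essential features (death = -1 above)
    regular, essential = cc.cofaces_of_persistence_pairs()
    flat = cells.flatten()  # assumed C-order numbering of top-dimensional cells
    for dim, pairs in enumerate(regular):
        for birth_idx, death_idx in pairs:
            print(dim, flat[birth_idx], flat[death_idx])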
+
+ std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs() {
std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs;
+ auto const& pairs = Base::get_persistent_pairs();
persistence_pairs.reserve(pairs.size());
+ std::vector<int> birth;
+ std::vector<int> death;
for (auto pair : pairs) {
- std::vector<int> birth;
+ birth.clear();
if (get<0>(pair) != stptr_->null_simplex()) {
for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))) {
birth.push_back(vertex);
}
}
- std::vector<int> death;
+ death.clear();
if (get<1>(pair) != stptr_->null_simplex()) {
+ death.reserve(birth.size()+1);
for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))) {
death.push_back(vertex);
}
}
- persistence_pairs.push_back(std::make_pair(birth, death));
+ persistence_pairs.emplace_back(birth, death);
}
return persistence_pairs;
}
+ // TODO: (possibly at the python level)
+ // - an option to return only some of those vectors?
+ typedef std::pair<std::vector<std::vector<int>>, std::vector<std::vector<int>>> Generators;
+
+ Generators lower_star_generators() {
+ Generators out;
+ // diags[i] should be interpreted as vector<array<int,2>>
+ auto& diags = out.first;
+ // diagsinf[i] should be interpreted as vector<int>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ auto v = stptr_->vertex_with_same_filtration(s);
+ if(t == stptr_->null_simplex()) {
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ diagsinf[dim].push_back(v);
+ } else {
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto w = stptr_->vertex_with_same_filtration(t);
+ auto& d = diags[dim];
+ d.insert(d.end(), { v, w });
+ }
+ }
+ return out;
+ }
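Since lower_star_generators() only returns vertex indices, the diagram itself is recovered by indexing the vertex filtration values. A short sketch with a hypothetical helper name, assuming the (v, w) row layout described in the comments above:

    import numpy as np

    # gens rows are [birth_vertex, death_vertex] for finite pairs, so fancy
    # indexing of the vertex filtration vector F rebuilds the diagram (and,
    # in an autodiff framework, keeps it differentiable with respect to F)
    def lower_star_pairs_to_diagram(F, gens):
        gens = np.asarray(gens, dtype=int).reshape(-1, 2)
        return np.stack([F[gens[:, 0]], F[gens[:, 1]]], axis=-1)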
+
+  // An alternative, to avoid those different sizes, would be to "pad" a vertex generator v
+  // as (v, v) or (v, -1). When using it as an index, this corresponds to adding the vertex
+  // filtration values either on the diagonal of the distance matrix, or as an extra row or column.
+  // We could also merge the vectors for different dimensions into a single one, with an extra
+  // column for the dimension (converted to type double).
+ Generators flag_generators() {
+ Generators out;
+ // diags[0] should be interpreted as vector<array<int,3>> and other diags[i] as vector<array<int,4>>
+ auto& diags = out.first;
+ // diagsinf[0] should be interpreted as vector<int> and other diagsinf[i] as vector<array<int,2>>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ bool infinite = t == stptr_->null_simplex();
+ if(infinite) {
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if(diagsinf.size()==0)diagsinf.emplace_back();
+ diagsinf[0].push_back(v);
+ } else {
+ auto e = stptr_->edge_with_same_filtration(s);
+ auto&& e_vertices = stptr_->simplex_vertex_range(e);
+ auto i = std::begin(e_vertices);
+ auto v1 = *i;
+ auto v2 = *++i;
+ GUDHI_CHECK(++i==std::end(e_vertices), "must be an edge");
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ auto& d = diagsinf[dim];
+ d.insert(d.end(), { v1, v2 });
+ }
+ } else {
+ auto et = stptr_->edge_with_same_filtration(t);
+ auto&& et_vertices = stptr_->simplex_vertex_range(et);
+ auto it = std::begin(et_vertices);
+ auto w1 = *it;
+ auto w2 = *++it;
+ GUDHI_CHECK(++it==std::end(et_vertices), "must be an edge");
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if(diags.size()==0)diags.emplace_back();
+ auto& d = diags[0];
+ d.insert(d.end(), { v, w1, w2 });
+ } else {
+ auto es = stptr_->edge_with_same_filtration(s);
+ auto&& es_vertices = stptr_->simplex_vertex_range(es);
+ auto is = std::begin(es_vertices);
+ auto v1 = *is;
+ auto v2 = *++is;
+ GUDHI_CHECK(++is==std::end(es_vertices), "must be an edge");
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto& d = diags[dim];
+ d.insert(d.end(), { v1, v2, w1, w2 });
+ }
+ }
+ }
+ return out;
+ }
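The same indexing trick applies here: for a flag (Rips) filtration the generators are vertex pairs, so birth and death are entries of the distance matrix. A sketch with a hypothetical helper name, for the dimension >= 1 row layout [v1, v2, w1, w2] described above:

    import numpy as np

    def flag_pairs_to_diagram(D, gens):
        gens = np.asarray(gens, dtype=int).reshape(-1, 4)
        return np.stack([D[gens[:, 0], gens[:, 1]], D[gens[:, 2], gens[:, 3]]], axis=-1)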
+
+ using Filtration_value = typename FilteredComplex::Filtration_value;
+ using Birth_death = std::pair<Filtration_value, Filtration_value>;
+ using Persistence_subdiagrams = std::vector<std::vector<std::pair<int, Birth_death>>>;
+
+ Persistence_subdiagrams compute_extended_persistence_subdiagrams(Filtration_value min_persistence){
+ Persistence_subdiagrams pers_subs(4);
+ auto const& persistent_pairs = Base::get_persistent_pairs();
+ for (auto pair : persistent_pairs) {
+ std::pair<Filtration_value, Extended_simplex_type> px = stptr_->decode_extended_filtration(stptr_->filtration(get<0>(pair)),
+ stptr_->efd);
+ std::pair<Filtration_value, Extended_simplex_type> py = stptr_->decode_extended_filtration(stptr_->filtration(get<1>(pair)),
+ stptr_->efd);
+ std::pair<int, Birth_death> pd_point = std::make_pair(stptr_->dimension(get<0>(pair)),
+ std::make_pair(px.first, py.first));
+ if(std::abs(px.first - py.first) > min_persistence){
+ //Ordinary
+ if (px.second == Extended_simplex_type::UP && py.second == Extended_simplex_type::UP){
+ pers_subs[0].push_back(pd_point);
+ }
+ // Relative
+ else if (px.second == Extended_simplex_type::DOWN && py.second == Extended_simplex_type::DOWN){
+ pers_subs[1].push_back(pd_point);
+ }
+ else{
+ // Extended+
+ if (px.first < py.first){
+ pers_subs[2].push_back(pd_point);
+ }
+ //Extended-
+ else{
+ pers_subs[3].push_back(pd_point);
+ }
+ }
+ }
+ }
+ return pers_subs;
+ }
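On the Python side this is what backs extended persistence on a SimplexTree; assuming the usual binding names (extend_filtration / extended_persistence), the four sublists come back in the same order as pers_subs above:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0], 0.0)
    st.insert([1], 1.0)
    st.insert([0, 1], 1.0)
    st.extend_filtration()  # the extend/decode step, cf. stptr_->efd above
    ordinary, relative, ext_plus, ext_minus = st.extended_persistence(min_persistence=0.0)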
+
private:
  // A copy of the pointer to the underlying filtered complex
FilteredComplex* stptr_;
diff --git a/src/python/include/Rips_complex_interface.h b/src/python/include/Rips_complex_interface.h
index a66b0e5b..d98b0226 100644
--- a/src/python/include/Rips_complex_interface.h
+++ b/src/python/include/Rips_complex_interface.h
@@ -53,7 +53,6 @@ class Rips_complex_interface {
rips_complex_->create_complex(*simplex_tree, dim_max);
else
sparse_rips_complex_->create_complex(*simplex_tree, dim_max);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 06f31341..0317ea39 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -15,12 +15,13 @@
#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Points_off_io.h>
-
-#include "Persistent_cohomology_interface.h"
+#include <gudhi/Flag_complex_edge_collapser.h>
#include <iostream>
#include <vector>
#include <utility> // std::pair
+#include <tuple>
+#include <iterator> // for std::distance
namespace Gudhi {
@@ -33,22 +34,60 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
using Simplex_handle = typename Base::Simplex_handle;
using Insertion_result = typename std::pair<Simplex_handle, bool>;
using Simplex = std::vector<Vertex_handle>;
- using Filtered_simplices = std::vector<std::pair<Simplex, Filtration_value>>;
+ using Simplex_and_filtration = std::pair<Simplex, Filtration_value>;
+ using Filtered_simplices = std::vector<Simplex_and_filtration>;
+ using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator;
+ using Complex_simplex_iterator = typename Base::Complex_simplex_iterator;
+ using Extended_filtration_data = typename Base::Extended_filtration_data;
+ using Boundary_simplex_iterator = typename Base::Boundary_simplex_iterator;
+ using Siblings = typename Base::Siblings;
+ using Node = typename Base::Node;
+ typedef bool (*blocker_func_t)(Simplex simplex, void *user_data);
public:
- bool find_simplex(const Simplex& vh) {
- return (Base::find(vh) != Base::null_simplex());
+
+ Extended_filtration_data efd;
+
+ bool find_simplex(const Simplex& simplex) {
+ return (Base::find(simplex) != Base::null_simplex());
}
- void assign_simplex_filtration(const Simplex& vh, Filtration_value filtration) {
- Base::assign_filtration(Base::find(vh), filtration);
+ void assign_simplex_filtration(const Simplex& simplex, Filtration_value filtration) {
+ Base::assign_filtration(Base::find(simplex), filtration);
+ Base::clear_filtration();
}
bool insert(const Simplex& simplex, Filtration_value filtration = 0) {
Insertion_result result = Base::insert_simplex_and_subfaces(simplex, filtration);
+ if (result.first != Base::null_simplex())
+ Base::clear_filtration();
return (result.second);
}
+ void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) {
+ // We could delegate to insert_graph, but wrapping the matrix in a graph interface is too much work,
+ // and this is a bit more efficient.
+ auto& rm = this->root()->members_;
+ for(int i=0; i<n; ++i) {
+ char* p = reinterpret_cast<char*>(filtrations) + i * stride0;
+ double fv = *reinterpret_cast<double*>(p + i * stride1);
+ if(fv > max_filtration) continue;
+ auto sh = rm.emplace_hint(rm.end(), i, Node(this->root(), fv));
+ Siblings* children = nullptr;
+ // Should we make a first pass to count the number of edges so we can reserve the right space?
+ for(int j=i+1; j<n; ++j) {
+ double fe = *reinterpret_cast<double*>(p + j * stride1);
+ if(fe > max_filtration) continue;
+ if(!children) {
+ children = new Siblings(this->root(), i);
+ sh->second.assign_children(children);
+ }
+ children->members().emplace_hint(children->members().end(), j, Node(children, fe));
+ }
+ }
+ }
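A pure-Python mirror of the matrix semantics, as a sketch (hypothetical helper, not part of the bindings): vertex i takes filtration F[i, i], edge (i, j) with i < j takes F[i, j], and a vertex above max_filtration is skipped together with every edge starting at it:

    import numpy as np

    def matrix_to_simplices(F, max_filtration):
        F = np.asarray(F)
        out = []
        for i in range(F.shape[0]):
            if F[i, i] > max_filtration:
                continue  # the whole row is skipped, as in the loop above
            out.append(([i], F[i, i]))
            out += [([i, j], F[i, j]) for j in range(i + 1, F.shape[0])
                    if F[i, j] <= max_filtration]
        return out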
+
// Do not interface this function, only used in alpha complex interface for complex creation
bool insert_simplex(const Simplex& simplex, Filtration_value filtration = 0) {
Insertion_result result = Base::insert_simplex(simplex, filtration);
@@ -79,32 +118,15 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
void remove_maximal_simplex(const Simplex& simplex) {
Base::remove_maximal_simplex(Base::find(simplex));
- Base::initialize_filtration();
+ Base::clear_filtration();
}
- Filtered_simplices get_filtration() {
- Base::initialize_filtration();
- Filtered_simplices filtrations;
- for (auto f_simplex : Base::filtration_simplex_range()) {
- Simplex simplex;
- for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
- simplex.insert(simplex.begin(), vertex);
- }
- filtrations.push_back(std::make_pair(simplex, Base::filtration(f_simplex)));
+ Simplex_and_filtration get_simplex_and_filtration(Simplex_handle f_simplex) {
+ Simplex simplex;
+ for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
+ simplex.insert(simplex.begin(), vertex);
}
- return filtrations;
- }
-
- Filtered_simplices get_skeleton(int dimension) {
- Filtered_simplices skeletons;
- for (auto f_simplex : Base::skeleton_simplex_range(dimension)) {
- Simplex simplex;
- for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
- simplex.insert(simplex.begin(), vertex);
- }
- skeletons.push_back(std::make_pair(simplex, Base::filtration(f_simplex)));
- }
- return skeletons;
+ return std::make_pair(std::move(simplex), Base::filtration(f_simplex));
}
Filtered_simplices get_star(const Simplex& simplex) {
@@ -131,9 +153,85 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
return cofaces;
}
- void create_persistence(Gudhi::Persistent_cohomology_interface<Base>* pcoh) {
- Base::initialize_filtration();
- pcoh = new Gudhi::Persistent_cohomology_interface<Base>(*this);
+ void compute_extended_filtration() {
+ this->efd = this->extend_filtration();
+ return;
+ }
+
+ Simplex_tree_interface* collapse_edges(int nb_collapse_iteration) {
+ using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
+ std::vector<Filtered_edge> edges;
+ for (Simplex_handle sh : Base::skeleton_simplex_range(1)) {
+ if (Base::dimension(sh) == 1) {
+ typename Base::Simplex_vertex_range rg = Base::simplex_vertex_range(sh);
+ auto vit = rg.begin();
+ Vertex_handle v = *vit;
+ Vertex_handle w = *++vit;
+ edges.emplace_back(v, w, Base::filtration(sh));
+ }
+ }
+
+ for (int iteration = 0; iteration < nb_collapse_iteration; iteration++) {
+ edges = Gudhi::collapse::flag_complex_collapse_edges(std::move(edges));
+ }
+ Simplex_tree_interface* collapsed_stree_ptr = new Simplex_tree_interface();
+ // Copy the original 0-skeleton
+ for (Simplex_handle sh : Base::skeleton_simplex_range(0)) {
+ collapsed_stree_ptr->insert({*(Base::simplex_vertex_range(sh).begin())}, Base::filtration(sh));
+ }
+ // Insert remaining edges
+ for (auto remaining_edge : edges) {
+ collapsed_stree_ptr->insert({std::get<0>(remaining_edge), std::get<1>(remaining_edge)}, std::get<2>(remaining_edge));
+ }
+ return collapsed_stree_ptr;
+ }
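From Python, assuming the binding keeps the name collapse_edges, a typical round trip collapses the 1-skeleton and re-expands before computing persistence:

    import gudhi

    st = gudhi.RipsComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]]).create_simplex_tree(max_dimension=1)
    st.collapse_edges()  # assumed Python spelling of the call above (one iteration)
    st.expansion(2)      # rebuild higher-dimensional simplices on the collapsed graph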
+
+ void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data) {
+ Base::expansion_with_blockers(dimension, [&](Simplex_handle sh){
+ Simplex simplex(Base::simplex_vertex_range(sh).begin(), Base::simplex_vertex_range(sh).end());
+ return user_func(simplex, user_data);
+ });
+ }
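A sketch of how the callback is meant to be driven from Python, with a hypothetical binding name (the C++ above suggests something like expansion_with_blocker); the blocker returns True to reject a candidate simplex:

    import gudhi

    st = gudhi.SimplexTree()
    for e in ([0, 1], [1, 2], [0, 2]):
        st.insert(e, 1.0)
    # hypothetical binding name; blocks any expanded simplex containing vertex 2
    st.expansion_with_blocker(2, lambda simplex: 2 in simplex)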
+
+ // Iterator over the simplex tree
+ Complex_simplex_iterator get_simplices_iterator_begin() {
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::complex_simplex_range().begin();
+ }
+
+ Complex_simplex_iterator get_simplices_iterator_end() {
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::complex_simplex_range().end();
+ }
+
+ typename std::vector<Simplex_handle>::const_iterator get_filtration_iterator_begin() {
+    // No need to call Base::initialize_filtration() here: filtration_simplex_range already performs it
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::filtration_simplex_range().begin();
+ }
+
+ typename std::vector<Simplex_handle>::const_iterator get_filtration_iterator_end() {
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::filtration_simplex_range().end();
+ }
+
+ Skeleton_simplex_iterator get_skeleton_iterator_begin(int dimension) {
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::skeleton_simplex_range(dimension).begin();
+ }
+
+ Skeleton_simplex_iterator get_skeleton_iterator_end(int dimension) {
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ return Base::skeleton_simplex_range(dimension).end();
+ }
+
+ std::pair<Boundary_simplex_iterator, Boundary_simplex_iterator> get_boundary_iterators(const Simplex& simplex) {
+ auto bd_sh = Base::find(simplex);
+ if (bd_sh == Base::null_simplex())
+ throw std::runtime_error("simplex not found - cannot find boundaries");
+    // this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
+ auto boundary_srange = Base::boundary_simplex_range(bd_sh);
+ return std::make_pair(boundary_srange.begin(), boundary_srange.end());
}
};
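Net effect on the Python side of swapping materialized vectors for iterators: the range accessors now behave as generators, which is why the updated tests below wrap them in list(...):

    import gudhi

    simplex_tree = gudhi.SimplexTree()
    simplex_tree.insert([0, 1], 0.5)
    for simplex, filtration in simplex_tree.get_filtration():  # lazy iteration
        print(simplex, filtration)
    filtration_list = list(simplex_tree.get_filtration())      # or materialize explicitly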
diff --git a/src/python/include/Strong_witness_complex_interface.h b/src/python/include/Strong_witness_complex_interface.h
index cda5b514..e9ab0c7b 100644
--- a/src/python/include/Strong_witness_complex_interface.h
+++ b/src/python/include/Strong_witness_complex_interface.h
@@ -41,13 +41,11 @@ class Strong_witness_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree,
double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/Subsampling_interface.h b/src/python/include/Subsampling_interface.h
index cdda851f..6aee7231 100644
--- a/src/python/include/Subsampling_interface.h
+++ b/src/python/include/Subsampling_interface.h
@@ -11,6 +11,7 @@
#ifndef INCLUDE_SUBSAMPLING_INTERFACE_H_
#define INCLUDE_SUBSAMPLING_INTERFACE_H_
+#include <gudhi/distance_functions.h>
#include <gudhi/choose_n_farthest_points.h>
#include <gudhi/pick_n_random_points.h>
#include <gudhi/sparsify_point_set.h>
@@ -27,14 +28,13 @@ namespace subsampling {
using Subsampling_dynamic_kernel = CGAL::Epick_d< CGAL::Dynamic_dimension_tag >;
using Subsampling_point_d = Subsampling_dynamic_kernel::Point_d;
-using Subsampling_ft = Subsampling_dynamic_kernel::FT;
// ------ choose_n_farthest_points ------
std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector<std::vector<double>>& points,
unsigned nb_points) {
std::vector<std::vector<double>> landmarks;
- Subsampling_dynamic_kernel k;
- choose_n_farthest_points(k, points, nb_points, random_starting_point, std::back_inserter(landmarks));
+ choose_n_farthest_points(Euclidean_distance(), points, nb_points,
+ random_starting_point, std::back_inserter(landmarks));
return landmarks;
}
@@ -42,8 +42,8 @@ std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector
std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector<std::vector<double>>& points,
unsigned nb_points, unsigned starting_point) {
std::vector<std::vector<double>> landmarks;
- Subsampling_dynamic_kernel k;
- choose_n_farthest_points(k, points, nb_points, starting_point, std::back_inserter(landmarks));
+ choose_n_farthest_points(Euclidean_distance(), points, nb_points,
+ starting_point, std::back_inserter(landmarks));
return landmarks;
}
diff --git a/src/python/include/Tangential_complex_interface.h b/src/python/include/Tangential_complex_interface.h
index 698226cc..b1afce94 100644
--- a/src/python/include/Tangential_complex_interface.h
+++ b/src/python/include/Tangential_complex_interface.h
@@ -90,7 +90,6 @@ class Tangential_complex_interface {
void create_simplex_tree(Simplex_tree<>* simplex_tree) {
tangential_complex_->create_complex<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>(*simplex_tree);
- simplex_tree->initialize_filtration();
}
void set_max_squared_edge_length(double max_squared_edge_length) {
diff --git a/src/python/include/Witness_complex_interface.h b/src/python/include/Witness_complex_interface.h
index 45e14253..76947e53 100644
--- a/src/python/include/Witness_complex_interface.h
+++ b/src/python/include/Witness_complex_interface.h
@@ -41,13 +41,11 @@ class Witness_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree,
double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/pybind11_diagram_utils.h b/src/python/include/pybind11_diagram_utils.h
new file mode 100644
index 00000000..2d5194f4
--- /dev/null
+++ b/src/python/include/pybind11_diagram_utils.h
@@ -0,0 +1,39 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <boost/range/counting_range.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+namespace py = pybind11;
+typedef py::array_t<double> Dgm;
+
+// Get m[i,0] and m[i,1] as a pair, given the raw buffer p and the strides h (between rows) and w (between columns)
+static auto pairify(void* p, py::ssize_t h, py::ssize_t w) {
+ return [=](py::ssize_t i){
+ char* birth = (char*)p + i * h;
+ char* death = birth + w;
+ return std::make_pair(*(double*)birth, *(double*)death);
+ };
+}
+
+inline auto numpy_to_range_of_pairs(py::array_t<double> dgm) {
+ py::buffer_info buf = dgm.request();
+ // shape (n,2) or (0) for empty
+ if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0))
+ throw std::runtime_error("Diagram must be an array of size n x 2");
+ // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it.
+ py::ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0;
+ auto cnt = boost::counting_range<py::ssize_t>(0, buf.shape[0]);
+ return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1));
+ // Be careful that the returned range cannot contain references to dead temporaries.
+}
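Because the strides are taken from the buffer rather than assumed contiguous, sliced numpy views are valid diagram inputs; a small sketch of the accepted shapes:

    import numpy as np

    raw = np.random.rand(10, 3)
    dgm = raw[:, :2]      # shape (10, 2) with a non-trivial row stride: accepted
    empty = np.array([])  # shape (0,) is the accepted encoding of an empty diagram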
diff --git a/src/python/introduction.rst b/src/python/introduction.rst
new file mode 100644
index 00000000..11c06ac5
--- /dev/null
+++ b/src/python/introduction.rst
@@ -0,0 +1,24 @@
+The Gudhi library is an open source library for Computational Topology and
+Topological Data Analysis (TDA). It offers state-of-the-art algorithms
+to construct various types of simplicial complexes, data structures to
+represent them, and algorithms to compute geometric approximations of shapes
+and persistent homology.
+
+The GUDHI library offers the following interoperable modules:
+
+* Complexes:
+ * Cubical
+ * Simplicial: Rips, Witness, Alpha and Čech complexes
+ * Cover: Nerve and Graph induced complexes
+* Data structures and basic operations:
+ * Simplex tree, Skeleton blockers and Toplex map
+ * Construction, update, filtration and simplification
+* Topological descriptors computation
+* Manifold reconstruction
+* Topological descriptors tools:
+ * Bottleneck distance
+ * Statistical tools
+ * Persistence diagram and barcode
+
+For more information about Topological Data Analysis and its workflow, please
+refer to the `Wikipedia TDA dedicated page <https://en.wikipedia.org/wiki/Topological_data_analysis>`_.
diff --git a/src/python/pyproject.toml b/src/python/pyproject.toml
new file mode 100644
index 00000000..55b64466
--- /dev/null
+++ b/src/python/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=24.2.0", "wheel", "numpy>=1.15.0", "cython>=0.27", "pybind11"]
+build-backend = "setuptools.build_meta"
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index f993165c..6eb0db42 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -5,19 +5,22 @@
Copyright (C) 2019 Inria
Modification(s):
+ - 2021/12 Vincent Rouvreau: Python 3.5 as minimal version
- YYYY/MM Author: Description of the modification
"""
-from setuptools import setup, Extension
+from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
from numpy import get_include as numpy_get_include
import sys
+import pybind11
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
-modules = [@GUDHI_PYTHON_MODULES_TO_COMPILE@]
+cython_modules = [@GUDHI_CYTHON_MODULES@]
+pybind11_modules = [@GUDHI_PYBIND11_MODULES@]
source_dir='@CMAKE_CURRENT_SOURCE_DIR@/gudhi/'
extra_compile_args=[@GUDHI_PYTHON_EXTRA_COMPILE_ARGS@]
@@ -29,7 +32,7 @@ runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@]
# Create ext_modules list from module list
ext_modules = []
-for module in modules:
+for module in cython_modules:
ext_modules.append(Extension(
'gudhi.' + module,
sources = [source_dir + module + '.pyx',],
@@ -39,17 +42,48 @@ for module in modules:
libraries=libraries,
library_dirs=library_dirs,
include_dirs=include_dirs,
+ runtime_library_dirs=runtime_library_dirs,))
+
+ext_modules = cythonize(ext_modules, compiler_directives={'language_level': '3'})
+
+for module in pybind11_modules:
+ my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
+ ext_modules.append(Extension(
+ 'gudhi.' + module.replace('/', '.'),
+ sources = [source_dir + module + '.cc'],
+ language = 'c++',
+ include_dirs = my_include_dirs,
+ extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
+ extra_link_args=extra_link_args,
+ libraries=libraries,
+ library_dirs=library_dirs,
runtime_library_dirs=runtime_library_dirs,
- cython_directives = {'language_level': str(sys.version_info[0])},))
+ ))
+
+# read the contents of introduction.rst
+with open("introduction.rst", "r") as fh:
+ long_description = fh.read()
setup(
name = 'gudhi',
- packages=["gudhi","gudhi.representations"],
+ packages=find_packages(), # find_namespace_packages(include=["gudhi*"])
author='GUDHI Editorial Board',
- author_email='gudhi-contact@lists.gforge.inria.fr',
+ author_email='gudhi-contact@inria.fr',
version='@GUDHI_VERSION@',
- url='http://gudhi.gforge.inria.fr/',
- ext_modules = cythonize(ext_modules),
- install_requires = ['cython','numpy >= 1.9',],
- setup_requires = ['numpy >= 1.9',],
+ url='https://gudhi.inria.fr/',
+ project_urls={
+ 'Bug Tracker': 'https://github.com/GUDHI/gudhi-devel/issues',
+ 'Documentation': 'https://gudhi.inria.fr/python/latest/',
+ 'Source Code': 'https://github.com/GUDHI/gudhi-devel',
+ 'License': 'https://gudhi.inria.fr/licensing/'
+ },
+ description='The Gudhi library is an open source library for ' \
+ 'Computational Topology and Topological Data Analysis (TDA).',
+ data_files=[('.', ['./introduction.rst'])],
+ long_description_content_type='text/x-rst',
+ long_description=long_description,
+ ext_modules = ext_modules,
+ python_requires='>=3.5.0',
+ install_requires = ['numpy >= 1.15.0',],
+ package_data={"": ["*.dll"], },
)
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index 3761fe16..f81e6137 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -8,10 +8,12 @@
- YYYY/MM Author: Description of the modification
"""
-from gudhi import AlphaComplex, SimplexTree
+from gudhi import AlphaComplex
import math
import numpy as np
import pytest
+import warnings
+
try:
# python3
from itertools import zip_longest
@@ -19,19 +21,24 @@ except ImportError:
# python2
from itertools import izip_longest as zip_longest
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "MIT"
-def test_empty_alpha():
- alpha_complex = AlphaComplex(points=[[0, 0]])
+def _empty_alpha(precision):
+ alpha_complex = AlphaComplex(precision = precision)
assert alpha_complex.__is_defined() == True
+def _one_2d_point_alpha(precision):
+ alpha_complex = AlphaComplex(points=[[0, 0]], precision = precision)
+ assert alpha_complex.__is_defined() == True
-def test_infinite_alpha():
+def test_empty_alpha():
+ for precision in ['fast', 'safe', 'exact']:
+ _empty_alpha(precision)
+ _one_2d_point_alpha(precision)
+
+def _infinite_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
- alpha_complex = AlphaComplex(points=point_list)
+ alpha_complex = AlphaComplex(points=point_list, precision = precision)
assert alpha_complex.__is_defined() == True
simplex_tree = alpha_complex.create_simplex_tree()
@@ -40,7 +47,7 @@ def test_infinite_alpha():
assert simplex_tree.num_simplices() == 11
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -53,6 +60,7 @@ def test_infinite_alpha():
([0, 1, 2], 0.5),
([1, 2, 3], 0.5),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 0.25),
@@ -65,23 +73,17 @@ def test_infinite_alpha():
assert point_list[1] == alpha_complex.get_point(1)
assert point_list[2] == alpha_complex.get_point(2)
assert point_list[3] == alpha_complex.get_point(3)
- try:
- alpha_complex.get_point(4) == []
- except IndexError:
- pass
- else:
- assert False
- try:
- alpha_complex.get_point(125) == []
- except IndexError:
- pass
- else:
- assert False
+ with pytest.raises(IndexError):
+ alpha_complex.get_point(len(point_list))
-def test_filtered_alpha():
+def test_infinite_alpha():
+ for precision in ['fast', 'safe', 'exact']:
+ _infinite_alpha(precision)
+
+def _filtered_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
- filtered_alpha = AlphaComplex(points=point_list)
+ filtered_alpha = AlphaComplex(points=point_list, precision = precision)
simplex_tree = filtered_alpha.create_simplex_tree(max_alpha_square=0.25)
@@ -92,20 +94,11 @@ def test_filtered_alpha():
assert point_list[1] == filtered_alpha.get_point(1)
assert point_list[2] == filtered_alpha.get_point(2)
assert point_list[3] == filtered_alpha.get_point(3)
- try:
- filtered_alpha.get_point(4) == []
- except IndexError:
- pass
- else:
- assert False
- try:
- filtered_alpha.get_point(125) == []
- except IndexError:
- pass
- else:
- assert False
-
- assert simplex_tree.get_filtration() == [
+
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(len(point_list))
+
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -118,7 +111,11 @@ def test_filtered_alpha():
assert simplex_tree.get_star([0]) == [([0], 0.0), ([0, 1], 0.25), ([0, 2], 0.25)]
assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)]
-def test_safe_alpha_persistence_comparison():
+def test_filtered_alpha():
+ for precision in ['fast', 'safe', 'exact']:
+ _filtered_alpha(precision)
+
+def _safe_alpha_persistence_comparison(precision):
#generate periodic signal
time = np.arange(0, 10, 1)
signal = [math.sin(x) for x in time]
@@ -130,10 +127,10 @@ def test_safe_alpha_persistence_comparison():
embedding2 = [[signal[i], delayed[i]] for i in range(len(time))]
#build alpha complex and simplex tree
- alpha_complex1 = AlphaComplex(points=embedding1)
+ alpha_complex1 = AlphaComplex(points=embedding1, precision = precision)
simplex_tree1 = alpha_complex1.create_simplex_tree()
- alpha_complex2 = AlphaComplex(points=embedding2)
+ alpha_complex2 = AlphaComplex(points=embedding2, precision = precision)
simplex_tree2 = alpha_complex2.create_simplex_tree()
diag1 = simplex_tree1.persistence()
@@ -142,3 +139,177 @@ def test_safe_alpha_persistence_comparison():
for (first_p, second_p) in zip_longest(diag1, diag2):
assert first_p[0] == pytest.approx(second_p[0])
assert first_p[1] == pytest.approx(second_p[1])
+
+
+def test_safe_alpha_persistence_comparison():
+ # Won't work for 'fast' version
+ _safe_alpha_persistence_comparison('safe')
+ _safe_alpha_persistence_comparison('exact')
+
+def _delaunay_complex(precision):
+ point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
+ filtered_alpha = AlphaComplex(points=point_list, precision = precision)
+
+ simplex_tree = filtered_alpha.create_simplex_tree(default_filtration_value = True)
+
+ assert simplex_tree.num_simplices() == 11
+ assert simplex_tree.num_vertices() == 4
+
+ assert point_list[0] == filtered_alpha.get_point(0)
+ assert point_list[1] == filtered_alpha.get_point(1)
+ assert point_list[2] == filtered_alpha.get_point(2)
+ assert point_list[3] == filtered_alpha.get_point(3)
+
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(4)
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(125)
+
+ for filtered_value in simplex_tree.get_filtration():
+ assert math.isnan(filtered_value[1])
+ for filtered_value in simplex_tree.get_star([0]):
+ assert math.isnan(filtered_value[1])
+ for filtered_value in simplex_tree.get_cofaces([0], 1):
+ assert math.isnan(filtered_value[1])
+
+def test_delaunay_complex():
+ for precision in ['fast', 'safe', 'exact']:
+ _delaunay_complex(precision)
+
+def _3d_points_on_a_plane(precision, default_filtration_value):
+ alpha = AlphaComplex(points = [[1.0, 1.0 , 0.0],
+ [7.0, 0.0 , 0.0],
+ [4.0, 6.0 , 0.0],
+ [9.0, 6.0 , 0.0],
+ [0.0, 14.0, 0.0],
+ [2.0, 19.0, 0.0],
+ [9.0, 17.0, 0.0]], precision = precision)
+
+ simplex_tree = alpha.create_simplex_tree(default_filtration_value = default_filtration_value)
+ assert simplex_tree.dimension() == 2
+ assert simplex_tree.num_vertices() == 7
+ assert simplex_tree.num_simplices() == 25
+
+def test_3d_points_on_a_plane():
+ for default_filtration_value in [True, False]:
+ for precision in ['fast', 'safe', 'exact']:
+ _3d_points_on_a_plane(precision, default_filtration_value)
+
+def _3d_tetrahedrons(precision):
+ points = 10*np.random.rand(10, 3)
+ alpha = AlphaComplex(points = points, precision = precision)
+ st_alpha = alpha.create_simplex_tree(default_filtration_value = False)
+ # New AlphaComplex for get_point to work
+ delaunay = AlphaComplex(points = points, precision = precision)
+ st_delaunay = delaunay.create_simplex_tree(default_filtration_value = True)
+
+ delaunay_tetra = []
+ for sk in st_delaunay.get_skeleton(4):
+ if len(sk[0]) == 4:
+ tetra = [delaunay.get_point(sk[0][0]),
+ delaunay.get_point(sk[0][1]),
+ delaunay.get_point(sk[0][2]),
+ delaunay.get_point(sk[0][3]) ]
+ delaunay_tetra.append(sorted(tetra, key=lambda tup: tup[0]))
+
+ alpha_tetra = []
+ for sk in st_alpha.get_skeleton(4):
+ if len(sk[0]) == 4:
+ tetra = [alpha.get_point(sk[0][0]),
+ alpha.get_point(sk[0][1]),
+ alpha.get_point(sk[0][2]),
+ alpha.get_point(sk[0][3]) ]
+ alpha_tetra.append(sorted(tetra, key=lambda tup: tup[0]))
+
+    # Check that the tetrahedrons from one list are all in the second one
+ assert len(alpha_tetra) == len(delaunay_tetra)
+ for tetra_from_del in delaunay_tetra:
+ assert tetra_from_del in alpha_tetra
+
+def test_3d_tetrahedrons():
+ for precision in ['fast', 'safe', 'exact']:
+ _3d_tetrahedrons(precision)
+
+def test_off_file_deprecation_warning():
+ off_file = open("alphacomplexdoc.off", "w")
+ off_file.write("OFF \n" \
+ "7 0 0 \n" \
+ "1.0 1.0 0.0\n" \
+ "7.0 0.0 0.0\n" \
+ "4.0 6.0 0.0\n" \
+ "9.0 6.0 0.0\n" \
+ "0.0 14.0 0.0\n" \
+ "2.0 19.0 0.0\n" \
+ "9.0 17.0 0.0\n" )
+ off_file.close()
+
+ with pytest.warns(DeprecationWarning):
+ alpha = AlphaComplex(off_file="alphacomplexdoc.off")
+
+def test_non_existing_off_file():
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(FileNotFoundError):
+ alpha = AlphaComplex(off_file="pouetpouettralala.toubiloubabdou")
+
+def test_inconsistency_points_and_weights():
+ points = [[1.0, 1.0 , 0.0],
+ [7.0, 0.0 , 0.0],
+ [4.0, 6.0 , 0.0],
+ [9.0, 6.0 , 0.0],
+ [0.0, 14.0, 0.0],
+ [2.0, 19.0, 0.0],
+ [9.0, 17.0, 0.0]]
+ with pytest.raises(ValueError):
+ # 7 points, 8 weights, on purpose
+ alpha = AlphaComplex(points = points,
+ weights = [1., 2., 3., 4., 5., 6., 7., 8.])
+
+ with pytest.raises(ValueError):
+ # 7 points, 6 weights, on purpose
+ alpha = AlphaComplex(points = points,
+ weights = [1., 2., 3., 4., 5., 6.])
+
+def _weighted_doc_example(precision):
+ stree = AlphaComplex(points=[[ 1., -1., -1.],
+ [-1., 1., -1.],
+ [-1., -1., 1.],
+ [ 1., 1., 1.],
+ [ 2., 2., 2.]],
+ weights = [4., 4., 4., 4., 1.],
+ precision = precision).create_simplex_tree()
+
+ assert stree.filtration([0, 1, 2, 3]) == pytest.approx(-1.)
+ assert stree.filtration([0, 1, 3, 4]) == pytest.approx(95.)
+ assert stree.filtration([0, 2, 3, 4]) == pytest.approx(95.)
+ assert stree.filtration([1, 2, 3, 4]) == pytest.approx(95.)
+
+def test_weighted_doc_example():
+ for precision in ['fast', 'safe', 'exact']:
+ _weighted_doc_example(precision)
+
+def test_float_relative_precision():
+ assert AlphaComplex.get_float_relative_precision() == 1e-5
+ # Must be > 0.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(0.)
+ # Must be < 1.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(1.)
+
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations = list(st.get_filtration())
+
+ # Get a better precision
+ AlphaComplex.set_float_relative_precision(1e-15)
+ assert AlphaComplex.get_float_relative_precision() == 1e-15
+
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations_better_resolution = list(st.get_filtration())
+
+ assert len(filtrations) == len(filtrations_better_resolution)
+ for idx in range(len(filtrations)):
+ # check simplex is the same
+ assert filtrations[idx][0] == filtrations_better_resolution[idx][0]
+ # check filtration is about the same with a relative precision of the worst case
+ assert filtrations[idx][1] == pytest.approx(filtrations_better_resolution[idx][1], rel=1e-5)
diff --git a/src/python/test/test_betti_curve_representations.py b/src/python/test/test_betti_curve_representations.py
new file mode 100755
index 00000000..6a45da4d
--- /dev/null
+++ b/src/python/test/test_betti_curve_representations.py
@@ -0,0 +1,59 @@
+import numpy as np
+import scipy.interpolate
+import pytest
+
+from gudhi.representations.vector_methods import BettiCurve
+
+def test_betti_curve_is_irregular_betti_curve_followed_by_interpolation():
+ m = 10
+ n = 1000
+ pinf = 0.05
+ pzero = 0.05
+ res = 100
+
+ pds = []
+ for i in range(0, m):
+ pd = np.zeros((n, 2))
+ pd[:, 0] = np.random.uniform(0, 10, n)
+ pd[:, 1] = np.random.uniform(pd[:, 0], 10, n)
+ pd[np.random.uniform(0, 1, n) < pzero, 0] = 0
+ pd[np.random.uniform(0, 1, n) < pinf, 1] = np.inf
+ pds.append(pd)
+
+ bc = BettiCurve(resolution=None, predefined_grid=None)
+ bc.fit(pds)
+ bettis = bc.transform(pds)
+
+ bc2 = BettiCurve(resolution=None, predefined_grid=None)
+ bettis2 = bc2.fit_transform(pds)
+ assert((bc2.grid_ == bc.grid_).all())
+ assert((bettis2 == bettis).all())
+
+ for i in range(0, m):
+ grid = np.linspace(pds[i][np.isfinite(pds[i])].min(), pds[i][np.isfinite(pds[i])].max() + 1, res)
+ bc_gridded = BettiCurve(predefined_grid=grid)
+ bc_gridded.fit([])
+ bettis_gridded = bc_gridded(pds[i])
+
+ interp = scipy.interpolate.interp1d(bc.grid_, bettis[i, :], kind="previous", fill_value="extrapolate")
+ bettis_interp = np.array(interp(grid), dtype=int)
+ assert((bettis_interp == bettis_gridded).all())
+
+
+def test_empty_with_predefined_grid():
+ random_grid = np.sort(np.random.uniform(0, 1, 100))
+ bc = BettiCurve(predefined_grid=random_grid)
+ bettis = bc.fit_transform([])
+ assert((bc.grid_ == random_grid).all())
+ assert((bettis == 0).all())
+
+
+def test_empty():
+ bc = BettiCurve(resolution=None, predefined_grid=None)
+ bettis = bc.fit_transform([])
+ assert(bc.grid_ == [-np.inf])
+ assert((bettis == 0).all())
+
+def test_wrong_value_of_predefined_grid():
+ with pytest.raises(ValueError):
+ BettiCurve(predefined_grid=[1, 2, 3])
diff --git a/src/python/test/test_bottleneck_distance.py b/src/python/test/test_bottleneck_distance.py
index 70b2abad..07fcc9cc 100755
--- a/src/python/test/test_bottleneck_distance.py
+++ b/src/python/test/test_bottleneck_distance.py
@@ -9,6 +9,8 @@
"""
import gudhi
+import gudhi.hera
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -19,5 +21,19 @@ def test_basic_bottleneck():
diag1 = [[2.7, 3.7], [9.6, 14.0], [34.2, 34.974], [3.0, float("Inf")]]
diag2 = [[2.8, 4.45], [9.5, 14.1], [3.2, float("Inf")]]
- assert gudhi.bottleneck_distance(diag1, diag2, 0.1) == 0.8081763781405569
assert gudhi.bottleneck_distance(diag1, diag2) == 0.75
+ assert gudhi.bottleneck_distance(diag1, diag2, 0.1) == pytest.approx(0.75, abs=0.1)
+ assert gudhi.hera.bottleneck_distance(diag1, diag2, 0) == 0.75
+ assert gudhi.hera.bottleneck_distance(diag1, diag2, 0.1) == pytest.approx(0.75, rel=0.1)
+
+ import numpy as np
+
+ # Translating both diagrams along the diagonal should not affect the distance,
+ # checks that negative numbers are not an issue
+ diag1 = np.array(diag1) - 100
+ diag2 = np.array(diag2) - 100
+
+ assert gudhi.bottleneck_distance(diag1, diag2) == 0.75
+ assert gudhi.bottleneck_distance(diag1, diag2, 0.1) == pytest.approx(0.75, abs=0.1)
+ assert gudhi.hera.bottleneck_distance(diag1, diag2, 0) == 0.75
+ assert gudhi.hera.bottleneck_distance(diag1, diag2, 0.1) == pytest.approx(0.75, rel=0.1)
diff --git a/src/python/test/test_cover_complex.py b/src/python/test/test_cover_complex.py
index 32bc5a26..260f6a5c 100755
--- a/src/python/test/test_cover_complex.py
+++ b/src/python/test/test_cover_complex.py
@@ -9,6 +9,7 @@
"""
from gudhi import CoverComplex
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2018 Inria"
@@ -24,7 +25,8 @@ def test_empty_constructor():
def test_non_existing_file_read():
# Try to open a non existing file
cover = CoverComplex()
- assert cover.read_point_cloud("pouetpouettralala.toubiloubabdou") == False
+ with pytest.raises(FileNotFoundError):
+ cover.read_point_cloud("pouetpouettralala.toubiloubabdou")
def test_files_creation():
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index 8c1b2600..29d559b3 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -10,6 +10,7 @@
from gudhi import CubicalComplex, PeriodicCubicalComplex
import numpy as np
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -25,9 +26,8 @@ def test_empty_constructor():
def test_non_existing_perseus_file_constructor():
# Try to open a non existing file
- cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou")
- assert cub.__is_defined() == False
- assert cub.__is_persistence_defined() == False
+ with pytest.raises(FileNotFoundError):
+ cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou")
def test_dimension_or_perseus_file_constructor():
@@ -147,3 +147,55 @@ def test_connected_sublevel_sets():
periodic_dimensions = periodic_dimensions)
assert cub.persistence() == [(0, (2.0, float("inf")))]
assert cub.betti_numbers() == [1, 0, 0]
+
+def test_cubical_generators():
+ cub = CubicalComplex(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ cub.persistence()
+ g = cub.cofaces_of_persistence_pairs()
+ assert len(g[0]) == 2
+ assert len(g[1]) == 1
+ assert np.array_equal(g[0][0], np.empty(shape=[0,2]))
+ assert np.array_equal(g[0][1], np.array([[7, 4]]))
+ assert np.array_equal(g[1][0], np.array([8]))
+
+def test_cubical_cofaces_of_persistence_pairs_when_pd_has_no_paired_birth_and_death():
+ cubCpx = CubicalComplex(dimensions=[1,2], top_dimensional_cells=[0.0, 1.0])
+ Diag = cubCpx.persistence(homology_coeff_field=2, min_persistence=0)
+ pairs = cubCpx.cofaces_of_persistence_pairs()
+ assert pairs[0] == []
+ assert np.array_equal(pairs[1][0], np.array([0]))
+
+def test_periodic_cofaces_of_persistence_pairs_when_pd_has_no_paired_birth_and_death():
+ perCubCpx = PeriodicCubicalComplex(dimensions=[1,2], top_dimensional_cells=[0.0, 1.0],
+ periodic_dimensions=[True, True])
+ Diag = perCubCpx.persistence(homology_coeff_field=2, min_persistence=0)
+ pairs = perCubCpx.cofaces_of_persistence_pairs()
+ assert pairs[0] == []
+ assert np.array_equal(pairs[1][0], np.array([0]))
+ assert np.array_equal(pairs[1][1], np.array([0, 1]))
+ assert np.array_equal(pairs[1][2], np.array([1]))
+
+def test_cubical_persistence_intervals_in_dimension():
+ cub = CubicalComplex(
+ dimensions=[3, 3],
+ top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ )
+ cub.compute_persistence()
+ H0 = cub.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[ 1., float("inf")]]))
+ assert cub.persistence_intervals_in_dimension(1).shape == (0, 2)
+
+def test_periodic_cubical_persistence_intervals_in_dimension():
+ cub = PeriodicCubicalComplex(
+ dimensions=[3, 3],
+ top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ periodic_dimensions = [True, True]
+ )
+ cub.compute_persistence()
+ H0 = cub.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[ 1., float("inf")]]))
+ H1 = cub.persistence_intervals_in_dimension(1)
+ assert np.array_equal(H1, np.array([[ 3., float("inf")], [ 7., float("inf")]]))
+ H2 = cub.persistence_intervals_in_dimension(2)
+ assert np.array_equal(H2, np.array([[ 9., float("inf")]]))
+ assert cub.persistence_intervals_in_dimension(3).shape == (0, 2)
diff --git a/src/python/test/test_datasets_generators.py b/src/python/test/test_datasets_generators.py
new file mode 100755
index 00000000..91ec4a65
--- /dev/null
+++ b/src/python/test/test_datasets_generators.py
@@ -0,0 +1,39 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Hind Montassif
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.datasets.generators import points
+
+import pytest
+
+def test_sphere():
+ assert points.sphere(n_samples = 10, ambient_dim = 2, radius = 1., sample = 'random').shape == (10, 2)
+
+ with pytest.raises(ValueError):
+ points.sphere(n_samples = 10, ambient_dim = 2, radius = 1., sample = 'other')
+
+def _basic_torus(impl):
+ assert impl(n_samples = 64, dim = 3, sample = 'random').shape == (64, 6)
+ assert impl(n_samples = 64, dim = 3, sample = 'grid').shape == (64, 6)
+
+ assert impl(n_samples = 10, dim = 4, sample = 'random').shape == (10, 8)
+
+    # Here 1**dim < n_samples < 2**dim, so the output shape is (1, 2*dim) = (1, 8):
+    # n_samples is rounded down to the closest perfect 'dim'th power for shape[0]
+ assert impl(n_samples = 10, dim = 4, sample = 'grid').shape == (1, 8)
+
+ with pytest.raises(ValueError):
+ impl(n_samples = 10, dim = 4, sample = 'other')
+
+def test_torus():
+ for torus_impl in [points.torus, points.ctorus]:
+ _basic_torus(torus_impl)
+    # Check that the two versions (torus and ctorus) generate the same output on the
+    # deterministic 'grid' sampling (elementwise, assuming both enumerate the grid in the same order)
+    assert (points.ctorus(n_samples = 64, dim = 3, sample = 'grid') == points.torus(n_samples = 64, dim = 3, sample = 'grid')).all()
+    assert (points.ctorus(n_samples = 10, dim = 3, sample = 'grid') == points.torus(n_samples = 10, dim = 3, sample = 'grid')).all()
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
new file mode 100644
index 00000000..dca001a9
--- /dev/null
+++ b/src/python/test/test_diff.py
@@ -0,0 +1,78 @@
+from gudhi.tensorflow import *
+import numpy as np
+import tensorflow as tf
+import gudhi as gd
+
+def test_rips_diff():
+
+ Xinit = np.array([[1.,1.],[2.,2.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[-.5,-.5],[.5,.5]]),1) <= 1e-6
+
+def test_cubical_diff():
+
+ Xinit = np.array([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]]),1) <= 1e-6
+
+def test_nonsquare_cubical_diff():
+
+ Xinit = np.array([[-1.,1.,0.],[1.,1.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[0.,0.5,-0.5],[0.,0.,0.]]),1) <= 1e-6
+
+def test_st_diff():
+
+ st = gd.SimplexTree()
+    # Path graph on 11 vertices: 0 - 1 - ... - 10
+    for v in range(11):
+        st.insert([v])
+    for v in range(10):
+        st.insert([v, v + 1])
+
+ Finit = np.array([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=np.float32)
+ F = tf.Variable(initial_value=Finit, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [F])
+
+ assert tf.math.reduce_all(tf.math.equal(grads[0].indices, tf.constant([2,4])))
+ assert tf.math.reduce_all(tf.math.equal(grads[0].values, tf.constant([-1.,1.])))
+
diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py
new file mode 100755
index 00000000..b276f041
--- /dev/null
+++ b/src/python/test/test_dtm.py
@@ -0,0 +1,101 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.dtm import DistanceToMeasure, DTMDensity
+import numpy
+import pytest
+import torch
+import math
+import warnings
+
+
+def test_dtm_compare_euclidean():
+ pts = numpy.random.rand(1000, 4)
+ k = 6
+ dtm = DistanceToMeasure(k, implementation="ckdtree")
+ r0 = dtm.fit_transform(pts)
+ dtm = DistanceToMeasure(k, implementation="sklearn")
+ r1 = dtm.fit_transform(pts)
+ assert r1 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="sklearn", algorithm="brute")
+ r2 = dtm.fit_transform(pts)
+ assert r2 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="hnsw")
+ r3 = dtm.fit_transform(pts)
+ assert r3 == pytest.approx(r0, rel=0.1)
+ from scipy.spatial.distance import cdist
+
+ d = cdist(pts, pts)
+ dtm = DistanceToMeasure(k, metric="precomputed")
+ r4 = dtm.fit_transform(d)
+ assert r4 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, metric="precomputed", n_jobs=2)
+ r4b = dtm.fit_transform(d)
+ assert r4b == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="keops")
+ r5 = dtm.fit_transform(pts)
+ assert r5 == pytest.approx(r0)
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="keops", enable_autodiff=True)
+ r6 = dtm.fit_transform(pts2)
+ assert r6.detach().numpy() == pytest.approx(r0)
+ r6.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True)
+ r7 = dtm.fit_transform(pts2)
+ assert r7.detach().numpy() == pytest.approx(r0)
+ r7.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+
+
+def test_dtm_precomputed():
+ dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]])
+ dtm = DistanceToMeasure(2, q=1, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 3, 1])
+
+ dist = numpy.array([[2.0, 2], [0, 1], [3, 4]])
+ dtm = DistanceToMeasure(2, q=2, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 0.707, 3.5355], rel=0.01)
+
+
+def test_density_normalized():
+ sample = numpy.random.normal(0, 1, (1000000, 2))
+ queries = numpy.array([[0.0, 0.0], [-0.5, 0.7], [0.4, 1.7]])
+ expected = numpy.exp(-(queries ** 2).sum(-1) / 2) / (2 * math.pi)
+ estimated = DTMDensity(k=150, normalize=True).fit(sample).transform(queries)
+ assert estimated == pytest.approx(expected, rel=0.4)
+
+
+def test_density():
+ distances = [[0, 1, 10], [2, 0, 30], [1, 3, 5]]
+ density = DTMDensity(k=2, metric="neighbors", dim=1).fit_transform(distances)
+ expected = numpy.array([2.0, 1.0, 0.5])
+ assert density == pytest.approx(expected)
+ distances = [[0, 1], [2, 0], [1, 3]]
+ density = DTMDensity(metric="neighbors", dim=1).fit_transform(distances)
+ assert density == pytest.approx(expected)
+ density = DTMDensity(weights=[0.5, 0.5], metric="neighbors", dim=1).fit_transform(distances)
+ assert density == pytest.approx(expected)
+
+def test_dtm_overflow_warnings():
+ pts = numpy.array([[10., 100000000000000000000000000000.], [1000., 100000000000000000000000000.]])
+ impl_warn = ["keops", "hnsw"]
+ for impl in impl_warn:
+ with warnings.catch_warnings(record=True) as w:
+ dtm = DistanceToMeasure(2, implementation=impl)
+ r = dtm.fit_transform(pts)
+ assert len(w) == 1
+ assert issubclass(w[0].category, RuntimeWarning)
+ assert "Overflow" in str(w[0].message)
diff --git a/src/python/test/test_dtm_rips_complex.py b/src/python/test/test_dtm_rips_complex.py
new file mode 100644
index 00000000..e1c0ee44
--- /dev/null
+++ b/src/python/test/test_dtm_rips_complex.py
@@ -0,0 +1,32 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Yuichi Ike
+
+ Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.dtm_rips_complex import DTMRipsComplex
+from gudhi import RipsComplex
+import numpy as np
+from math import sqrt
+import pytest
+
+def test_dtm_rips_complex():
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dtm_rips = DTMRipsComplex(points=pts, k=2)
+ st = dtm_rips.create_simplex_tree(max_dimension=2)
+ st.persistence()
+ persistence_intervals0 = st.persistence_intervals_in_dimension(0)
+ assert persistence_intervals0 == pytest.approx(np.array([[3.16227766, 5.39834564],[3.16227766, 5.39834564], [3.16227766, float("inf")]]))
+
+def test_compatibility_with_rips():
+ distance_matrix = np.array([[0, 1, 1, sqrt(2)], [1, 0, sqrt(2), 1], [1, sqrt(2), 0, 1], [sqrt(2), 1, 1, 0]])
+ dtm_rips = DTMRipsComplex(distance_matrix=distance_matrix, max_filtration=42)
+ st = dtm_rips.create_simplex_tree(max_dimension=1)
+ rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=42)
+ st_from_rips = rips_complex.create_simplex_tree(max_dimension=1)
+ assert list(st.get_filtration()) == list(st_from_rips.get_filtration())
+
diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py
index c18d2484..f3664d39 100755
--- a/src/python/test/test_euclidean_witness_complex.py
+++ b/src/python/test/test_euclidean_witness_complex.py
@@ -40,7 +40,7 @@ def test_witness_complex():
assert landmarks[1] == euclidean_witness_complex.get_point(1)
assert landmarks[2] == euclidean_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([0, 1], 0.0),
@@ -78,13 +78,13 @@ def test_strong_witness_complex():
assert landmarks[1] == euclidean_strong_witness_complex.get_point(1)
assert landmarks[2] == euclidean_strong_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
+ assert list(simplex_tree.get_filtration()) == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
simplex_tree = euclidean_strong_witness_complex.create_simplex_tree(
max_alpha_square=100.0
)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py
new file mode 100755
index 00000000..a87ec212
--- /dev/null
+++ b/src/python/test/test_knn.py
@@ -0,0 +1,130 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.knn import KNearestNeighbors
+import numpy as np
+import pytest
+
+
+def test_knn_explicit():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
+ knn = KNearestNeighbors(2, metric="manhattan", return_distance=True, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]]))
+ assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops", enable_autodiff=True)
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+ r = (
+ KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+
+ dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+ # Same computations, this time in parallel
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, n_jobs=2)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+
+
+def test_knn_compare():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = (
+ KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query)
+ )
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3)
+
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query)
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query)
+ )
+ assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0])
+ d0 = pytest.approx(r0[1])
+ assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0
+
+
+def test_knn_nop():
+ # This doesn't look super useful...
+ p = np.array([[0.0]])
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="sklearn"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="ckdtree"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="keops"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, metric="precomputed"
+ ).fit_transform(p)
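As a complement to the backend matrix exercised above, a minimal usage sketch of the common front end (default backend only; optional implementations such as "keops" or "hnsw" require extra packages):

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
    # Indices of the 2 nearest neighbors of each point; querying the fitted
    # set against itself, each point is its own nearest neighbor.
    knn = KNearestNeighbors(2, return_index=True, return_distance=False)
    print(knn.fit_transform(base))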
diff --git a/src/python/test/test_off.py b/src/python/test/test_off.py
new file mode 100644
index 00000000..aea1941b
--- /dev/null
+++ b/src/python/test/test_off.py
@@ -0,0 +1,21 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2022 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi as gd
+import numpy as np
+import pytest
+
+
+def test_off_rw():
+ for dim in range(2, 6):
+ X = np.random.rand(123, dim)
+ gd.write_points_to_off_file("rand.off", X)
+ Y = gd.read_points_from_off_file("rand.off")
+ assert Y == pytest.approx(X)
diff --git a/src/python/test/test_persistence_graphical_tools.py b/src/python/test/test_persistence_graphical_tools.py
new file mode 100644
index 00000000..0e2ac3f8
--- /dev/null
+++ b/src/python/test/test_persistence_graphical_tools.py
@@ -0,0 +1,122 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi as gd
+import numpy as np
+import matplotlib
+import pytest
+import warnings
+
+
+def test_array_handler():
+ diags = np.array([[1, 2], [3, 4], [5, 6]], float)
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ np.testing.assert_array_equal(arr_diags[idx][1], diags[idx])
+
+ diags = [(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ assert arr_diags[idx][1] == diags[idx]
+
+ diags = [(0, (1.0, 2.0)), (0, (3.0, 4.0)), (0, (5.0, 6.0))]
+ assert gd.persistence_graphical_tools._array_handler(diags) == diags
+
+
+def test_min_birth_max_death():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (0.0, 1.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (0.0, 5.0)
+
+
+def test_limit_min_birth_max_death():
+ diags = [
+ (0, (2.0, float("inf"))),
+ (0, (2.0, float("inf"))),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (2.0, 3.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (2.0, 6.0)
+
+
+def test_limit_to_max_intervals():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ # check no warnings are raised if max_intervals equals the number of diagrams
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 10, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check the diagrams are returned unchanged (neither truncated nor sorted)
+ assert truncated_diags == diags
+
+ # check a warning is raised if max_intervals is lower than the number of diagrams
+ with pytest.warns(UserWarning) as record:
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 5, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check diagrams are truncated and sorted by life time
+ assert truncated_diags == [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ ]
+ assert len(record) == 1
+
+
+def _limit_plot_persistence(function):
+ pplot = function(persistence=[])
+ assert isinstance(pplot, matplotlib.axes.SubplotBase)
+ pplot = function(persistence=[], legend=True)
+ assert isinstance(pplot, matplotlib.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))])
+ assert isinstance(pplot, matplotlib.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))], legend=True)
+ assert isinstance(pplot, matplotlib.axes.SubplotBase)
+
+
+def test_limit_plot_persistence():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _limit_plot_persistence(function)
+
+
+def _non_existing_persistence_file(function):
+ with pytest.raises(FileNotFoundError):
+ function(persistence_file="pouetpouettralala.toubiloubabdou")
+
+
+def test_non_existing_persistence_file():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _non_existing_persistence_file(function)
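For readers of test_array_handler above, a standalone re-implementation of the contract it pins down (the helper name is ours, for illustration only):

    import numpy as np

    def array_handler_sketch(diags):
        # Bare (birth, death) rows are wrapped as dimension-0 pairs;
        # input already of the form (dim, (birth, death)) passes through.
        if len(diags) and np.ndim(diags[0][1]) == 0:  # death is a scalar
            return [(0, tuple(interval)) for interval in diags]
        return diags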
diff --git a/src/python/test/test_reader_utils.py b/src/python/test/test_reader_utils.py
index 90da6651..fdfddc4b 100755
--- a/src/python/test/test_reader_utils.py
+++ b/src/python/test/test_reader_utils.py
@@ -8,8 +8,9 @@
- YYYY/MM Author: Description of the modification
"""
-import gudhi
+import gudhi as gd
import numpy as np
+from pytest import raises
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2017 Inria"
@@ -18,7 +19,7 @@ __license__ = "MIT"
def test_non_existing_csv_file():
# Try to open a non existing file
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
csv_file="pouetpouettralala.toubiloubabdou"
)
assert matrix == []
@@ -29,8 +30,8 @@ def test_full_square_distance_matrix_csv_file():
test_file = open("full_square_distance_matrix.csv", "w")
test_file.write("0;1;2;3;\n1;0;4;5;\n2;4;0;6;\n3;5;6;0;")
test_file.close()
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
- csv_file="full_square_distance_matrix.csv"
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
+ csv_file="full_square_distance_matrix.csv", separator=";"
)
assert matrix == [[], [1.0], [2.0, 4.0], [3.0, 5.0, 6.0]]
@@ -40,7 +41,7 @@ def test_lower_triangular_distance_matrix_csv_file():
test_file = open("lower_triangular_distance_matrix.csv", "w")
test_file.write("\n1,\n2,3,\n4,5,6,\n7,8,9,10,")
test_file.close()
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
csv_file="lower_triangular_distance_matrix.csv", separator=","
)
assert matrix == [[], [1.0], [2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0]]
@@ -48,11 +49,11 @@ def test_lower_triangular_distance_matrix_csv_file():
def test_non_existing_persistence_file():
# Try to open a non existing file
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="pouetpouettralala.toubiloubabdou"
)
assert persistence == []
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="pouetpouettralala.toubiloubabdou", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [])
@@ -65,21 +66,21 @@ def test_read_persistence_intervals_without_dimension():
"# Simple persistence diagram without dimension\n2.7 3.7\n9.6 14.\n34.2 34.974\n3. inf"
)
test_file.close()
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers"
)
np.testing.assert_array_equal(
persistence, [(2.7, 3.7), (9.6, 14.0), (34.2, 34.974), (3.0, float("Inf"))]
)
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers", only_this_dim=0
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="persistence_intervals_without_dimension.pers"
)
assert persistence == {
@@ -94,29 +95,29 @@ def test_read_persistence_intervals_with_dimension():
"# Simple persistence diagram with dimension\n0 2.7 3.7\n1 9.6 14.\n3 34.2 34.974\n1 3. inf"
)
test_file.close()
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers"
)
np.testing.assert_array_equal(
persistence, [(2.7, 3.7), (9.6, 14.0), (34.2, 34.974), (3.0, float("Inf"))]
)
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=0
)
np.testing.assert_array_equal(persistence, [(2.7, 3.7)])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [(9.6, 14.0), (3.0, float("Inf"))])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=2
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=3
)
np.testing.assert_array_equal(persistence, [(34.2, 34.974)])
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="persistence_intervals_with_dimension.pers"
)
assert persistence == {
diff --git a/src/python/test/test_remote_datasets.py b/src/python/test/test_remote_datasets.py
new file mode 100644
index 00000000..e5d2de82
--- /dev/null
+++ b/src/python/test/test_remote_datasets.py
@@ -0,0 +1,87 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from gudhi.datasets import remote
+
+import shutil
+import io
+import sys
+import pytest
+
+from os.path import isdir, expanduser, exists
+from os import remove, environ
+
+def test_data_home():
+ # Test _get_data_home and clear_data_home on new empty folder
+ empty_data_home = remote._get_data_home(data_home="empty_folder_for_test")
+ assert isdir(empty_data_home)
+
+ remote.clear_data_home(data_home=empty_data_home)
+ assert not isdir(empty_data_home)
+
+def test_fetch_remote():
+ # Test fetch with a wrong checksum
+ with pytest.raises(OSError):
+ remote._fetch_remote("https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/spiral_2d/spiral_2d.npy", "tmp_spiral_2d.npy", file_checksum = 'XXXXXXXXXX')
+ assert not exists("tmp_spiral_2d.npy")
+
+def _get_bunny_license_print(accept_license = False):
+ capturedOutput = io.StringIO()
+ # Redirect stdout
+ sys.stdout = capturedOutput
+
+ bunny_arr = remote.fetch_bunny("./tmp_for_test/bunny.npy", accept_license)
+ assert bunny_arr.shape == (35947, 3)
+ del bunny_arr
+ remove("./tmp_for_test/bunny.npy")
+
+ # Reset redirect
+ sys.stdout = sys.__stdout__
+ return capturedOutput
+
+def test_print_bunny_license():
+ # Test not printing bunny.npy LICENSE when accept_license = True
+ assert "" == _get_bunny_license_print(accept_license = True).getvalue()
+ # Test printing bunny.LICENSE file when fetching bunny.npy with accept_license = False (default)
+ with open("./tmp_for_test/bunny.LICENSE") as f:
+ assert f.read().rstrip("\n") == _get_bunny_license_print().getvalue().rstrip("\n")
+ shutil.rmtree("./tmp_for_test")
+
+def test_fetch_remote_datasets_wrapped():
+ # Test the fetch_spiral_2d and fetch_bunny wrapping functions with a data directory different from the default (run twice, to also cover the case of already fetched files)
+ # The default case is not tested because it would fail if the user has set the 'GUDHI_DATA' environment variable locally
+ for i in range(2):
+ spiral_2d_arr = remote.fetch_spiral_2d("./another_fetch_folder_for_test/spiral_2d.npy")
+ assert spiral_2d_arr.shape == (114562, 2)
+
+ bunny_arr = remote.fetch_bunny("./another_fetch_folder_for_test/bunny.npy")
+ assert bunny_arr.shape == (35947, 3)
+
+ # Check that the directory was created
+ assert isdir("./another_fetch_folder_for_test")
+ # Check downloaded files
+ assert exists("./another_fetch_folder_for_test/spiral_2d.npy")
+ assert exists("./another_fetch_folder_for_test/bunny.npy")
+ assert exists("./another_fetch_folder_for_test/bunny.LICENSE")
+
+ # Remove test folders
+ del spiral_2d_arr
+ del bunny_arr
+ shutil.rmtree("./another_fetch_folder_for_test")
+
+def test_gudhi_data_env():
+ # Set environment variable "GUDHI_DATA"
+ environ["GUDHI_DATA"] = "./test_folder_from_env_var"
+ bunny_arr = remote.fetch_bunny()
+ assert bunny_arr.shape == (35947, 3)
+ assert exists("./test_folder_from_env_var/points/bunny/bunny.npy")
+ assert exists("./test_folder_from_env_var/points/bunny/bunny.LICENSE")
+ # Remove test folder
+ del bunny_arr
+ shutil.rmtree("./test_folder_from_env_var")
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index dba7f952..f4ffbdc1 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -1,12 +1,269 @@
import os
import sys
import matplotlib.pyplot as plt
+import numpy as np
+import pytest
+import random
+
+from sklearn.cluster import KMeans
+
+# Vectorization
+from gudhi.representations import (Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
+ TopologicalVector, PersistenceImage, Entropy)
+
+# Preprocessing
+from gudhi.representations import (BirthPersistenceTransform, Clamping, DiagramScaler, Padding, ProminentPoints, \
+ DiagramSelector)
+
+# Kernel
+from gudhi.representations import (PersistenceWeightedGaussianKernel, \
+ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
+ SlicedWassersteinKernel, PersistenceFisherKernel, WassersteinDistance)
+
def test_representations_examples():
# Disable graphics for testing purposes
- plt.show = lambda:None
+ plt.show = lambda: None
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(here + "/../example")
import diagram_vectorizations_distances_kernels
return None
+
+
+from gudhi.representations.vector_methods import Atol
+from gudhi.representations.metrics import *
+from gudhi.representations.kernel_methods import *
+
+
+def _n_diags(n):
+ l = []
+ for _ in range(n):
+ a = np.random.rand(50, 2)
+ a[:, 1] += a[:, 0] # So that y >= x
+ l.append(a)
+ return l
+
+
+def test_multiple():
+ l1 = _n_diags(9)
+ l2 = _n_diags(11)
+ l1b = l1.copy()
+ d1 = pairwise_persistence_diagram_distances(l1, e=0.00001, n_jobs=4)
+ d2 = BottleneckDistance(epsilon=0.00001).fit_transform(l1)
+ d3 = pairwise_persistence_diagram_distances(l1, l1b, e=0.00001, n_jobs=4)
+ assert d1 == pytest.approx(d2)
+ assert d3 == pytest.approx(d2, abs=1e-5) # Because of 0 entries (on the diagonal)
+ d1 = pairwise_persistence_diagram_distances(l1, l2, metric="wasserstein", order=2, internal_p=2)
+ d2 = WassersteinDistance(order=2, internal_p=2, n_jobs=4).fit(l2).transform(l1)
+ print(d1.shape, d2.shape)
+ assert d1 == pytest.approx(d2, rel=0.02)
+
+
+# Test against sorted values, as the order of the points can be inverted and a sorted test is not documentation-friendly
+# Note: the test below must be kept up to date with the Atol class documentation
+def test_atol_doc():
+ a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ c = np.array([[3, 2, -1], [1, 2, -1]])
+
+ atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
+ # Atol will do
+ # X = np.concatenate([a,b,c])
+ # kmeans = KMeans(n_clusters=2, random_state=202006).fit(X)
+ # kmeans.labels_ will be : array([1, 0, 1, 0, 0, 1, 0, 0])
+ first_cluster = np.asarray([a[0], a[2], b[2]])
+ second_cluster = np.asarray([a[1], b[0], b[1], c[0], c[1]])
+
+ # Check the centers of first_cluster and second_cluster appear among the Atol centers
+ centers = atol_vectoriser.fit(X=[a, b, c]).centers
+ assert np.isclose(centers, first_cluster.mean(axis=0)).all(1).any()
+ assert np.isclose(centers, second_cluster.mean(axis=0)).all(1).any()
+
+ vectorization = atol_vectoriser.transform(X=[a, b, c])
+ assert np.allclose(vectorization[0], atol_vectoriser(a))
+ assert np.allclose(vectorization[1], atol_vectoriser(b))
+ assert np.allclose(vectorization[2], atol_vectoriser(c))
+
+
+def test_dummy_atol():
+ a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ c = np.array([[3, 2, -1], [1, 2, -1]])
+
+ for weighting_method in ["cloud", "iidproba"]:
+ for contrast in ["gaussian", "laplacian", "indicator"]:
+ atol_vectoriser = Atol(
+ quantiser=KMeans(n_clusters=1, random_state=202006),
+ weighting_method=weighting_method,
+ contrast=contrast,
+ )
+ atol_vectoriser.fit([a, b, c])
+ atol_vectoriser(a)
+ atol_vectoriser.transform(X=[a, b, c])
+
+
+from gudhi.representations.vector_methods import BettiCurve
+
+def test_infinity():
+ a = np.array([[1.0, 8.0], [2.0, np.inf], [3.0, 4.0]])
+ c = BettiCurve(20, [0.0, 10.0])(a)
+ assert c[1] == 0
+ assert c[7] == 3
+ assert c[9] == 2
+
+def test_preprocessing_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ assert not np.any(BirthPersistenceTransform()(empty_diag))
+ assert not np.any(Clamping().fit_transform(empty_diag))
+ assert not np.any(DiagramScaler()(empty_diag))
+ assert not np.any(Padding()(empty_diag))
+ assert not np.any(ProminentPoints()(empty_diag))
+ assert not np.any(DiagramSelector()(empty_diag))
+
+def pow(n):
+ return lambda x: np.power(x[1]-x[0],n)
+
+def test_vectorization_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ random_resolution = random.randint(50,100)*10 # between 500 and 1000
+ print("resolution = ", random_resolution)
+ lsc = Landscape(resolution=random_resolution)(empty_diag)
+ assert not np.any(lsc)
+ assert lsc.shape[0]%random_resolution == 0
+ slt = Silhouette(resolution=random_resolution, weight=pow(2))(empty_diag)
+ assert not np.any(slt)
+ assert slt.shape[0] == random_resolution
+ btc = BettiCurve(resolution=random_resolution)(empty_diag)
+ assert not np.any(btc)
+ assert btc.shape[0] == random_resolution
+ cpp = ComplexPolynomial(threshold=random_resolution, polynomial_type="T")(empty_diag)
+ assert not np.any(cpp)
+ assert cpp.shape[0] == random_resolution
+ tpv = TopologicalVector(threshold=random_resolution)(empty_diag)
+ assert tpv.shape[0] == random_resolution
+ assert not np.any(tpv)
+ prmg = PersistenceImage(resolution=[random_resolution,random_resolution])(empty_diag)
+ assert not np.any(prmg)
+ assert prmg.shape[0] == random_resolution * random_resolution
+ sce = Entropy(mode="scalar", resolution=random_resolution)(empty_diag)
+ assert not np.any(sce)
+ assert sce.shape[0] == 1
+ scv = Entropy(mode="vector", normalized=False, resolution=random_resolution)(empty_diag)
+ assert not np.any(scv)
+ assert scv.shape[0] == random_resolution
+
+def test_entropy_miscalculation():
+ diag_ex = np.array([[0.0,1.0], [0.0,1.0], [0.0,2.0]])
+ def pe(pd):
+ l = pd[:,1] - pd[:,0]
+ l = l/sum(l)
+ return -np.dot(l, np.log(l))
+ sce = Entropy(mode="scalar")
+ assert [[pe(diag_ex)]] == sce.fit_transform([diag_ex])
+ sce = Entropy(mode="vector", resolution=4, normalized=False, keep_endpoints=True)
+ pef = [-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
+ -1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
+ -1/2*np.log(1/2),
+ 0.0]
+ assert all(([pef] == sce.fit_transform([diag_ex]))[0])
+ sce = Entropy(mode="vector", resolution=4, normalized=True)
+ pefN = (sce.fit_transform([diag_ex]))[0]
+ area = np.linalg.norm(pefN, ord=1)
+ assert area==pytest.approx(1)
+
+def test_kernel_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ assert SlicedWassersteinDistance(num_directions=100)(empty_diag, empty_diag) == 0.
+ assert SlicedWassersteinKernel(num_directions=100, bandwidth=1.)(empty_diag, empty_diag) == 1.
+ assert WassersteinDistance(mode="hera", delta=0.0001)(empty_diag, empty_diag) == 0.
+ assert WassersteinDistance(mode="pot")(empty_diag, empty_diag) == 0.
+ assert BottleneckDistance(epsilon=.001)(empty_diag, empty_diag) == 0.
+ assert BottleneckDistance()(empty_diag, empty_diag) == 0.
+# PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))(empty_diag, empty_diag)
+# PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))(empty_diag, empty_diag)
+# PersistenceScaleSpaceKernel(bandwidth=1.)(empty_diag, empty_diag)
+# PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
+# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)(empty_diag, empty_diag)
+# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
+
+
+def test_silhouette_permutation_invariance():
+ dgm = _n_diags(1)[0]
+ dgm_permuted = dgm[np.random.permutation(dgm.shape[0]).astype(int)]
+ random_resolution = random.randint(50, 100) * 10
+ slt = Silhouette(resolution=random_resolution, weight=pow(2))
+
+ assert np.all(np.isclose(slt(dgm), slt(dgm_permuted)))
+
+
+def test_silhouette_multiplication_invariance():
+ dgm = _n_diags(1)[0]
+ n_repetitions = np.random.randint(2, high=10)
+ dgm_augmented = np.repeat(dgm, repeats=n_repetitions, axis=0)
+
+ random_resolution = random.randint(50, 100) * 10
+ slt = Silhouette(resolution=random_resolution, weight=pow(2))
+ assert np.all(np.isclose(slt(dgm), slt(dgm_augmented)))
+
+
+def test_silhouette_numeric():
+ dgm = np.array([[2., 3.], [5., 6.]])
+ slt = Silhouette(resolution=9, weight=pow(1), sample_range=[2., 6.])
+ #slt.fit([dgm])
+ # x_values = array([2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.])
+
+ expected_silhouette = np.array([0., 0.5, 0., 0., 0., 0., 0., 0.5, 0.])/np.sqrt(2)
+ output_silhouette = slt(dgm)
+ assert np.all(np.isclose(output_silhouette, expected_silhouette))
+
+
+def test_landscape_small_persistence_invariance():
+ dgm = np.array([[2., 6.], [2., 5.], [3., 7.]])
+ small_persistence_pts = np.random.rand(10, 2)
+ small_persistence_pts[:, 1] += small_persistence_pts[:, 0]
+ small_persistence_pts += np.min(dgm)
+ dgm_augmented = np.concatenate([dgm, small_persistence_pts], axis=0)
+
+ lds = Landscape(num_landscapes=2, resolution=5)
+ lds_dgm, lds_dgm_augmented = lds(dgm), lds(dgm_augmented)
+
+ assert np.all(np.isclose(lds_dgm, lds_dgm_augmented))
+
+
+def test_landscape_numeric():
+ dgm = np.array([[2., 6.], [3., 5.]])
+ lds_ref = np.array([
+ 0., 0.5, 1., 1.5, 2., 1.5, 1., 0.5, 0., # tent of [2, 6]
+ 0., 0., 0., 0.5, 1., 0.5, 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+ ])
+ lds_ref *= np.sqrt(2)
+ lds = Landscape(num_landscapes=4, resolution=9, sample_range=[2., 6.])
+ lds_dgm = lds(dgm)
+ assert np.all(np.isclose(lds_dgm, lds_ref))
+
+
+def test_landscape_nan_range():
+ dgm = np.array([[2., 6.], [3., 5.]])
+ lds = Landscape(num_landscapes=2, resolution=9, sample_range=[np.nan, 6.])
+ lds_dgm = lds(dgm)
+ assert (lds.sample_range_fixed[0] == 2) & (lds.sample_range_fixed[1] == 6)
+ assert lds.new_resolution == 10
+
+def test_endpoints():
+ diags = [ np.array([[2., 3.]]) ]
+ for vec in [ Landscape(), Silhouette(), BettiCurve(), Entropy(mode="vector") ]:
+ vec.fit(diags)
+ assert vec.grid_[0] > 2 and vec.grid_[-1] < 3
+ for vec in [ Landscape(keep_endpoints=True), Silhouette(keep_endpoints=True), BettiCurve(keep_endpoints=True), Entropy(mode="vector", keep_endpoints=True)]:
+ vec.fit(diags)
+ assert vec.grid_[0] == 2 and vec.grid_[-1] == 3
+ vec = BettiCurve(resolution=None)
+ vec.fit(diags)
+ assert np.equal(vec.grid_, [-np.inf, 2., 3.]).all()
+
+def test_get_params():
+ for vec in [ Landscape(), Silhouette(), BettiCurve(), Entropy(mode="vector") ]:
+ vec.get_params()
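One worked number behind test_entropy_miscalculation: the scalar mode matches the usual persistent entropy, -sum_i p_i * log(p_i) with p_i the normalized lifetimes, as the test's own pe() helper encodes:

    import numpy as np

    # Lifetimes of the test diagram [[0, 1], [0, 1], [0, 2]] are 1, 1, 2,
    # so p = (1/4, 1/4, 1/2) and the entropy is about 1.0397.
    lifetimes = np.array([1.0, 1.0, 2.0])
    p = lifetimes / lifetimes.sum()
    print(-np.dot(p, np.log(p)))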
diff --git a/src/python/test/test_representations_preprocessing.py b/src/python/test/test_representations_preprocessing.py
new file mode 100644
index 00000000..838cf30c
--- /dev/null
+++ b/src/python/test/test_representations_preprocessing.py
@@ -0,0 +1,39 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.representations.preprocessing import DimensionSelector
+import numpy as np
+import pytest
+
+H0_0 = np.array([0.0, 0.0])
+H1_0 = np.array([1.0, 0.0])
+H0_1 = np.array([0.0, 1.0])
+H1_1 = np.array([1.0, 1.0])
+H0_2 = np.array([0.0, 2.0])
+H1_2 = np.array([1.0, 2.0])
+
+
+def test_dimension_selector():
+ X = [[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]]
+ ds = DimensionSelector(index=0)
+ h0 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h0[0], H0_0)
+ np.testing.assert_array_equal(h0[1], H0_1)
+ np.testing.assert_array_equal(h0[2], H0_2)
+
+ ds = DimensionSelector(index=1)
+ h1 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h1[0], H1_0)
+ np.testing.assert_array_equal(h1[1], H1_1)
+ np.testing.assert_array_equal(h1[2], H1_2)
+
+ ds = DimensionSelector(index=2)
+ with pytest.raises(IndexError):
+ h2 = ds.fit_transform([[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]])
diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py
index b02a68e1..a2f43a1b 100755
--- a/src/python/test/test_rips_complex.py
+++ b/src/python/test/test_rips_complex.py
@@ -32,7 +32,7 @@ def test_rips_from_points():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -44,6 +44,7 @@ def test_rips_from_points():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
@@ -95,7 +96,7 @@ def test_rips_from_distance_matrix():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -107,6 +108,7 @@ def test_rips_from_distance_matrix():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
@@ -131,3 +133,24 @@ def test_filtered_rips_from_distance_matrix():
assert simplex_tree.num_simplices() == 8
assert simplex_tree.num_vertices() == 4
+
+
+def test_sparse_with_multiplicity():
+ points = [
+ [3, 4],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [3, 4.1],
+ ]
+ rips = RipsComplex(points=points, sparse=0.01)
+ simplex_tree = rips.create_simplex_tree(max_dimension=2)
+ assert simplex_tree.num_simplices() == 7
+ diag = simplex_tree.persistence()
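The sparse test above relies on the sparse Rips approximation representing near-duplicate points by a single vertex: with sparse=0.01 the ten copies of [0.1, 2] collapse, leaving 3 vertices, 3 edges and 1 triangle, hence the asserted 7 simplices. A standalone sketch of the same check:

    import gudhi

    pts = [[3, 4]] + [[0.1, 2]] * 10 + [[3, 4.1]]
    st = gudhi.RipsComplex(points=pts, sparse=0.01).create_simplex_tree(max_dimension=2)
    print(st.num_simplices())  # 7: the duplicates are merged into one vertex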
diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py
new file mode 100755
index 00000000..c567d4c1
--- /dev/null
+++ b/src/python/test/test_simplex_generators.py
@@ -0,0 +1,64 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi
+import numpy as np
+
+
+def test_flag_generators():
+ pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03], [100, 0], [100, 3.01], [103, 0], [103.02, 3.03]])
+ r = gudhi.RipsComplex(points=pts, max_edge_length=4)
+ st = r.create_simplex_tree(max_dimension=50)
+ st.persistence()
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], [[2, 2, 0], [1, 1, 0], [3, 3, 1], [6, 6, 4], [5, 5, 4], [7, 7, 5]])
+ assert len(g[1]) == 1
+ assert np.array_equal(g[1][0], [[3, 2, 2, 1]])
+ assert np.array_equal(g[2], [0, 4])
+ assert len(g[3]) == 1
+ assert np.array_equal(g[3][0], [[7, 6]])
+ # Compare trivial cases (where the simplex is the generator) with persistence_pairs.
+ # This still makes assumptions on the order of vertices in a simplex and could be more robust.
+ pairs = st.persistence_pairs()
+ assert {tuple(i) for i in g[0]} == {(i[0][0],) + tuple(i[1]) for i in pairs if len(i[0]) == 1 and len(i[1]) != 0}
+ assert {(i[0], i[1]) for i in g[1][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) != 0}
+ assert set(g[2]) == {i[0][0] for i in pairs if len(i[0]) == 1 and len(i[1]) == 0}
+ assert {(i[0], i[1]) for i in g[3][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) == 0}
+
+
+def test_lower_star_generators():
+ st = gudhi.SimplexTree()
+ st.insert([0, 1, 2], -10)
+ st.insert([0, 3], -10)
+ st.insert([1, 3], -10)
+ st.assign_filtration([2], -1)
+ st.assign_filtration([3], 0)
+ st.assign_filtration([0], 1)
+ st.assign_filtration([1], 2)
+ st.make_filtration_non_decreasing()
+ st.persistence(min_persistence=-1)
+ g = st.lower_star_persistence_generators()
+ assert len(g[0]) == 2
+ assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]])
+ assert np.array_equal(g[0][1], [[1, 1]])
+ assert len(g[1]) == 2
+ assert np.array_equal(g[1][0], [2])
+ assert np.array_equal(g[1][1], [1])
+
+
+def test_empty():
+ st = gudhi.SimplexTree()
+ st.persistence()
+ assert st.lower_star_persistence_generators() == ([], [])
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], np.empty((0, 3)))
+ assert g[1] == []
+ assert np.array_equal(g[2], [])
+ assert g[3] == []
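For readers decoding the layouts asserted in test_flag_generators, a hedged reading consistent with those assertions: g[0] holds the finite dimension-0 pairs as rows [birth_vertex, death_edge_v0, death_edge_v1], g[1][d-1] holds the finite dimension-d pairs as rows of four vertices (birth edge, then death edge), g[2] lists the essential dimension-0 birth vertices, and g[3][d-1] the essential dimension-d birth edges. A small unpacking sketch:

    import gudhi
    import numpy as np

    pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03]])
    st = gudhi.RipsComplex(points=pts, max_edge_length=4).create_simplex_tree(max_dimension=2)
    st.persistence()
    g = st.flag_persistence_generators()
    for birth_vertex, e0, e1 in g[0]:  # finite H0: a vertex merged by an edge
        print("vertex", birth_vertex, "dies with edge", (e0, e1))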
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 1822c43b..2ccbfbf5 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -9,6 +9,8 @@
"""
from gudhi import SimplexTree
+import numpy as np
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -45,7 +47,6 @@ def test_insertion():
assert st.find([2, 3]) == False
# filtration test
- st.initialize_filtration()
assert st.filtration([0, 1, 2]) == 4.0
assert st.filtration([0, 2]) == 4.0
assert st.filtration([1, 2]) == 4.0
@@ -55,7 +56,7 @@ def test_insertion():
assert st.filtration([1]) == 0.0
# skeleton test
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1, 2], 4.0),
([0, 1], 0.0),
([0, 2], 4.0),
@@ -64,7 +65,7 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(1) == [
+ assert list(st.get_skeleton(1)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -72,12 +73,12 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(0) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
+ assert list(st.get_skeleton(0)) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
# remove_maximal_simplex test
assert st.get_cofaces([0, 1, 2], 1) == []
st.remove_maximal_simplex([0, 1, 2])
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -92,7 +93,6 @@ def test_insertion():
assert st.find([1]) == True
assert st.find([2]) == True
- st.initialize_filtration()
assert st.persistence(persistence_dim_max=True) == [
(1, (4.0, float("inf"))),
(0, (0.0, float("inf"))),
@@ -126,7 +126,8 @@ def test_expansion():
assert st.num_vertices() == 7
assert st.num_simplices() == 17
- assert st.get_filtration() == [
+
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -149,9 +150,8 @@ def test_expansion():
st.expansion(3)
assert st.num_vertices() == 7
assert st.num_simplices() == 22
- st.initialize_filtration()
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -248,3 +248,399 @@ def test_make_filtration_non_decreasing():
assert st.filtration([3, 4, 5]) == 2.0
assert st.filtration([3, 4]) == 2.0
assert st.filtration([4, 5]) == 2.0
+
+
+def test_extend_filtration():
+
+ # Inserted simplex:
+ # 5 4
+ # o o
+ # / \ /
+ # o o
+ # /2\ /3
+ # o o
+ # 1 0
+
+ st = SimplexTree()
+ st.insert([0, 2])
+ st.insert([1, 2])
+ st.insert([0, 3])
+ st.insert([2, 5])
+ st.insert([3, 4])
+ st.insert([3, 5])
+ st.assign_filtration([0], 1.0)
+ st.assign_filtration([1], 2.0)
+ st.assign_filtration([2], 3.0)
+ st.assign_filtration([3], 4.0)
+ st.assign_filtration([4], 5.0)
+ st.assign_filtration([5], 6.0)
+
+ assert list(st.get_filtration()) == [
+ ([0, 2], 0.0),
+ ([1, 2], 0.0),
+ ([0, 3], 0.0),
+ ([3, 4], 0.0),
+ ([2, 5], 0.0),
+ ([3, 5], 0.0),
+ ([0], 1.0),
+ ([1], 2.0),
+ ([2], 3.0),
+ ([3], 4.0),
+ ([4], 5.0),
+ ([5], 6.0),
+ ]
+
+ st.extend_filtration()
+
+ assert list(st.get_filtration()) == [
+ ([6], -3.0),
+ ([0], -2.0),
+ ([1], -1.8),
+ ([2], -1.6),
+ ([0, 2], -1.6),
+ ([1, 2], -1.6),
+ ([3], -1.4),
+ ([0, 3], -1.4),
+ ([4], -1.2),
+ ([3, 4], -1.2),
+ ([5], -1.0),
+ ([2, 5], -1.0),
+ ([3, 5], -1.0),
+ ([5, 6], 1.0),
+ ([4, 6], 1.2),
+ ([3, 6], 1.4),
+ ([3, 4, 6], 1.4),
+ ([3, 5, 6], 1.4),
+ ([2, 6], 1.6),
+ ([2, 5, 6], 1.6),
+ ([1, 6], 1.8),
+ ([1, 2, 6], 1.8),
+ ([0, 6], 2.0),
+ ([0, 2, 6], 2.0),
+ ([0, 3, 6], 2.0),
+ ]
+
+ dgms = st.extended_persistence(min_persistence=-1.0)
+ assert len(dgms) == 4
+ # Sort by (death-birth) descending - we are only interested in those with the longest life span
+ for idx in range(4):
+ dgms[idx] = sorted(dgms[idx], key=lambda x: (-abs(x[1][0] - x[1][1])))
+
+ assert dgms[0][0][1][0] == pytest.approx(2.0)
+ assert dgms[0][0][1][1] == pytest.approx(3.0)
+ assert dgms[1][0][1][0] == pytest.approx(5.0)
+ assert dgms[1][0][1][1] == pytest.approx(4.0)
+ assert dgms[2][0][1][0] == pytest.approx(1.0)
+ assert dgms[2][0][1][1] == pytest.approx(6.0)
+ assert dgms[3][0][1][0] == pytest.approx(6.0)
+ assert dgms[3][0][1][1] == pytest.approx(1.0)
+
+
+def test_simplices_iterator():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2], filtration=4.0) == True
+ assert st.insert([2, 3, 4], filtration=2.0) == True
+
+ for simplex in st.get_simplices():
+ print("simplex is: ", simplex[0])
+ assert st.find(simplex[0]) == True
+ print("filtration is: ", simplex[1])
+ assert st.filtration(simplex[0]) == simplex[1]
+
+
+def test_collapse_edges():
+ st = SimplexTree()
+
+ assert st.insert([0, 1], filtration=1.0) == True
+ assert st.insert([1, 2], filtration=1.0) == True
+ assert st.insert([2, 3], filtration=1.0) == True
+ assert st.insert([0, 3], filtration=1.0) == True
+ assert st.insert([0, 2], filtration=2.0) == True
+ assert st.insert([1, 3], filtration=2.0) == True
+
+ assert st.num_simplices() == 10
+
+ st.collapse_edges()
+ assert st.num_simplices() == 9
+ assert st.find([0, 2]) == False # [1, 3] would be fine as well
+ for simplex in st.get_skeleton(0):
+ assert simplex[1] == 1.0
+
+
+def test_reset_filtration():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2], 3.0) == True
+ assert st.insert([0, 3], 2.0) == True
+ assert st.insert([3, 4, 5], 3.0) == True
+ assert st.insert([0, 1, 6, 7], 4.0) == True
+
+ # Guaranteed by construction
+ for simplex in st.get_simplices():
+ assert st.filtration(simplex[0]) >= 2.0
+
+ # Iterate on dimensions up to 5, even though the simplex tree is of dimension 3, to test the limits
+ for dimension in range(5, -1, -1):
+ st.reset_filtration(0.0, dimension)
+ for simplex in st.get_skeleton(3):
+ print(simplex)
+ if len(simplex[0]) < (dimension) + 1:
+ assert st.filtration(simplex[0]) >= 2.0
+ else:
+ assert st.filtration(simplex[0]) == 0.0
+
+
+def test_boundaries_iterator():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2, 3], filtration=1.0) == True
+ assert st.insert([1, 2, 3, 4], filtration=2.0) == True
+
+ assert list(st.get_boundaries([1, 2, 3])) == [([1, 2], 1.0), ([1, 3], 1.0), ([2, 3], 1.0)]
+ assert list(st.get_boundaries([2, 3, 4])) == [([2, 3], 1.0), ([2, 4], 2.0), ([3, 4], 2.0)]
+ assert list(st.get_boundaries([2])) == []
+
+ with pytest.raises(RuntimeError):
+ list(st.get_boundaries([]))
+
+ with pytest.raises(RuntimeError):
+ list(st.get_boundaries([0, 4])) # (0, 4) does not exist
+
+ with pytest.raises(RuntimeError):
+ list(st.get_boundaries([6])) # (6) does not exist
+
+
+def test_persistence_intervals_in_dimension():
+ # Here is our triangulation of a 2-torus - taken from https://dioscuri-tda.org/Paris_TDA_Tutorial_2021.html
+ # 0-----3-----4-----0
+ # | \ | \ | \ | \ |
+ # | \ | \ | \ | \ |
+ # 1-----8-----7-----1
+ # | \ | \ | \ | \ |
+ # | \ | \ | \ | \ |
+ # 2-----5-----6-----2
+ # | \ | \ | \ | \ |
+ # | \ | \ | \ | \ |
+ # 0-----3-----4-----0
+ st = SimplexTree()
+ st.insert([0, 1, 8])
+ st.insert([0, 3, 8])
+ st.insert([3, 7, 8])
+ st.insert([3, 4, 7])
+ st.insert([1, 4, 7])
+ st.insert([0, 1, 4])
+ st.insert([1, 2, 5])
+ st.insert([1, 5, 8])
+ st.insert([5, 6, 8])
+ st.insert([6, 7, 8])
+ st.insert([2, 6, 7])
+ st.insert([1, 2, 7])
+ st.insert([0, 2, 3])
+ st.insert([2, 3, 5])
+ st.insert([3, 4, 5])
+ st.insert([4, 5, 6])
+ st.insert([0, 4, 6])
+ st.insert([0, 2, 6])
+ st.compute_persistence(persistence_dim_max=True)
+
+ H0 = st.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[0.0, float("inf")]]))
+ H1 = st.persistence_intervals_in_dimension(1)
+ assert np.array_equal(H1, np.array([[0.0, float("inf")], [0.0, float("inf")]]))
+ H2 = st.persistence_intervals_in_dimension(2)
+ assert np.array_equal(H2, np.array([[0.0, float("inf")]]))
+ # Test empty case
+ assert st.persistence_intervals_in_dimension(3).shape == (0, 2)
+
+
+def test_equality_operator():
+ st1 = SimplexTree()
+ st2 = SimplexTree()
+
+ assert st1 == st2
+
+ st1.insert([1, 2, 3], 4.0)
+ assert st1 != st2
+
+ st2.insert([1, 2, 3], 4.0)
+ assert st1 == st2
+
+
+def test_simplex_tree_deep_copy():
+ st = SimplexTree()
+ st.insert([1, 2, 3], 0.0)
+ # compute persistence only on the original
+ st.compute_persistence()
+
+ st_copy = st.copy()
+ assert st_copy == st
+ st_filt_list = list(st.get_filtration())
+
+ # check persistence is not copied
+ assert st.__is_persistence_defined() == True
+ assert st_copy.__is_persistence_defined() == False
+
+ # remove something in the copy and check the copy is included in the original
+ st_copy.remove_maximal_simplex([1, 2, 3])
+ a_filt_list = list(st_copy.get_filtration())
+ assert len(a_filt_list) < len(st_filt_list)
+
+ for a_splx in a_filt_list:
+ assert a_splx in st_filt_list
+
+ # test double free
+ del st
+ del st_copy
+
+
+def test_simplex_tree_deep_copy_constructor():
+ st = SimplexTree()
+ st.insert([1, 2, 3], 0.0)
+ # compute persistence only on the original
+ st.compute_persistence()
+
+ st_copy = SimplexTree(st)
+ assert st_copy == st
+ st_filt_list = list(st.get_filtration())
+
+ # check persistence is not copied
+ assert st.__is_persistence_defined() == True
+ assert st_copy.__is_persistence_defined() == False
+
+ # remove something in the copy and check the copy is included in the original
+ st_copy.remove_maximal_simplex([1, 2, 3])
+ a_filt_list = list(st_copy.get_filtration())
+ assert len(a_filt_list) < len(st_filt_list)
+
+ for a_splx in a_filt_list:
+ assert a_splx in st_filt_list
+
+ # test double free
+ del st
+ del st_copy
+
+
+def test_simplex_tree_constructor_exception():
+ with pytest.raises(TypeError):
+ st = SimplexTree(other="Construction from a string shall raise an exception")
+
+
+def test_create_from_array():
+ a = np.array([[1, 4, 13, 6], [4, 3, 11, 5], [13, 11, 10, 12], [6, 5, 12, 2]])
+ st = SimplexTree.create_from_array(a, max_filtration=5.0)
+ assert list(st.get_filtration()) == [([0], 1.0), ([3], 2.0), ([1], 3.0), ([0, 1], 4.0), ([1, 3], 5.0)]
+
+
+def test_insert_edges_from_coo_matrix():
+ try:
+ from scipy.sparse import coo_matrix
+ from scipy.spatial import cKDTree
+ except ImportError:
+ print("Skipping, no SciPy")
+ return
+
+ st = SimplexTree()
+ st.insert([1, 2, 7], 7)
+ row = np.array([2, 5, 3])
+ col = np.array([1, 4, 6])
+ dat = np.array([1, 2, 3])
+ edges = coo_matrix((dat, (row, col)))
+ st.insert_edges_from_coo_matrix(edges)
+ assert list(st.get_filtration()) == [
+ ([1], 1.0),
+ ([2], 1.0),
+ ([1, 2], 1.0),
+ ([4], 2.0),
+ ([5], 2.0),
+ ([4, 5], 2.0),
+ ([3], 3.0),
+ ([6], 3.0),
+ ([3, 6], 3.0),
+ ([7], 7.0),
+ ([1, 7], 7.0),
+ ([2, 7], 7.0),
+ ([1, 2, 7], 7.0),
+ ]
+
+ pts = np.random.rand(100, 2)
+ tree = cKDTree(pts)
+ edges = tree.sparse_distance_matrix(tree, max_distance=0.15, output_type="coo_matrix")
+ st = SimplexTree()
+ st.insert_edges_from_coo_matrix(edges)
+ assert 100 < st.num_simplices() < 1000
+
+
+def test_insert_batch():
+ st = SimplexTree()
+ # vertices
+ st.insert_batch(np.array([[6, 1, 5]]), np.array([-5.0, 2.0, -3.0]))
+ # triangles
+ st.insert_batch(np.array([[2, 10], [5, 0], [6, 11]]), np.array([4.0, 0.0]))
+ # edges
+ st.insert_batch(np.array([[1, 5], [2, 5]]), np.array([1.0, 3.0]))
+
+ assert list(st.get_filtration()) == [
+ ([6], -5.0),
+ ([5], -3.0),
+ ([0], 0.0),
+ ([10], 0.0),
+ ([0, 10], 0.0),
+ ([11], 0.0),
+ ([0, 11], 0.0),
+ ([10, 11], 0.0),
+ ([0, 10, 11], 0.0),
+ ([1], 1.0),
+ ([2], 1.0),
+ ([1, 2], 1.0),
+ ([2, 5], 4.0),
+ ([2, 6], 4.0),
+ ([5, 6], 4.0),
+ ([2, 5, 6], 4.0),
+ ]
+
+
+def test_expansion_with_blocker():
+ st = SimplexTree()
+ st.insert([0, 1], 0)
+ st.insert([0, 2], 1)
+ st.insert([0, 3], 2)
+ st.insert([1, 2], 3)
+ st.insert([1, 3], 4)
+ st.insert([2, 3], 5)
+ st.insert([2, 4], 6)
+ st.insert([3, 6], 7)
+ st.insert([4, 5], 8)
+ st.insert([4, 6], 9)
+ st.insert([5, 6], 10)
+ st.insert([6], 10)
+
+ def blocker(simplex):
+ try:
+ # Block all simplices that contain vertex 6
+ simplex.index(6)
+ print(simplex, " is blocked")
+ return True
+ except ValueError:
+ print(simplex, " is accepted")
+ st.assign_filtration(simplex, st.filtration(simplex) + 1.0)
+ return False
+
+ st.expansion_with_blocker(2, blocker)
+ assert st.num_simplices() == 22
+ assert st.dimension() == 2
+ assert st.find([4, 5, 6]) == False
+ assert st.filtration([0, 1, 2]) == 4.0
+ assert st.filtration([0, 1, 3]) == 5.0
+ assert st.filtration([0, 2, 3]) == 6.0
+ assert st.filtration([1, 2, 3]) == 6.0
+
+ st.expansion_with_blocker(3, blocker)
+ assert st.num_simplices() == 23
+ assert st.dimension() == 3
+ assert st.find([4, 5, 6]) == False
+ assert st.filtration([0, 1, 2]) == 4.0
+ assert st.filtration([0, 1, 3]) == 5.0
+ assert st.filtration([0, 2, 3]) == 6.0
+ assert st.filtration([1, 2, 3]) == 6.0
+ assert st.filtration([0, 1, 2, 3]) == 7.0
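Back in test_extend_filtration, one way to read the extended values the test asserts (an inference from those assertions, not from the documented API): with vertex filtrations spanning [fmin, fmax] = [1, 6], the ascending copy appears rescaled into [-2, -1] and the descending (cone) copy mirrored into [1, 2]:

    # Assumed rescaling, inferred from the asserted values only:
    fmin, fmax = 1.0, 6.0
    asc = lambda f: -2 + (f - fmin) / (fmax - fmin)   # ascending copy, in [-2, -1]
    desc = lambda f: 2 - (f - fmin) / (fmax - fmin)   # descending copy, in [1, 2]
    assert abs(asc(2.0) - (-1.8)) < 1e-12 and abs(desc(5.0) - 1.2) < 1e-12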
diff --git a/src/python/test/test_sklearn_cubical_persistence.py b/src/python/test/test_sklearn_cubical_persistence.py
new file mode 100644
index 00000000..1c05a215
--- /dev/null
+++ b/src/python/test/test_sklearn_cubical_persistence.py
@@ -0,0 +1,59 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.sklearn.cubical_persistence import CubicalPersistence
+import numpy as np
+from sklearn import datasets
+
+CUBICAL_PERSISTENCE_H0_IMG0 = np.array([[0.0, 6.0], [0.0, 8.0], [0.0, np.inf]])
+
+
+def test_simple_constructor_from_top_cells():
+ cells = datasets.load_digits().images[0]
+ cp = CubicalPersistence(homology_dimensions=0)
+ np.testing.assert_array_equal(cp._CubicalPersistence__transform_only_this_dim(cells), CUBICAL_PERSISTENCE_H0_IMG0)
+ cp = CubicalPersistence(homology_dimensions=[0, 2])
+ diags = cp._CubicalPersistence__transform(cells)
+ assert len(diags) == 2
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+
+def test_simple_constructor_from_top_cells_list():
+ digits = datasets.load_digits().images[:10]
+ cp = CubicalPersistence(homology_dimensions=0, n_jobs=-2)
+
+ diags = cp.fit_transform(digits)
+ assert len(diags) == 10
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+ cp = CubicalPersistence(homology_dimensions=[0, 1], n_jobs=-1)
+ diagsH0H1 = cp.fit_transform(digits)
+ assert len(diagsH0H1) == 10
+ for idx in range(10):
+ np.testing.assert_array_equal(diags[idx], diagsH0H1[idx][0])
+
+def test_simple_constructor_from_flattened_cells():
+ cells = datasets.load_digits().images[0]
+ # Non-square (extended) flattened cells
+ flat_cells = np.hstack((cells, np.zeros((cells.shape[0], 2)))).flatten()
+
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([flat_cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+ # Non-square (extended) non-flattened cells
+ cells = np.hstack((cells, np.zeros((cells.shape[0], 2))))
+
+ # The aim of this second part of the test is to exercise the reshape even when it is not mandatory
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
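Since CubicalPersistence follows the scikit-learn transformer API, the list-in, list-of-diagrams-out pattern used throughout the tests above is simply:

    from gudhi.sklearn.cubical_persistence import CubicalPersistence
    from sklearn import datasets

    digits = datasets.load_digits().images[:3]
    # One H0 diagram per image; n_jobs=-1 parallelizes over the list.
    diagrams = CubicalPersistence(homology_dimensions=0, n_jobs=-1).fit_transform(digits)
    print([d.shape for d in diagrams])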
diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py
index fe0985fa..c1cb4e3f 100755
--- a/src/python/test/test_subsampling.py
+++ b/src/python/test/test_subsampling.py
@@ -16,17 +16,9 @@ __license__ = "MIT"
def test_write_off_file_for_tests():
- file = open("subsample.off", "w")
- file.write("nOFF\n")
- file.write("2 7 0 0\n")
- file.write("1.0 1.0\n")
- file.write("7.0 0.0\n")
- file.write("4.0 6.0\n")
- file.write("9.0 6.0\n")
- file.write("0.0 14.0\n")
- file.write("2.0 19.0\n")
- file.write("9.0 17.0\n")
- file.close()
+ gudhi.write_points_to_off_file(
+ "subsample.off", [[1.0, 1.0], [7.0, 0.0], [4.0, 6.0], [9.0, 6.0], [0.0, 14.0], [2.0, 19.0], [9.0, 17.0]]
+ )
def test_simple_choose_n_farthest_points_with_a_starting_point():
@@ -34,54 +26,29 @@ def test_simple_choose_n_farthest_points_with_a_starting_point():
i = 0
for point in point_set:
# The iteration starts with the given starting point
- sub_set = gudhi.choose_n_farthest_points(
- points=point_set, nb_points=1, starting_point=i
- )
+ sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=1, starting_point=i)
assert sub_set[0] == point_set[i]
i = i + 1
# The iteration finds then the farthest
- sub_set = gudhi.choose_n_farthest_points(
- points=point_set, nb_points=2, starting_point=1
- )
+ sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=2, starting_point=1)
assert sub_set[1] == point_set[3]
- sub_set = gudhi.choose_n_farthest_points(
- points=point_set, nb_points=2, starting_point=3
- )
+ sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=2, starting_point=3)
assert sub_set[1] == point_set[1]
- sub_set = gudhi.choose_n_farthest_points(
- points=point_set, nb_points=2, starting_point=0
- )
+ sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=2, starting_point=0)
assert sub_set[1] == point_set[2]
- sub_set = gudhi.choose_n_farthest_points(
- points=point_set, nb_points=2, starting_point=2
- )
+ sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=2, starting_point=2)
assert sub_set[1] == point_set[0]
# Test the limits
- assert (
- gudhi.choose_n_farthest_points(points=[], nb_points=0, starting_point=0) == []
- )
- assert (
- gudhi.choose_n_farthest_points(points=[], nb_points=1, starting_point=0) == []
- )
- assert (
- gudhi.choose_n_farthest_points(points=[], nb_points=0, starting_point=1) == []
- )
- assert (
- gudhi.choose_n_farthest_points(points=[], nb_points=1, starting_point=1) == []
- )
+ assert gudhi.choose_n_farthest_points(points=[], nb_points=0, starting_point=0) == []
+ assert gudhi.choose_n_farthest_points(points=[], nb_points=1, starting_point=0) == []
+ assert gudhi.choose_n_farthest_points(points=[], nb_points=0, starting_point=1) == []
+ assert gudhi.choose_n_farthest_points(points=[], nb_points=1, starting_point=1) == []
# From off file test
for i in range(0, 7):
- assert (
- len(
- gudhi.choose_n_farthest_points(
- off_file="subsample.off", nb_points=i, starting_point=i
- )
- )
- == i
- )
+ assert len(gudhi.choose_n_farthest_points(off_file="subsample.off", nb_points=i, starting_point=i)) == i
def test_simple_choose_n_farthest_points_randomed():
@@ -91,7 +58,7 @@ def test_simple_choose_n_farthest_points_randomed():
assert gudhi.choose_n_farthest_points(points=[], nb_points=1) == []
assert gudhi.choose_n_farthest_points(points=point_set, nb_points=0) == []
- # Go furter than point set on purpose
+ # Go further than point set on purpose
for iter in range(1, 10):
sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=iter)
for sub in sub_set:
@@ -104,10 +71,7 @@ def test_simple_choose_n_farthest_points_randomed():
# From off file test
for i in range(0, 7):
- assert (
- len(gudhi.choose_n_farthest_points(off_file="subsample.off", nb_points=i))
- == i
- )
+ assert len(gudhi.choose_n_farthest_points(off_file="subsample.off", nb_points=i)) == i
def test_simple_pick_n_random_points():
@@ -117,10 +81,9 @@ def test_simple_pick_n_random_points():
assert gudhi.pick_n_random_points(points=[], nb_points=1) == []
assert gudhi.pick_n_random_points(points=point_set, nb_points=0) == []
- # Go furter than point set on purpose
+ # Go further than point set on purpose
for iter in range(1, 10):
sub_set = gudhi.pick_n_random_points(points=point_set, nb_points=iter)
- print(5)
for sub in sub_set:
found = False
for point in point_set:
@@ -131,9 +94,7 @@ def test_simple_pick_n_random_points():
# From off file test
for i in range(0, 7):
- assert (
- len(gudhi.pick_n_random_points(off_file="subsample.off", nb_points=i)) == i
- )
+ assert len(gudhi.pick_n_random_points(off_file="subsample.off", nb_points=i)) == i
def test_simple_sparsify_points():
@@ -142,38 +103,21 @@ def test_simple_sparsify_points():
# assert gudhi.sparsify_point_set(points = [], min_squared_dist = 0.0) == []
# assert gudhi.sparsify_point_set(points = [], min_squared_dist = 10.0) == []
assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=0.0) == point_set
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.0) == point_set
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.0) == [
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=0.999) == point_set
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.001) == [
[0, 1],
[1, 0],
]
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.01) == [[0, 1]]
-
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=0.0))
- == 7
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=30.0))
- == 5
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=40.0))
- == 4
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=90.0))
- == 3
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=100.0))
- == 2
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=325.0))
- == 2
- )
- assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=325.01))
- == 1
- )
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.999) == [
+ [0, 1],
+ [1, 0],
+ ]
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.001) == [[0, 1]]
+
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=0.0)) == 7
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=30.0)) == 5
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=40.1)) == 4
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=89.9)) == 3
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=100.0)) == 2
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=324.9)) == 2
+ assert len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=325.01)) == 1
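The epsilon-shifted thresholds above (0.999/1.001, 1.999/2.001) make sense if point_set is the four unit-square corners, whose squared pairwise distances are exactly 1 and 2; testing just below and just above each exact value keeps the greedy sparsification away from floating-point ties. A minimal cross-check under that assumption (the point_set fixture is defined earlier in the test file and not shown in this hunk):

from itertools import combinations

point_set = [[0, 0], [0, 1], [1, 0], [1, 1]]  # assumed fixture, in some order
sq_dists = sorted({(p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2
                   for p, q in combinations(point_set, 2)})
assert sq_dists == [1, 2]  # square edges, then its diagonals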
diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py
index e650e99c..8668a2e0 100755
--- a/src/python/test/test_tangential_complex.py
+++ b/src/python/test/test_tangential_complex.py
@@ -37,7 +37,7 @@ def test_tangential():
assert st.num_simplices() == 6
assert st.num_vertices() == 4
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -45,6 +45,7 @@ def test_tangential():
([3], 0.0),
([1, 3], 0.0),
]
+
assert st.get_cofaces([0], 1) == [([0, 2], 0.0)]
assert point_list[0] == tc.get_point(0)
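The list(...) wrapper added above reflects that st.get_filtration() now yields its (simplex, filtration) pairs lazily instead of returning a list, so comparing the raw generator to a list would always fail. When the full list is not needed, iterating directly is cheaper; a sketch reusing the st built in this test:

for simplex, filtration in st.get_filtration():
    assert filtration == 0.0  # every simplex of this tangential complex enters at 0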
diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py
new file mode 100755
index 00000000..1ead9bca
--- /dev/null
+++ b/src/python/test/test_time_delay.py
@@ -0,0 +1,43 @@
+from gudhi.point_cloud.timedelay import TimeDelayEmbedding
+import numpy as np
+
+
+def test_normal():
+ # Sample array
+ ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ # Normal case.
+ prep = TimeDelayEmbedding()
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([2, 3, 4])).all()
+ assert (pointclouds[2] == np.array([3, 4, 5])).all()
+ assert (pointclouds[3] == np.array([4, 5, 6])).all()
+ assert (pointclouds[4] == np.array([5, 6, 7])).all()
+ assert (pointclouds[5] == np.array([6, 7, 8])).all()
+ assert (pointclouds[6] == np.array([7, 8, 9])).all()
+ assert (pointclouds[7] == np.array([8, 9, 10])).all()
+ # Delay = 3
+ prep = TimeDelayEmbedding(delay=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 4, 7])).all()
+ assert (pointclouds[1] == np.array([2, 5, 8])).all()
+ assert (pointclouds[2] == np.array([3, 6, 9])).all()
+ assert (pointclouds[3] == np.array([4, 7, 10])).all()
+ # Skip = 3
+ prep = TimeDelayEmbedding(skip=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([4, 5, 6])).all()
+ assert (pointclouds[2] == np.array([7, 8, 9])).all()
+ # Delay = 2 / Skip = 2
+ prep = TimeDelayEmbedding(delay=2, skip=2)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 3, 5])).all()
+ assert (pointclouds[1] == np.array([3, 5, 7])).all()
+ assert (pointclouds[2] == np.array([5, 7, 9])).all()
+
+ # Vector series
+ ts = np.arange(0, 10).reshape(-1, 2)
+ prep = TimeDelayEmbedding(dim=4)
+ prep.fit([ts])
+ assert (prep.transform([ts])[0] == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]).all()
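A quick cross-check of the point counts asserted above, assuming the usual sliding-window count for a series of length n embedded with dimension d, delay t and skip s (n_windows is an illustrative helper, not part of the gudhi API):

def n_windows(n, d, t, s):
    # number of windows of the form (x[i], x[i+t], ..., x[i+(d-1)*t]), i = 0, s, 2s, ...
    return (n - (d - 1) * t - 1) // s + 1

assert n_windows(10, 3, 1, 1) == 8  # TimeDelayEmbedding() defaults
assert n_windows(10, 3, 3, 1) == 4  # delay=3
assert n_windows(10, 3, 1, 3) == 3  # skip=3
assert n_windows(10, 3, 2, 2) == 3  # delay=2, skip=2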
diff --git a/src/python/test/test_tomato.py b/src/python/test/test_tomato.py
new file mode 100755
index 00000000..c571f799
--- /dev/null
+++ b/src/python/test/test_tomato.py
@@ -0,0 +1,65 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.clustering.tomato import Tomato
+import numpy as np
+import pytest
+import matplotlib.pyplot as plt
+
+# Disable graphics for testing purposes
+plt.show = lambda: None
+
+
+def test_tomato_1():
+ a = [(1, 2), (1.1, 1.9), (0.9, 1.8), (10, 0), (10.1, 0.05), (10.2, -0.1), (5.4, 0)]
+ t = Tomato(metric="euclidean", n_clusters=2, k=4, n_jobs=-1, eps=0.05)
+ assert np.array_equal(t.fit_predict(a), [1, 1, 1, 0, 0, 0, 0]) # or with swapped 0 and 1
+ assert np.array_equal(t.children_, [[0, 1]])
+
+ t = Tomato(density_type="KDE", r=1, k=4)
+ t.fit(a)
+ assert np.array_equal(t.leaf_labels_, [1, 1, 1, 0, 0, 0, 0]) # or with swapped 0 and 1
+ assert t.n_clusters_ == 2
+ t.merge_threshold_ = 10
+ assert t.n_clusters_ == 1
+ assert (t.labels_ == 0).all()
+
+ t = Tomato(graph_type="radius", r=0.1, metric="cosine", k=3)
+ assert np.array_equal(t.fit_predict(a), [1, 1, 1, 0, 0, 0, 0]) # or with swapped 0 and 1
+
+ t = Tomato(metric="euclidean", graph_type="radius", r=4.7, k=4)
+ t.fit(a)
+ assert t.max_weight_per_cc_.size == 2
+ assert t.neighbors_ == [[0, 1, 2], [0, 1, 2], [0, 1, 2], [3, 4, 5, 6], [3, 4, 5], [3, 4, 5], [3, 6]]
+ t.plot_diagram()
+
+ t = Tomato(graph_type="radius", r=4.7, k=4, symmetrize_graph=True)
+ t.fit(a)
+ assert t.max_weight_per_cc_.size == 2
+ assert [set(i) for i in t.neighbors_] == [{1, 2}, {0, 2}, {0, 1}, {4, 5, 6}, {3, 5}, {3, 4}, {3}]
+
+ t = Tomato(n_clusters=2, k=4, symmetrize_graph=True)
+ t.fit(a)
+ assert [set(i) for i in t.neighbors_] == [
+ {1, 2, 6},
+ {0, 2, 6},
+ {0, 1, 6},
+ {4, 5, 6},
+ {3, 5, 6},
+ {3, 4, 6},
+ {0, 1, 2, 3, 4, 5},
+ ]
+ t.plot_diagram()
+
+ t = Tomato(k=6, metric="manhattan")
+ t.fit(a)
+ assert t.diagram_.size == 0
+ assert t.max_weight_per_cc_.size == 1
+ t.plot_diagram()
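Note that in test_tomato_1 the merge_threshold_ assignment changes the clustering without a second fit call: moving the threshold only re-cuts the merge hierarchy computed by fit. A minimal sketch of that pattern, reusing the point set a from the test:

t = Tomato(density_type="KDE", r=1, k=4)
t.fit(a)                 # density estimate and merge tree computed once
assert t.n_clusters_ == 2
t.merge_threshold_ = 10  # coarsen: all clusters below this prominence merge
assert t.n_clusters_ == 1  # no refit was needed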
diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py
new file mode 100755
index 00000000..f68c748e
--- /dev/null
+++ b/src/python/test/test_wasserstein_barycenter.py
@@ -0,0 +1,46 @@
+from gudhi.wasserstein.barycenter import lagrangian_barycenter
+import numpy as np
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Theo Lacombe
+
+ Copyright (C) 2019 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Theo Lacombe"
+__copyright__ = "Copyright (C) 2019 Inria"
+__license__ = "MIT"
+
+
+def test_lagrangian_barycenter():
+
+ dg1 = np.array([[0.2, 0.5]])
+ dg2 = np.array([[0.2, 0.7]])
+ dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+ dg4 = np.array([])
+ dg5 = np.array([])
+ dg6 = np.array([])
+ res = np.array([[0.27916667, 0.55416667], [0.7375, 0.7625], [0.2375, 0.2625]])
+
+ dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91],
+ [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+ dg8 = np.array([[0., 4.], [4, 8]])
+
+ # error crit.
+ eps = 1e-7
+
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps
+ assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2)))
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps
+ Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True)
+ assert np.linalg.norm(Y - np.array([[1,3], [5, 7]])) < eps
+ assert np.abs(log["energy"] - 2) < eps
+ assert np.array_equal(log["groupings"][0], np.array([[0, -1], [1, -1]]))
+ assert np.array_equal(log["groupings"][1], np.array([[0, 0], [1, 1]]))
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3], [5, 7]])) < eps
+ assert lagrangian_barycenter(pdiagset = []) is None
+
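The expected Y == [[1, 3], [5, 7]] for pdiagset=[dg4, dg8] can be checked by hand: dg4 is empty, so each point of dg8 is matched to the diagonal, and the order-2 barycenter point minimizing |y - x|^2 + |y - proj(y)|^2 is the midpoint between x and its diagonal projection. A small sketch (midpoint_with_diagonal is an illustrative helper, not a gudhi function):

import numpy as np

def midpoint_with_diagonal(x):
    # orthogonal projection of (b, d) on the diagonal is ((b+d)/2, (b+d)/2)
    p = np.full(2, x.sum() / 2)
    return (x + p) / 2

assert np.array_equal(midpoint_with_diagonal(np.array([0., 4.])), [1., 3.])
assert np.array_equal(midpoint_with_diagonal(np.array([4., 8.])), [5., 7.])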
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 43dda77e..a76b6ce7 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -1,48 +1,201 @@
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Theo Lacombe
+ Author(s): Theo Lacombe, Marc Glisse
Copyright (C) 2019 Inria
Modification(s):
+ - 2020/07 Théo Lacombe: Added tests about handling essential parts in diagrams.
- YYYY/MM Author: Description of the modification
"""
-from gudhi.wasserstein import wasserstein_distance
+from gudhi.wasserstein.wasserstein import _proj_on_diag, _finite_part, _handle_essential_parts, _get_essential_parts
+from gudhi.wasserstein.wasserstein import _warn_infty
+from gudhi.wasserstein import wasserstein_distance as pot
+from gudhi.hera import wasserstein_distance as hera
import numpy as np
+import pytest
+
__author__ = "Theo Lacombe"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
-def test_basic_wasserstein():
+def test_proj_on_diag():
+ dgm = np.array([[1., 1.], [1., 2.], [3., 5.]])
+ assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]])
+ empty = np.empty((0, 2))
+ assert np.array_equal(_proj_on_diag(empty), empty)
+
+
+def test_finite_part():
+ diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf],
+ [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]])
+ assert np.array_equal(_finite_part(diag), [[0, 1], [3, 5]])
+
+
+def test_handle_essential_parts():
+ diag1 = np.array([[0, 1], [3, 5],
+ [2, np.inf], [3, np.inf],
+ [-np.inf, 8], [-np.inf, 12],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag2 = np.array([[0, 2], [3, 5],
+ [2, np.inf], [4, np.inf],
+ [-np.inf, 8], [-np.inf, 11],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag3 = np.array([[0, 2], [3, 5],
+ [2, np.inf], [4, np.inf], [6, np.inf],
+ [-np.inf, 8], [-np.inf, 11],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ c, m = _handle_essential_parts(diag1, diag2, order=1)
+ assert c == pytest.approx(2, 0.0001) # Note: here c is only the cost due to essential parts (thus 2, not 3)
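+ # Cost check, order 1: the -inf births pair as 8 vs 8 and 12 vs 11 (cost 1),
+ # and the +inf deaths pair as 2 vs 2 and 3 vs 4 (cost 1), hence c == 2.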
+ # Similarly, the matching only corresponds to essential parts.
+ # Note that (-inf,-inf) and (+inf,+inf) coordinates are matched to the diagonal.
+ assert np.array_equal(m, [[4, 4], [5, 5], [2, 2], [3, 3], [8, 8], [9, 9], [6, -1], [7, -1], [-1, 6], [-1, 7]])
+
+ c, m = _handle_essential_parts(diag1, diag3, order=1)
+ assert c == np.inf
+ assert (m is None)
+
+
+def test_get_essential_parts():
+ diag1 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf],
+ [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag2 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf]])
+
+ res = _get_essential_parts(diag1)
+ res2 = _get_essential_parts(diag2)
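+ # Index convention checked below: res[0] holds points with birth -inf and finite
+ # death, res[1] finite birth and death +inf, res[2] (-inf, +inf),
+ # res[3] (-inf, -inf), res[4] (+inf, +inf).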
+ assert np.array_equal(res[0], [4, 5])
+ assert np.array_equal(res[1], [2, 3])
+ assert np.array_equal(res[2], [8, 9])
+ assert np.array_equal(res[3], [6] )
+ assert np.array_equal(res[4], [7] )
+
+ assert np.array_equal(res2[0], [] )
+ assert np.array_equal(res2[1], [2, 3])
+ assert np.array_equal(res2[2], [] )
+ assert np.array_equal(res2[3], [] )
+ assert np.array_equal(res2[4], [] )
+
+
+def test_warn_infty():
+ with pytest.warns(UserWarning):
+ assert _warn_infty(matching=False) == np.inf
+ c, m = _warn_infty(matching=True)
+ assert (c == np.inf)
+ assert (m is None)
+
+
+def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
diag3 = np.array([[0, 2], [4, 6]])
diag4 = np.array([[0, 3], [4, 8]])
- emptydiag = np.array([[]])
+ emptydiag = np.array([])
+
+ # We just need to handle positive numbers here
+ def approx(x):
+ return pytest.approx(x, rel=delta)
assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=2.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=2.) == 0.
- assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == 2.
- assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == 4.
+ assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == approx(2.)
+ assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == approx(4.)
+
+ assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == approx(5.) # thank you Pythagorean triples (the L1 distances to the diagonal are 3 and 4)
+ assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == approx(2.5)
+ assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == approx(3.5355339059327378)
+
+ assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == approx(1.4453593023967701)
+ assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == approx(0.9772734057168739)
+
+ assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == approx(3.141592214572228)
+
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == approx(3.)
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == approx(3.) # no diag matching here
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == approx(np.sqrt(5))
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == approx(np.sqrt(5))
+ assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == approx(np.sqrt(5))
+
+ if test_infinity:
+ diag5 = np.array([[0, 3], [4, np.inf]])
+ diag6 = np.array([[7, 8], [4, 6], [3, np.inf]])
+
+ assert wasserstein_distance(diag4, diag5) == np.inf
+ assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.)
+ assert wasserstein_distance(diag5, emptydiag) == np.inf
+
+ if test_matching:
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1]
+ assert np.array_equal(match, [[-1, 0], [-1, 1]])
+ match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match, [[0, -1], [1, -1]])
+ match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1]
+ assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]])
+
+ if test_matching and test_infinity:
+ diag7 = np.array([[0, 3], [4, np.inf], [5, np.inf]])
+ diag8 = np.array([[0,1], [0, np.inf], [-np.inf, -np.inf], [np.inf, np.inf]])
+ diag9 = np.array([[-np.inf, -np.inf], [np.inf, np.inf]])
+ diag10 = np.array([[0,1], [-np.inf, -np.inf], [np.inf, np.inf]])
+
+ match = wasserstein_distance(diag5, diag6, matching=True, internal_p=2., order=2.)[1]
+ assert np.array_equal(match, [[0, -1], [-1,0], [-1, 1], [1, 2]])
+ match = wasserstein_distance(diag5, diag7, matching=True, internal_p=2., order=2.)[1]
+ assert (match is None)
+ cost, match = wasserstein_distance(diag7, emptydiag, matching=True, internal_p=2., order=2.3)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(emptydiag, diag7, matching=True, internal_p=2.42, order=2.)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(diag8, diag9, matching=True, internal_p=2., order=2.)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(diag9, diag10, matching=True, internal_p=1., order=1.)
+ assert (cost == 1)
+ assert (match == [[0, -1], [1, -1], [-1, 0], [-1, 1], [-1, 2]]) # types 4 and 5 are matched to the diagonal anyway.
+ cost, match = wasserstein_distance(diag9, emptydiag, matching=True, internal_p=2., order=2.)
+ assert (cost == 0.)
+ assert (match == [[0, -1], [1, -1]])
+
+
+def hera_wrap(**extra):
+ def fun(*args, **kwargs):
+ return hera(*args, **kwargs, **extra)
+ return fun
+
+
+def pot_wrap(**extra):
+ def fun(*args, **kwargs):
+ return pot(*args, **kwargs, **extra)
+ return fun
- assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == 5. # thank you Pythagorician triplets
- assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == 2.5
- assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == 3.5355339059327378
- assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == 1.4453593023967701
- assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == 0.9772734057168739
+def test_wasserstein_distance_pot():
+ _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) # pot with its standard args
+ _basic_wasserstein(pot_wrap(enable_autodiff=True, keep_essential_parts=False), 1e-15, test_infinity=False, test_matching=False)
- assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == 3.141592214572228
- assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == 3.
- assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == 3. # no diag matching here
- assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == np.sqrt(5)
- assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == np.sqrt(5)
- assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == np.sqrt(5)
+def test_wasserstein_distance_hera():
+ _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
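The two hera_wrap tolerances above mirror hera's own delta parameter: gudhi.hera.wasserstein_distance computes the distance only up to a relative error of delta, so the test can assert equality only up to that same relative error. A minimal standalone example:

import numpy as np
from gudhi.hera import wasserstein_distance as hera

d1 = np.array([[2.7, 3.7], [9.6, 14.0]])
d2 = np.array([[2.8, 4.45], [9.5, 14.1]])
# delta bounds the relative error of the returned value
dist = hera(d1, d2, delta=1e-12)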
diff --git a/src/python/test/test_wasserstein_with_tensors.py b/src/python/test/test_wasserstein_with_tensors.py
new file mode 100755
index 00000000..e3f1411a
--- /dev/null
+++ b/src/python/test/test_wasserstein_with_tensors.py
@@ -0,0 +1,47 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Mathieu Carriere
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.wasserstein import wasserstein_distance as pot
+import numpy as np
+import torch
+import tensorflow as tf
+
+def test_wasserstein_distance_grad():
+ diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
+ diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ assert diag1.grad is None and diag2.grad is None and diag3.grad is None
+ dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
+ dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
+ dist12.backward()
+ dist30.backward()
+ assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
+ diag4 = torch.tensor([[0., 10.]], requires_grad=True)
+ diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+ dist45.backward()
+ assert np.array_equal(diag4.grad, [[-1., -1.]])
+ assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
+ diag6 = torch.tensor([[5., 10.]], requires_grad=True)
+ pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ # assert np.array_equal(diag6.grad, [[0., 0.]])
+
+def test_wasserstein_distance_grad_tensorflow():
+ with tf.GradientTape() as tape:
+ diag4 = tf.convert_to_tensor(tf.Variable(initial_value=np.array([[0., 10.]]), trainable=True))
+ diag5 = tf.convert_to_tensor(tf.Variable(initial_value=np.array([[1., 11.], [3., 4.]]), trainable=True))
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+
+ grads = tape.gradient(dist45, [diag4, diag5])
+ assert np.array_equal(grads[0].values, [[-1., -1.]])
+ assert np.array_equal(grads[1].values, [[1., 1.], [-1., 1.]])
\ No newline at end of file
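The gradients asserted in test_wasserstein_distance_grad can be verified by hand for the order=1, internal_p=1 case:

# (0, 10) is matched to (1, 11): cost |0-1| + |10-11|, so the gradient w.r.t.
# diag4 is (-1, -1) and w.r.t. that diag5 point is (+1, +1).
# (3, 4) is matched to the diagonal: its cost reduces to (4 - 3), giving (-1, +1).
# Total cost 1 + 1 + 1 = 3, which is the asserted dist45.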
diff --git a/src/python/test/test_weighted_rips_complex.py b/src/python/test/test_weighted_rips_complex.py
new file mode 100644
index 00000000..7ef48333
--- /dev/null
+++ b/src/python/test/test_weighted_rips_complex.py
@@ -0,0 +1,63 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Yuichi Ike and Masatoshi Takenouchi
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.weighted_rips_complex import WeightedRipsComplex
+from gudhi.point_cloud.dtm import DistanceToMeasure
+import numpy as np
+from math import sqrt
+from scipy.spatial.distance import cdist
+import pytest
+
+def test_non_dtm_rips_complex():
+ dist = [[], [1]]
+ weights = [1, 100]
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ assert st.filtration([0,1]) == pytest.approx(200.0)
+
+def test_compatibility_with_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=42)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+ assert list(st.get_filtration()) == [
+ ([0], 0.0),
+ ([1], 0.0),
+ ([2], 0.0),
+ ([3], 0.0),
+ ([0, 1], 1.0),
+ ([0, 2], 1.0),
+ ([1, 3], 1.0),
+ ([2, 3], 1.0),
+ ([1, 2], sqrt(2)),
+ ([0, 3], sqrt(2)),
+ ]
+
+def test_compatibility_with_filtered_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=1.0)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+
+ assert st.__is_defined() == True
+ assert st.__is_persistence_defined() == False
+
+ assert st.num_simplices() == 8
+ assert st.num_vertices() == 4
+
+def test_dtm_rips_complex():
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dist = cdist(pts,pts)
+ dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+ r = dtm.fit_transform(dist)
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ st.persistence()
+ persistence_intervals0 = st.persistence_intervals_in_dimension(0)
+ assert persistence_intervals0 == pytest.approx(np.array([[3.16227766, 5.39834564],[3.16227766, 5.39834564], [3.16227766, float("inf")]]))
+
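The 200.0 in test_non_dtm_rips_complex can be checked by hand, assuming the DTM-filtration convention where vertex i appears at 2*w[i] and edge (i, j) at max(2*w[i], 2*w[j], d(i, j) + w[i] + w[j]):

w, d01 = [1, 100], 1
edge = max(2 * w[0], 2 * w[1], d01 + w[0] + w[1])
assert edge == 200  # the heavy vertex term 2*w[1] dominates max(2, 200, 102)

With zero weights this rule degenerates to the plain Rips filtration, which is exactly what test_compatibility_with_rips verifies.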