From 5877b4d3b7aca645ba906dfe0be598b1881d8798 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 16 Dec 2019 17:53:59 +0100 Subject: update CMakeLists and create test_wasserstein_bary --- src/python/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 9af85eac..7f9ff38f 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -52,6 +52,7 @@ if(PYTHONINTERP_FOUND) # Modules that should not be auto-imported in __init__.py set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'barycenter', ") add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}") add_gudhi_debug_info("Cython version ${CYTHON_VERSION}") @@ -210,6 +211,7 @@ if(PYTHONINTERP_FOUND) file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") + file(COPY "gudhi/barycenter.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") add_custom_command( OUTPUT gudhi.so @@ -385,6 +387,7 @@ if(PYTHONINTERP_FOUND) # Wasserstein if(OT_FOUND) add_gudhi_py_test(test_wasserstein_distance) + add_gudhi_py_test(test_wasserstein_barycenter) endif(OT_FOUND) # Representations -- cgit v1.2.3 From 68b6e3f3d641cd4a1e86f08bff96e417cc17ac59 Mon Sep 17 00:00:00 2001 From: takeshimeonerespect Date: Fri, 31 Jan 2020 08:08:43 +0100 Subject: timedelay added on fork --- src/python/CMakeLists.txt | 5 +++ src/python/doc/point_cloud.rst | 7 ++++ src/python/gudhi/point_cloud/timedelay.py | 56 +++++++++++++++++++++++++++++++ src/python/test/test_point_cloud.py | 35 +++++++++++++++++++ 4 files changed, 103 insertions(+) create mode 100644 src/python/gudhi/point_cloud/timedelay.py create mode 100755 src/python/test/test_point_cloud.py (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index b558d4c4..b23ec8a9 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -52,6 +52,7 @@ if(PYTHONINTERP_FOUND) # Modules that should not be auto-imported in __init__.py set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ") add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}") add_gudhi_debug_info("Cython version ${CYTHON_VERSION}") @@ -221,6 +222,7 @@ endif(CGAL_FOUND) file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") + file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") add_custom_command( OUTPUT gudhi.so @@ -399,6 +401,9 @@ endif(CGAL_FOUND) add_gudhi_py_test(test_representations) endif() + # Point cloud + add_gudhi_py_test(test_point_cloud) + # Documentation generation is available through sphinx - requires all modules if(SPHINX_PATH) if(MATPLOTLIB_FOUND) diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst index d668428a..55c74ff3 100644 --- 
a/src/python/doc/point_cloud.rst +++ b/src/python/doc/point_cloud.rst @@ -20,3 +20,10 @@ Subsampling :members: :special-members: :show-inheritance: + +TimeDelayEmbedding +------------------ + +.. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding + :members: + diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py new file mode 100644 index 00000000..5c7ba542 --- /dev/null +++ b/src/python/gudhi/point_cloud/timedelay.py @@ -0,0 +1,56 @@ +import numpy as np + +class TimeDelayEmbedding: + """Point cloud transformation class. + + Embeds time-series data in R^d according to Takens' embedding theorem + and returns the coordinates of each embedded point. + + Parameters + ---------- + dim : int, optional (default=3) + The embedding dimension, i.e. the `d` of R^d. + + delay : int, optional (default=1) + The time delay between two consecutive coordinates of an embedded point. + + skip : int, optional (default=1) + The number of samples to skip between two consecutive embedded points. + + """ + def __init__(self, dim=3, delay=1, skip=1): + self._dim = dim + self._delay = delay + self._skip = skip + + def __call__(self, *args, **kwargs): + return self.transform(*args, **kwargs) + + def _transform(self, ts): + """Guts of the transform method.""" + return ts[ + np.add.outer( + np.arange(0, len(ts)-self._delay*(self._dim-1), self._skip), + np.arange(0, self._dim*self._delay, self._delay)) + ] + + def transform(self, ts): + """Transform method. + + Parameters + ---------- + ts : list[float] or list[list[float]] + A single time series, or a list of time series. + + Returns + ------- + point clouds : list[list[float, float, float]] or list[list[list[float, float, float]]] + One point cloud for a single time series, or one point cloud per time series. + """ + ndts = np.array(ts) + if ndts.ndim == 1: + # A single time series. + return self._transform(ndts).tolist() + else: + # Multiple time series. + return np.apply_along_axis(self._transform, 1, ndts).tolist() diff --git a/src/python/test/test_point_cloud.py b/src/python/test/test_point_cloud.py new file mode 100755 index 00000000..2ee0c1fb --- /dev/null +++ b/src/python/test/test_point_cloud.py @@ -0,0 +1,35 @@ +from gudhi.point_cloud.timedelay import TimeDelayEmbedding + +def test_normal(): + # Sample array + ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + # Normal case.
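+ # With the defaults (dim=3, delay=1, skip=1), point i of the embedding is + # [ts[i], ts[i+1], ts[i+2]], as the assertions below spell out.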
+ prep = TimeDelayEmbedding() + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [2, 3, 4]) + assert (attractor[2] == [3, 4, 5]) + assert (attractor[3] == [4, 5, 6]) + assert (attractor[4] == [5, 6, 7]) + assert (attractor[5] == [6, 7, 8]) + assert (attractor[6] == [7, 8, 9]) + assert (attractor[7] == [8, 9, 10]) + # Delay = 3 + prep = TimeDelayEmbedding(delay=3) + attractor = prep(ts) + assert (attractor[0] == [1, 4, 7]) + assert (attractor[1] == [2, 5, 8]) + assert (attractor[2] == [3, 6, 9]) + assert (attractor[3] == [4, 7, 10]) + # Skip = 3 + prep = TimeDelayEmbedding(skip=3) + attractor = prep(ts) + assert (attractor[0] == [1, 2, 3]) + assert (attractor[1] == [4, 5, 6]) + assert (attractor[2] == [7, 8, 9]) + # Delay = 2 / Skip = 2 + prep = TimeDelayEmbedding(delay=2, skip=2) + attractor = prep(ts) + assert (attractor[0] == [1, 3, 5]) + assert (attractor[1] == [3, 5, 7]) + assert (attractor[2] == [5, 7, 9]) -- cgit v1.2.3 From 596355344e6205d02110e38a0cb7e0a94e8dbd27 Mon Sep 17 00:00:00 2001 From: takenouchi Date: Thu, 6 Feb 2020 16:00:47 +0900 Subject: modify CMakeLists.txt --- src/python/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index b23ec8a9..798e2907 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -401,8 +401,8 @@ endif(CGAL_FOUND) add_gudhi_py_test(test_representations) endif() - # Point cloud - add_gudhi_py_test(test_point_cloud) + # Time Delay + add_gudhi_py_test(test_time_delay) # Documentation generation is available through sphinx - requires all modules if(SPHINX_PATH) -- cgit v1.2.3 From b262406b0a75e39276c11f70ef1174981aa31b51 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 17 Mar 2020 17:57:17 +0100 Subject: Remove thread_local workaround --- src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 5 +-- src/Nerve_GIC/include/gudhi/GIC.h | 14 +------- .../include/gudhi/Persistent_cohomology.h | 5 +-- src/Simplex_tree/include/gudhi/Simplex_tree.h | 12 ++----- src/cmake/modules/GUDHI_compilation_flags.cmake | 37 ---------------------- src/python/CMakeLists.txt | 10 ------ 6 files changed, 5 insertions(+), 78 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h index 7f96c94c..1486cefd 100644 --- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h +++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h @@ -61,10 +61,7 @@ namespace Gudhi { namespace alpha_complex { -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL -thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001; +thread_local double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001; // Value_from_iterator returns the filtration value from an iterator on alpha shapes values // diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h index 2a6d4788..9a4c813d 100644 --- a/src/Nerve_GIC/include/gudhi/GIC.h +++ b/src/Nerve_GIC/include/gudhi/GIC.h @@ -139,19 +139,9 @@ class Cover_complex { for (boost::tie(ei, ei_end) = boost::edges(G); ei != ei_end; ++ei) boost::remove_edge(*ei, G); } - // Thread local is not available on XCode version < V.8 - // If not available, random engine is a class member. 
-#ifndef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::default_random_engine re; -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - // Find random number in [0,1]. double GetUniform() { - // Thread local is not available on XCode version < V.8 - // If available, random engine is defined for each thread. -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL thread_local std::default_random_engine re; -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL std::uniform_real_distribution Dist(0, 1); return Dist(re); } @@ -456,9 +446,7 @@ class Cover_complex { if (distances.size() == 0) compute_pairwise_distances(distance); - // This cannot be parallelized if thread_local is not defined - // thread_local is not defined for XCode < v.8 - #if defined(GUDHI_USE_TBB) && defined(GUDHI_CAN_USE_CXX11_THREAD_LOCAL) + #ifdef GUDHI_USE_TBB std::mutex deltamutex; tbb::parallel_for(0, N, [&](int i){ std::vector samples(m); diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h index b1ded5ae..ca4bc10d 100644 --- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h +++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h @@ -288,10 +288,7 @@ class Persistent_cohomology { // with multiplicity. We used to sum the coefficients directly in // annotations_in_boundary by using a map, we now do it later. typedef std::pair annotation_t; -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector annotations_in_boundary; + thread_local std::vector annotations_in_boundary; annotations_in_boundary.clear(); int sign = 1 - 2 * (dim_sigma % 2); // \in {-1,1} provides the sign in the // alternate sum in the boundary. diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index b7fb9002..2adc8354 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -765,12 +765,7 @@ class Simplex_tree { if (first == last) return { null_simplex(), true }; // FIXME: false would make more sense to me. 
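// Note: the thread_local scratch vector kept below is deliberately reused across calls; clear() preserves its capacity, so repeated queries avoid reallocating.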
- // Copy before sorting - // Thread local is not available on XCode version < V.8 - It will slow down computation -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector copy; + thread_local std::vector copy; copy.clear(); copy.insert(copy.end(), first, last); std::sort(copy.begin(), copy.end()); @@ -1133,10 +1128,7 @@ class Simplex_tree { Dictionary_it next = siblings->members().begin(); ++next; -#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL - thread_local -#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL - std::vector > inter; + thread_local std::vector > inter; for (Dictionary_it s_h = siblings->members().begin(); s_h != siblings->members().end(); ++s_h, ++next) { Simplex_handle root_sh = find_vertex(s_h->first); diff --git a/src/cmake/modules/GUDHI_compilation_flags.cmake b/src/cmake/modules/GUDHI_compilation_flags.cmake index 34c2e065..567fbc40 100644 --- a/src/cmake/modules/GUDHI_compilation_flags.cmake +++ b/src/cmake/modules/GUDHI_compilation_flags.cmake @@ -1,7 +1,6 @@ # This files manage compilation flags required by GUDHI include(TestCXXAcceptsFlag) -include(CheckCXXSourceCompiles) # add a compiler flag only if it is accepted macro(add_cxx_compiler_flag _flag) @@ -12,32 +11,6 @@ macro(add_cxx_compiler_flag _flag) endif() endmacro() -function(can_cgal_use_cxx11_thread_local) - # This is because of https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/tss.h - # CGAL is using boost thread if thread_local is not ready (requires XCode 8 for Mac). - # The test in https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/config.h - # #if __has_feature(cxx_thread_local) || \ - # ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \ - # ( _MSC_VER >= 1900 ) - # #define CGAL_CAN_USE_CXX11_THREAD_LOCAL - # #endif - set(CGAL_CAN_USE_CXX11_THREAD_LOCAL " - int main() { - #ifndef __has_feature - #define __has_feature(x) 0 // Compatibility with non-clang compilers. 
- #endif - #if __has_feature(cxx_thread_local) || \ - ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \ - ( _MSC_VER >= 1900 ) - bool has_feature_thread_local = true; - #else - // Explicit error of compilation for CMake test purpose - has_feature_thread_local is not defined - #endif - bool result = has_feature_thread_local; - } ") - check_cxx_source_compiles("${CGAL_CAN_USE_CXX11_THREAD_LOCAL}" CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT) -endfunction() - set (CMAKE_CXX_STANDARD 14) enable_testing() @@ -58,16 +31,6 @@ if (DEBUG_TRACES) add_definitions(-DDEBUG_TRACES) endif() -set(GUDHI_CAN_USE_CXX11_THREAD_LOCAL " - int main() { - thread_local int result = 0; - return result; - } ") -check_cxx_source_compiles("${GUDHI_CAN_USE_CXX11_THREAD_LOCAL}" GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT) -if (GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT) - add_definitions(-DGUDHI_CAN_USE_CXX11_THREAD_LOCAL) -endif() - if(CMAKE_BUILD_TYPE MATCHES Debug) message("++ Debug compilation flags are: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") else() diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 22af3ec9..f00966a5 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -128,16 +128,6 @@ if(PYTHONINTERP_FOUND) endif () if(CGAL_FOUND) - can_cgal_use_cxx11_thread_local() - if (NOT CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT) - if(CMAKE_BUILD_TYPE MATCHES Debug) - add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_DEBUG}") - else() - add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_RELEASE}") - endif() - message("** Add Boost ${Boost_LIBRARY_DIRS}") - set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${Boost_LIBRARY_DIRS}', ") - endif() # Add CGAL compilation args if(CGAL_HEADER_ONLY) add_gudhi_debug_info("CGAL header only version ${CGAL_VERSION}") -- cgit v1.2.3 From ec4a9583adaa73c01b05a4b30425581ed7256379 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 24 Mar 2020 14:50:53 +0100 Subject: Remove min_persistence from generators It is supposed to be handled in persistence() already. 
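Both generator accessors lose their min_persistence argument: thresholding is meant to happen once, in persistence(), not again in the generator queries. A minimal sketch of the intended call sequence (st is a hypothetical SimplexTree):

    st.persistence(min_persistence=-1)  # filtering is handled here
    regular, essential = st.lower_star_persistence_generators()
    generators = st.flag_persistence_generators()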
--- src/python/CMakeLists.txt | 1 + src/python/gudhi/simplex_tree.pxd | 4 ++-- src/python/gudhi/simplex_tree.pyx | 18 ++++-------------- src/python/include/Persistent_cohomology_interface.h | 8 ++------ src/python/test/test_simplex_generators.py | 2 +- 5 files changed, 10 insertions(+), 23 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index f00966a5..fb219884 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -374,6 +374,7 @@ if(PYTHONINTERP_FOUND) ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py) add_gudhi_py_test(test_simplex_tree) + add_gudhi_py_test(test_simplex_generators) # Witness add_test(NAME witness_complex_from_nearest_landmark_table_py_test diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 44789365..4038b41d 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -75,5 +75,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi": vector[pair[double,double]] intervals_in_dimension(int dimension) void write_output_diagram(string diagram_file_name) vector[pair[vector[int], vector[int]]] persistence_pairs() - pair[vector[vector[int]], vector[vector[int]]] lower_star_generators(double) - pair[vector[vector[int]], vector[vector[int]]] flag_generators(double) + pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() + pair[vector[vector[int]], vector[vector[int]]] flag_generators() diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx index faa9f9d8..beb40bc4 100644 --- a/src/python/gudhi/simplex_tree.pyx +++ b/src/python/gudhi/simplex_tree.pyx @@ -526,15 +526,10 @@ cdef class SimplexTree: print("intervals_in_dim function requires persistence function" " to be launched first.") - def lower_star_persistence_generators(self, min_persistence=0.): + def lower_star_persistence_generators(self): """Assuming this is a lower-star filtration, this function returns the persistence pairs, where each simplex is replaced with the vertex that gave it its filtration value. - :param min_persistence: The minimum persistence value to take into - account (strictly greater than min_persistence). Default value is - 0.0. - Set min_persistence to -1.0 to see all values. - :type min_persistence: float. :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity, and second the essential features, grouped by dimension, with one vertex each :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]] @@ -542,22 +537,17 @@ cdef class SimplexTree: :note: lower_star_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.lower_star_generators(min_persistence) + gen = self.pcohptr.lower_star_generators() normal = [np_array(d).reshape(-1,2) for d in gen.first] infinite = [np_array(d) for d in gen.second] return (normal, infinite) else: print("lower_star_persistence_generators() requires that persistence() be called first.") - def flag_persistence_generators(self, min_persistence=0.): + def flag_persistence_generators(self): """Assuming this is a flag complex, this function returns the persistence pairs, where each simplex is replaced with the vertices of the edges that gave it its filtration value. - :param min_persistence: The minimum persistence value to take into - account (strictly greater than min_persistence). 
Default value is - 0.0. - Set min_persistence to -1.0 to see all values. - :type min_persistence: float. :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death; then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity; then the connected components, with one vertex each; @@ -567,7 +557,7 @@ cdef class SimplexTree: :note: flag_persistence_generators requires that `persistence()` be called first. """ if self.pcohptr != NULL: - gen = self.pcohptr.flag_generators(min_persistence) + gen = self.pcohptr.flag_generators() if len(gen.first) == 0: normal0 = numpy.empty((0,3)) normals = [] diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h index 3ce40af5..3074389c 100644 --- a/src/python/include/Persistent_cohomology_interface.h +++ b/src/python/include/Persistent_cohomology_interface.h @@ -100,7 +100,7 @@ persistent_cohomology::Persistent_cohomology>, std::vector>> Generators; - Generators lower_star_generators(double min_persistence) { + Generators lower_star_generators() { Generators out; // diags[i] should be interpreted as vector> auto& diags = out.first; @@ -109,8 +109,6 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); - if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) - continue; int dim = stptr_->dimension(s); auto v = stptr_->vertex_with_same_filtration(s); if(t == stptr_->null_simplex()) { @@ -128,7 +126,7 @@ persistent_cohomology::Persistent_cohomology> and other diags[i] as vector> auto& diags = out.first; @@ -137,8 +135,6 @@ persistent_cohomology::Persistent_cohomology(pair); auto t = std::get<1>(pair); - if(stptr_->filtration(t) - stptr_->filtration(s) <= min_persistence) - continue; int dim = stptr_->dimension(s); bool infinite = t == stptr_->null_simplex(); if(infinite) { diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py index efb5f8e3..e3bdc094 100755 --- a/src/python/test/test_simplex_generators.py +++ b/src/python/test/test_simplex_generators.py @@ -37,7 +37,7 @@ def test_lower_star_generators(): st.assign_filtration([1], 2) st.make_filtration_non_decreasing() st.persistence(min_persistence=-1) - g = st.lower_star_persistence_generators(min_persistence=-1) + g = st.lower_star_persistence_generators() assert len(g[0]) == 2 assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]]) assert np.array_equal(g[0][1], [[1, 1]]) -- cgit v1.2.3 From c8c942c43643131a7ef9899826a7095e497150fe Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:10:26 +0100 Subject: cmake --- .../modules/GUDHI_third_party_libraries.cmake | 3 + src/python/CMakeLists.txt | 14 ++ src/python/gudhi/point_cloud/dtm.py | 40 +++++ src/python/gudhi/point_cloud/knn.py | 193 +++++++++++++++++++++ src/python/test/test_dtm.py | 32 ++++ 5 files changed, 282 insertions(+) create mode 100644 src/python/gudhi/point_cloud/dtm.py create mode 100644 src/python/gudhi/point_cloud/knn.py create mode 100755 src/python/test/test_dtm.py (limited to 'src/python/CMakeLists.txt') diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index 2d010483..c2039674 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -160,6 +160,9 @@ if( PYTHONINTERP_FOUND ) find_python_module("sklearn") find_python_module("ot") 
find_python_module("pybind11") + find_python_module("torch") + find_python_module("hnswlib") + find_python_module("pykeops") endif() if(NOT GUDHI_PYTHON_PATH) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index f00966a5..d26d3e6e 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -78,6 +78,15 @@ if(PYTHONINTERP_FOUND) if(OT_FOUND) add_gudhi_debug_info("POT version ${OT_VERSION}") endif() + if(HNSWLIB_FOUND) + add_gudhi_debug_info("HNSWlib version ${OT_VERSION}") + endif() + if(TORCH_FOUND) + add_gudhi_debug_info("PyTorch version ${OT_VERSION}") + endif() + if(PYKEOPS_FOUND) + add_gudhi_debug_info("PyKeOps version ${OT_VERSION}") + endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ") @@ -399,6 +408,11 @@ if(PYTHONINTERP_FOUND) # Time Delay add_gudhi_py_test(test_time_delay) + # DTM + if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + add_gudhi_py_test(test_dtm) + endif() + # Documentation generation is available through sphinx - requires all modules if(SPHINX_PATH) if(MATPLOTLIB_FOUND) diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py new file mode 100644 index 00000000..08f9ea60 --- /dev/null +++ b/src/python/gudhi/point_cloud/dtm.py @@ -0,0 +1,40 @@ +from .knn import KNN + + +class DTM: + def __init__(self, k, q=2, **kwargs): + """ + Args: + q (float): order used to compute the distance to measure. Defaults to 2. + kwargs: Same parameters as KNN, except that metric="neighbors" means that transform() expects an array with the distances to the k nearest neighbors. + """ + self.k = k + self.q = q + self.params = kwargs + + def fit_transform(self, X, y=None): + return self.fit(X).transform(X) + + def fit(self, X, y=None): + """ + Args: + X (numpy.array): coordinates for mass points + """ + if self.params.setdefault("metric", "euclidean") != "neighbors": + self.knn = KNN(self.k, return_index=False, return_distance=True, **self.params) + self.knn.fit(X) + return self + + def transform(self, X): + """ + Args: + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed", or distances to the k nearest neighbors if metric is "neighbors" (if the array has more than k columns, the remaining ones are ignored). + """ + if self.params["metric"] == "neighbors": + distances = X[:, : self.k] + else: + distances = self.knn.transform(X) + distances = distances ** self.q + dtm = distances.sum(-1) / self.k + dtm = dtm ** (1.0 / self.q) + return dtm diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py new file mode 100644 index 00000000..57078f1e --- /dev/null +++ b/src/python/gudhi/point_cloud/knn.py @@ -0,0 +1,193 @@ +import numpy + + +class KNN: + def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs): + """ + Args: + k (int): number of neighbors (including the point itself). + return_index (bool): if True, return the index of each neighbor. + return_distance (bool): if True, return the distance to each neighbor. + implementation (str): Choice of the library that does the real work. + + * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes + large (10+) but the number of points remains low (less than a million).
+ Only "minkowski" and its aliases are supported. + * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported. + * 'sklearn' for scikit-learn's NearestNeighbors. + Note that this provides in particular an option algorithm="brute". + * 'hnsw' for hnswlib.Index. It is very fast but does not provide guarantees. + Only supports "euclidean" for now. + * None will try to select a sensible one (scipy if possible, scikit-learn otherwise). + metric (str): see `sklearn.neighbors.NearestNeighbors`. + eps (float): relative error when computing nearest neighbors with the cKDTree. + p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2. + n_jobs (int): Number of jobs to schedule for parallel processing of nearest neighbors on the CPU. + If -1 is given all processors are used. Default: 1. + + Additional parameters are forwarded to the backends. + """ + self.k = k + self.return_index = return_index + self.return_distance = return_distance + self.metric = metric + self.params = kwargs + # canonicalize + if metric == "euclidean": + self.params["p"] = 2 + self.metric = "minkowski" + elif metric == "manhattan": + self.params["p"] = 1 + self.metric = "minkowski" + elif metric == "chebyshev": + self.params["p"] = numpy.inf + self.metric = "minkowski" + elif metric == "minkowski": + self.params["p"] = kwargs.get("p", 2) + if self.params.get("implementation") in {"keops", "ckdtree"}: + assert self.metric == "minkowski" + if self.params.get("implementation") == "hnsw": + assert self.metric == "minkowski" and self.params["p"] == 2 + if not self.params.get("implementation"): + if self.metric == "minkowski": + self.params["implementation"] = "ckdtree" + else: + self.params["implementation"] = "sklearn" + + def fit_transform(self, X, y=None): + return self.fit(X).transform(X) + + def fit(self, X, y=None): + """ + Args: + X (numpy.array): coordinates for reference points + """ + self.ref_points = X + if self.params.get("implementation") == "ckdtree": + # sklearn could handle this, but it is much slower + from scipy.spatial import cKDTree + self.kdtree = cKDTree(X) + + if self.params.get("implementation") == "sklearn" and self.metric != "precomputed": + # FIXME: sklearn badly handles "precomputed" + from sklearn.neighbors import NearestNeighbors + + nargs = {k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}} + self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs) + self.nn.fit(X) + + if self.params.get("implementation") == "hnsw": + import hnswlib + self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances + self.graph.init_index(len(X), **{k:v for k,v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}) + n = self.params.get("num_threads") + if n is None: + n = self.params.get("n_jobs", 1) + self.params["num_threads"] = n + self.graph.add_items(X, num_threads=n) + + return self + + def transform(self, X): + """ + Args: + X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed" + """ + metric = self.metric + k = self.k + + if metric == "precomputed": + # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural. 
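+ # numpy.argpartition below moves the k smallest entries of each row to the front in no particular order; they are then sorted explicitly.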
+ X = numpy.array(X) + if self.return_index: + neighbors = numpy.argpartition(X, k - 1)[:, 0:k] + distances = numpy.take_along_axis(X, neighbors, axis=-1) + ngb_order = numpy.argsort(distances, axis=-1) + neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1) + if self.return_distance: + distances = numpy.take_along_axis(distances, ngb_order, axis=-1) + return neighbors, distances + else: + return neighbors + if self.return_distance: + distances = numpy.partition(X, k - 1)[:, 0:k] + # partition is not guaranteed to sort the lower half, although it often does + distances.sort(axis=-1) + return distances + return None + + if self.params.get("implementation") == "hnsw": + ef = self.params.get("ef") + if ef is not None: + self.graph.set_ef(ef) + neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"]) + # The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn, + # which returns a priority_queue, and then fills the return array backwards with top/pop on the queue. + if self.return_index: + if self.return_distance: + return neighbors, numpy.sqrt(distances) + else: + return neighbors + if self.return_distance: + return numpy.sqrt(distances) + return None + + if self.params.get("implementation") == "keops": + import torch + from pykeops.torch import LazyTensor + + # 'float64' is slow except on super expensive GPUs. Allow it with some param? + XX = torch.tensor(X, dtype=torch.float32) + if X is self.ref_points: + YY = XX + else: + YY = torch.tensor(self.ref_points, dtype=torch.float32) + + p = self.params["p"] + if p == numpy.inf: + # Requires a version of pykeops strictly more recent than 1.3 + mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1) + elif p == 2: # Any even integer? + mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1) + else: + mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1) + + if self.return_index: + if self.return_distance: + distances, neighbors = mat.Kmin_argKmin(k, dim=1) + if p != numpy.inf: + distances = distances ** (1.0 / p) + return neighbors, distances + else: + neighbors = mat.argKmin(k, dim=1) + return neighbors + if self.return_distance: + distances = mat.Kmin(k, dim=1) + if p != numpy.inf: + distances = distances ** (1.0 / p) + return distances + return None + # FIXME: convert everything back to numpy arrays or not? + + if hasattr(self, "kdtree"): + qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}} + distances, neighbors = self.kdtree.query(X, k=self.k, **qargs) + if self.return_index: + if self.return_distance: + return neighbors, distances + else: + return neighbors + if self.return_distance: + return distances + return None + + if self.return_distance: + distances, neighbors = self.nn.kneighbors(X, return_distance=True) + if self.return_index: + return neighbors, distances + else: + return distances + if self.return_index: + neighbors = self.nn.kneighbors(X, return_distance=False) + return neighbors + return None diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py new file mode 100755 index 00000000..57fdd131 --- /dev/null +++ b/src/python/test/test_dtm.py @@ -0,0 +1,32 @@ +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ Author(s): Marc Glisse + + Copyright (C) 2020 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +from gudhi.point_cloud.dtm import DTM +import numpy + + +def test_dtm_euclidean(): + pts = numpy.random.rand(1000,4) + k = 3 + dtm = DTM(k,implementation="ckdtree") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="sklearn") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="sklearn",algorithm="brute") + print(dtm.fit_transform(pts)) + dtm = DTM(k,implementation="hnsw") + print(dtm.fit_transform(pts)) + from scipy.spatial.distance import cdist + d = cdist(pts,pts) + dtm = DTM(k,metric="precomputed") + print(dtm.fit_transform(d)) + dtm = DTM(k,implementation="keops") + print(dtm.fit_transform(pts)) + -- cgit v1.2.3 From 5c4c398b99fe1b157d64cd43a4977ce1504ca795 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 26 Mar 2020 22:25:28 +0100 Subject: HNSWlib doesn't define __version__ --- src/cmake/modules/GUDHI_third_party_libraries.cmake | 21 ++++++++++++++++++++- src/python/CMakeLists.txt | 7 ++++--- 2 files changed, 24 insertions(+), 4 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index c2039674..a931b3a1 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -150,6 +150,25 @@ function( find_python_module PYTHON_MODULE_NAME ) endif() endfunction( find_python_module ) +# For modules that do not define module.__version__ +function( find_python_module_no_version PYTHON_MODULE_NAME ) + string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}" + RESULT_VARIABLE PYTHON_MODULE_RESULT + ERROR_VARIABLE PYTHON_MODULE_ERROR) + if(PYTHON_MODULE_RESULT EQUAL 0) + # Remove carriage return + message ("++ Python module ${PYTHON_MODULE_NAME} found") + set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE) + else() + message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME} + - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT} + - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}") + set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE) + endif() +endfunction( find_python_module_no_version ) + if( PYTHONINTERP_FOUND ) find_python_module("cython") find_python_module("pytest") @@ -161,8 +180,8 @@ if( PYTHONINTERP_FOUND ) find_python_module("ot") find_python_module("pybind11") find_python_module("torch") - find_python_module("hnswlib") find_python_module("pykeops") + find_python_module_no_version("hnswlib") endif() if(NOT GUDHI_PYTHON_PATH) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d26d3e6e..ec0ab1ca 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -79,13 +79,14 @@ if(PYTHONINTERP_FOUND) add_gudhi_debug_info("POT version ${OT_VERSION}") endif() if(HNSWLIB_FOUND) - add_gudhi_debug_info("HNSWlib version ${OT_VERSION}") + # Does not have a version number... 
+ add_gudhi_debug_info("HNSWlib found") endif() if(TORCH_FOUND) - add_gudhi_debug_info("PyTorch version ${OT_VERSION}") + add_gudhi_debug_info("PyTorch version ${TORCH_VERSION}") endif() if(PYKEOPS_FOUND) - add_gudhi_debug_info("PyKeOps version ${OT_VERSION}") + add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}") endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") -- cgit v1.2.3 From 68839b95e7751afd04155cd2565cc53362f01fa2 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 28 Mar 2020 10:41:50 +0100 Subject: Missing test --- src/python/CMakeLists.txt | 1 + src/python/test/test_knn.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100755 src/python/test/test_knn.py (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index ec0ab1ca..d7a6a4db 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -411,6 +411,7 @@ if(PYTHONINTERP_FOUND) # DTM if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + add_gudhi_py_test(test_knn) add_gudhi_py_test(test_dtm) endif() diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py new file mode 100755 index 00000000..e455fb48 --- /dev/null +++ b/src/python/test/test_knn.py @@ -0,0 +1,82 @@ +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + Author(s): Marc Glisse + + Copyright (C) 2020 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +from gudhi.point_cloud.knn import KNN +import numpy as np +import pytest + + +def test_knn_explicit(): + base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) + query = np.array([[1.0, 1], [2, 2], [4, 4]]) + knn = KNN(2, metric="manhattan", return_distance=True, return_index=True) + knn.fit(base) + r = knn.transform(query) + assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]])) + assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]])) + + knn = KNN(2, metric="chebyshev", return_distance=True, return_index=False) + knn.fit(base) + r = knn.transform(query) + assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) + r = ( + KNN(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops") + .fit(base) + .transform(query) + ) + assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]])) + + knn = KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True) + knn.fit(base) + r = knn.transform(query) + assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) + r = ( + KNN(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops") + .fit(base) + .transform(query) + ) + assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]]) + + dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]]) + knn = KNN(2, metric="precomputed", return_index=True, return_distance=False) + r = knn.fit_transform(dist) + assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]]) + knn = KNN(2, metric="precomputed", return_index=True, return_distance=True) + r = knn.fit_transform(dist) + assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]]) + assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]]) + + +def test_knn_compare(): + base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]]) + query = np.array([[1.0, 1], [2, 2], [4, 4]]) + r0 = KNN(2, implementation="ckdtree", 
return_index=True, return_distance=False).fit(base).transform(query) + r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=False).fit(base).transform(query) + r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query) + r3 = KNN(2, implementation="keops", return_index=True, return_distance=False).fit(base).transform(query) + assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3) + + r0 = KNN(2, implementation="ckdtree", return_index=True, return_distance=True).fit(base).transform(query) + r1 = KNN(2, implementation="sklearn", return_index=True, return_distance=True).fit(base).transform(query) + r2 = KNN(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query) + r3 = KNN(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query) + assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0]) + d0 = pytest.approx(r0[1]) + assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0 + + +def test_knn_nop(): + # This doesn't look super useful... + p = np.array([[0.0]]) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="sklearn").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="ckdtree").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5).fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, implementation="keops").fit_transform(p) + assert None is KNN(k=1, return_index=False, return_distance=False, metric="precomputed").fit_transform(p) -- cgit v1.2.3 From 266f1eb706ecf31733acbcdded3b04d8d270fb60 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 31 Mar 2020 17:43:53 +0200 Subject: update CMakeLists to make things compatible with wasserstein/ repo --- src/python/CMakeLists.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index b7d43bea..a91ca30a 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -56,7 +56,6 @@ if(PYTHONINTERP_FOUND) # Modules that should not be auto-imported in __init__.py set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ") - set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'barycenter', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ") add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}") @@ -217,8 +216,7 @@ if(PYTHONINTERP_FOUND) # Other .py files file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") - file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") - file(COPY "gudhi/barycenter.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") + file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") add_custom_command( -- cgit v1.2.3 From b908205e85bbe29c8d18ad1f38e783a1327434d7 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 14 Apr 2020 17:00:27 +0200 Subject: EagerPy in cmake --- src/cmake/modules/GUDHI_third_party_libraries.cmake | 1 + 
src/python/CMakeLists.txt | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake index a931b3a1..0abe66b7 100644 --- a/src/cmake/modules/GUDHI_third_party_libraries.cmake +++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake @@ -181,6 +181,7 @@ if( PYTHONINTERP_FOUND ) find_python_module("pybind11") find_python_module("torch") find_python_module("pykeops") + find_python_module("eagerpy") find_python_module_no_version("hnswlib") endif() diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d7a6a4db..99e8b57c 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -88,6 +88,9 @@ if(PYTHONINTERP_FOUND) if(PYKEOPS_FOUND) add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}") endif() + if(EAGERPY_FOUND) + add_gudhi_debug_info("EagerPy version ${EAGERPY_VERSION}") + endif() set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ") set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ") @@ -410,7 +413,7 @@ if(PYTHONINTERP_FOUND) add_gudhi_py_test(test_time_delay) # DTM - if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND) + if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND AND EAGERPY_FOUND) add_gudhi_py_test(test_knn) add_gudhi_py_test(test_dtm) endif() -- cgit v1.2.3 From bac284bf7f65c40f03ec8e47316d4f0fd0059c91 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 20 Apr 2020 19:12:35 +0200 Subject: Check that dependencies are present before testing --- src/python/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 10dcd161..5ab63e5d 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -401,7 +401,9 @@ if(PYTHONINTERP_FOUND) # Wasserstein if(OT_FOUND AND PYBIND11_FOUND) - add_gudhi_py_test(test_wasserstein_distance) + if(TORCH_FOUND AND EAGERPY_FOUND) + add_gudhi_py_test(test_wasserstein_distance) + endif() add_gudhi_py_test(test_wasserstein_barycenter) endif() -- cgit v1.2.3 From 3e713cee177e10536ae8fc231e56fa04769a35ee Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 20 Apr 2020 22:06:38 +0200 Subject: Fix #279 --- src/python/CMakeLists.txt | 129 +++++++++++++++++++++++----------------------- 1 file changed, 65 insertions(+), 64 deletions(-) (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 10dcd161..055d5b23 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -242,6 +242,71 @@ if(PYTHONINTERP_FOUND) install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)") + # Documentation generation is available through sphinx - requires all modules + # Make it first, as the sphinx test is by far the longest test, which is nice when testing in parallel + if(SPHINX_PATH) + if(MATPLOTLIB_FOUND) + if(NUMPY_FOUND) + if(SCIPY_FOUND) + if(SKLEARN_FOUND) + if(OT_FOUND) + if(PYBIND11_FOUND) + if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") + # User warning - Sphinx is a static page generator, and configured to work fine with user_version + # Images and biblio warnings because they are not found on the developer version + if (GUDHI_PYTHON_PATH STREQUAL "src/python") + set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for the user version, you run it on the developer version. Images and biblio will be missing") + endif() + # sphinx target requires gudhi.so, because conf.py reads gudhi version from it + add_custom_target(sphinx + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" + COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) + + add_test(NAME sphinx_py_test + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" + ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) + + # Set missing or not modules + set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") + else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) + else(PYBIND11_FOUND) + message("++ Python documentation module will not be compiled because pybind11 was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(PYBIND11_FOUND) + else(OT_FOUND) + message("++ Python documentation module will not be compiled because POT was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(OT_FOUND) + else(SKLEARN_FOUND) + message("++ Python documentation module will not be compiled because scikit-learn was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SKLEARN_FOUND) + else(SCIPY_FOUND) + message("++ Python documentation module will not be compiled because scipy was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SCIPY_FOUND) + else(NUMPY_FOUND) + message("++ Python documentation module will not be compiled because numpy was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(NUMPY_FOUND) + else(MATPLOTLIB_FOUND) + message("++ Python documentation module will not be compiled because matplotlib was not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(MATPLOTLIB_FOUND) + else(SPHINX_PATH) + message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found") + set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") + endif(SPHINX_PATH) + + # Test examples if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) # Bottleneck and Alpha @@ -419,70 +484,6 @@ if(PYTHONINTERP_FOUND) add_gudhi_py_test(test_dtm) endif() - # Documentation generation is available through sphinx - requires all modules - if(SPHINX_PATH) - if(MATPLOTLIB_FOUND) -
if(NUMPY_FOUND) - if(SCIPY_FOUND) - if(SKLEARN_FOUND) - if(OT_FOUND) - if(PYBIND11_FOUND) - if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/") - # User warning - Sphinx is a static pages generator, and configured to work fine with user_version - # Images and biblio warnings because not found on developper version - if (GUDHI_PYTHON_PATH STREQUAL "src/python") - set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss") - endif() - # sphinx target requires gudhi.so, because conf.py reads gudhi version from it - add_custom_target(sphinx - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx - DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so" - COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM) - - add_test(NAME sphinx_py_test - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}" - ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest) - - # Set missing or not modules - set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES") - else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - else(PYBIND11_FOUND) - message("++ Python documentation module will not be compiled because pybind11 was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(PYBIND11_FOUND) - else(OT_FOUND) - message("++ Python documentation module will not be compiled because POT was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(OT_FOUND) - else(SKLEARN_FOUND) - message("++ Python documentation module will not be compiled because scikit-learn was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(SKLEARN_FOUND) - else(SCIPY_FOUND) - message("++ Python documentation module will not be compiled because scipy was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(SCIPY_FOUND) - else(NUMPY_FOUND) - message("++ Python documentation module will not be compiled because numpy was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(NUMPY_FOUND) - else(MATPLOTLIB_FOUND) - message("++ Python documentation module will not be compiled because matplotlib was not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") - endif(MATPLOTLIB_FOUND) - else(SPHINX_PATH) - message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found") - set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE 
INTERNAL "GUDHI_MISSING_MODULES") - endif(SPHINX_PATH) - - # Set missing or not modules set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES") else(CYTHON_FOUND) -- cgit v1.2.3 From 47e5ac79af3a354358515c0213b28848f878fde6 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Wed, 6 May 2020 22:59:36 +0200 Subject: Reimplement the bottleneck python wrapper with pybind11 --- src/python/CMakeLists.txt | 33 ++++++++++--------- src/python/gudhi/bottleneck.cc | 51 +++++++++++++++++++++++++++++ src/python/gudhi/bottleneck.pyx | 48 --------------------------- src/python/gudhi/hera.cc | 32 +----------------- src/python/include/pybind11_diagram_utils.h | 39 ++++++++++++++++++++++ src/python/setup.py.in | 19 +++++++++-- 6 files changed, 125 insertions(+), 97 deletions(-) create mode 100644 src/python/gudhi/bottleneck.cc delete mode 100644 src/python/gudhi/bottleneck.pyx create mode 100644 src/python/include/pybind11_diagram_utils.h (limited to 'src/python/CMakeLists.txt') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index d712e189..976a8b52 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -34,6 +34,7 @@ endfunction( add_gudhi_debug_info ) if(PYTHONINTERP_FOUND) if(PYBIND11_FOUND) add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}") + set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") endif() if(CYTHON_FOUND) @@ -46,7 +47,6 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'reader_utils', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'witness_complex', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'strong_witness_complex', ") - set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'nerve_gic', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'subsampling', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'tangential_complex', ") @@ -120,24 +120,25 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ") endif (EIGEN3_FOUND) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'off_reader', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'simplex_tree', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'rips_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'cubical_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'periodic_cubical_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'reader_utils', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'witness_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'strong_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'simplex_tree', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'rips_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'cubical_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'periodic_cubical_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'reader_utils', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'strong_witness_complex', ") + set(GUDHI_PYBIND11_MODULES 
"${GUDHI_PYBIND11_MODULES}'hera', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'bottleneck', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ") - set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ") + set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ") endif () if(CGAL_FOUND) diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc new file mode 100644 index 00000000..577e5e0b --- /dev/null +++ b/src/python/gudhi/bottleneck.cc @@ -0,0 +1,51 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Marc Glisse + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include + +#include + +double bottleneck(Dgm d1, Dgm d2, double epsilon) +{ + // I *think* the call to request() has to be before releasing the GIL. + auto diag1 = numpy_to_range_of_pairs(d1); + auto diag2 = numpy_to_range_of_pairs(d2); + + py::gil_scoped_release release; + + return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, epsilon); +} + +PYBIND11_MODULE(bottleneck, m) { + m.attr("__license__") = "GPL v3"; + m.def("bottleneck_distance", &bottleneck, + py::arg("diagram_1"), py::arg("diagram_2"), + py::arg("e") = (std::numeric_limits::min)(), + R"pbdoc( + This function returns the point corresponding to a given vertex. + + :param diagram_1: The first diagram. + :type diagram_1: vector[pair[double, double]] + :param diagram_2: The second diagram. + :type diagram_2: vector[pair[double, double]] + :param e: If `e` is 0, this uses an expensive algorithm to compute the + exact distance. + If `e` is not 0, it asks for an additive `e`-approximation, and + currently also allows a small multiplicative error (the last 2 or 3 + bits of the mantissa may be wrong). This version of the algorithm takes + advantage of the limited precision of `double` and is usually a lot + faster to compute, whatever the value of `e`. + + Thus, by default, `e` is the smallest positive double. + :type e: float + :rtype: float + :returns: the bottleneck distance. 
+ )pbdoc"); +} diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx deleted file mode 100644 index af011e88..00000000 --- a/src/python/gudhi/bottleneck.pyx +++ /dev/null @@ -1,48 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. -# Author(s): Vincent Rouvreau -# -# Copyright (C) 2016 Inria -# -# Modification(s): -# - YYYY/MM Author: Description of the modification - -from cython cimport numeric -from libcpp.vector cimport vector -from libcpp.utility cimport pair -import os - -__author__ = "Vincent Rouvreau" -__copyright__ = "Copyright (C) 2016 Inria" -__license__ = "GPL v3" - -cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram": - double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double) - double bottleneck(vector[pair[double, double]], vector[pair[double, double]]) - -def bottleneck_distance(diagram_1, diagram_2, e=None): - """This function returns the point corresponding to a given vertex. - - :param diagram_1: The first diagram. - :type diagram_1: vector[pair[double, double]] - :param diagram_2: The second diagram. - :type diagram_2: vector[pair[double, double]] - :param e: If `e` is 0, this uses an expensive algorithm to compute the - exact distance. - If `e` is not 0, it asks for an additive `e`-approximation, and - currently also allows a small multiplicative error (the last 2 or 3 - bits of the mantissa may be wrong). This version of the algorithm takes - advantage of the limited precision of `double` and is usually a lot - faster to compute, whatever the value of `e`. - - Thus, by default, `e` is the smallest positive double. - :type e: float - :rtype: float - :returns: the bottleneck distance. - """ - if e is None: - # Default value is the smallest double value (not 0, 0 is for exact version) - return bottleneck(diagram_1, diagram_2) - else: - # Can be 0 for exact version - return bottleneck(diagram_1, diagram_2, e) diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc index 5aec1806..ea80a9a8 100644 --- a/src/python/gudhi/hera.cc +++ b/src/python/gudhi/hera.cc @@ -8,39 +8,9 @@ * - YYYY/MM Author: Description of the modification */ -#include -#include - -#include -#include - #include // Hera -#include - -namespace py = pybind11; -typedef py::array_t Dgm; - -// Get m[i,0] and m[i,1] as a pair -static auto pairify(void* p, ssize_t h, ssize_t w) { - return [=](ssize_t i){ - char* birth = (char*)p + i * h; - char* death = birth + w; - return std::make_pair(*(double*)birth, *(double*)death); - }; -} - -inline auto numpy_to_range_of_pairs(py::array_t dgm) { - py::buffer_info buf = dgm.request(); - // shape (n,2) or (0) for empty - if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0)) - throw std::runtime_error("Diagram must be an array of size n x 2"); - // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it. - ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0; - auto cnt = boost::counting_range(0, buf.shape[0]); - return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1)); - // Be careful that the returned range cannot contain references to dead temporaries. 
diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc
index 5aec1806..ea80a9a8 100644
--- a/src/python/gudhi/hera.cc
+++ b/src/python/gudhi/hera.cc
@@ -8,39 +8,9 @@
  *   - YYYY/MM Author: Description of the modification
  */
 
-#include <pybind11/pybind11.h>
-#include <pybind11/numpy.h>
-
-#include <boost/range/counting_range.hpp>
-#include <boost/range/adaptor/transformed.hpp>
-
 #include <wasserstein.h> // Hera
-#include <utility>
-
-namespace py = pybind11;
-typedef py::array_t<double> Dgm;
-
-// Get m[i,0] and m[i,1] as a pair
-static auto pairify(void* p, ssize_t h, ssize_t w) {
-  return [=](ssize_t i){
-    char* birth = (char*)p + i * h;
-    char* death = birth + w;
-    return std::make_pair(*(double*)birth, *(double*)death);
-  };
-}
-
-inline auto numpy_to_range_of_pairs(py::array_t<double> dgm) {
-  py::buffer_info buf = dgm.request();
-  // shape (n,2) or (0) for empty
-  if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0))
-    throw std::runtime_error("Diagram must be an array of size n x 2");
-  // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it.
-  ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0;
-  auto cnt = boost::counting_range<ssize_t>(0, buf.shape[0]);
-  return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1));
-  // Be careful that the returned range cannot contain references to dead temporaries.
-}
+#include <pybind11_diagram_utils.h>
 
 double wasserstein_distance(
     Dgm d1, Dgm d2,
diff --git a/src/python/include/pybind11_diagram_utils.h b/src/python/include/pybind11_diagram_utils.h
new file mode 100644
index 00000000..d9627258
--- /dev/null
+++ b/src/python/include/pybind11_diagram_utils.h
@@ -0,0 +1,39 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ *   - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <boost/range/counting_range.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+namespace py = pybind11;
+typedef py::array_t<double> Dgm;
+
+// Get m[i,0] and m[i,1] as a pair
+static auto pairify(void* p, ssize_t h, ssize_t w) {
+  return [=](ssize_t i){
+    char* birth = (char*)p + i * h;
+    char* death = birth + w;
+    return std::make_pair(*(double*)birth, *(double*)death);
+  };
+}
+
+inline auto numpy_to_range_of_pairs(py::array_t<double> dgm) {
+  py::buffer_info buf = dgm.request();
+  // shape (n,2) or (0) for empty
+  if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0))
+    throw std::runtime_error("Diagram must be an array of size n x 2");
+  // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it.
+  ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0;
+  auto cnt = boost::counting_range<ssize_t>(0, buf.shape[0]);
+  return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1));
+  // Be careful that the returned range cannot contain references to dead temporaries.
+}
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index f968bd59..852da910 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -18,7 +18,8 @@ __author__ = "Vincent Rouvreau"
 __copyright__ = "Copyright (C) 2016 Inria"
 __license__ = "MIT"
 
-modules = [@GUDHI_PYTHON_MODULES_TO_COMPILE@]
+cython_modules = [@GUDHI_CYTHON_MODULES@]
+pybind11_modules = [@GUDHI_PYBIND11_MODULES@]
 
 source_dir='@CMAKE_CURRENT_SOURCE_DIR@/gudhi/'
 extra_compile_args=[@GUDHI_PYTHON_EXTRA_COMPILE_ARGS@]
@@ -30,7 +31,7 @@ runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@]
 
 # Create ext_modules list from module list
 ext_modules = []
-for module in modules:
+for module in cython_modules:
     ext_modules.append(Extension(
         'gudhi.' + module,
         sources = [source_dir + module + '.pyx',],
@@ -55,6 +56,20 @@ ext_modules.append(Extension(
     extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
 ))
 
+if "bottleneck" in pybind11_modules:
+    ext_modules.append(Extension(
+        'gudhi.bottleneck',
+        sources = [source_dir + 'bottleneck.cc'],
+        language = 'c++',
+        include_dirs = include_dirs +
+                       [pybind11.get_include(False), pybind11.get_include(True)],
+        extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
+        extra_link_args=extra_link_args,
+        libraries=libraries,
+        library_dirs=library_dirs,
+        runtime_library_dirs=runtime_library_dirs,
+    ))
+
 setup(
     name = 'gudhi',
     packages=find_packages(), # find_namespace_packages(include=["gudhi*"])
-- cgit v1.2.3
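numpy_to_range_of_pairs, now shared by hera.cc and bottleneck.cc via pybind11_diagram_utils.h, accepts a diagram as an array of shape (n, 2), or shape (0,) for an empty diagram, and throws std::runtime_error otherwise, which pybind11 surfaces as a Python RuntimeError. A quick sketch of that contract; the behavior is inferred from the shape checks above:

    import numpy as np
    from gudhi import bottleneck_distance

    diag = np.array([[0.0, 1.0], [2.0, 5.0]])  # shape (2, 2): accepted
    empty = np.array([])                       # shape (0,): accepted, empty diagram

    print(bottleneck_distance(diag, empty))

    try:
        bottleneck_distance(np.zeros((3, 3)), diag)  # wrong shape: rejected
    except RuntimeError as err:
        print(err)  # Diagram must be an array of size n x 2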