Diffstat (limited to 'src/python/test')
-rwxr-xr-x  src/python/test/test_alpha_complex.py               |   5
-rwxr-xr-x  src/python/test/test_cover_complex.py               |   4
-rwxr-xr-x  src/python/test/test_cubical_complex.py             |  16
-rwxr-xr-x  src/python/test/test_dtm.py                         |  68
-rwxr-xr-x  src/python/test/test_euclidean_witness_complex.py   |   6
-rwxr-xr-x  src/python/test/test_knn.py                         | 130
-rwxr-xr-x  src/python/test/test_rips_complex.py                |   6
-rwxr-xr-x  src/python/test/test_simplex_generators.py          |  64
-rwxr-xr-x  src/python/test/test_simplex_tree.py                | 110
-rwxr-xr-x  src/python/test/test_subsampling.py                 |   1
-rwxr-xr-x  src/python/test/test_tangential_complex.py          |   3
-rwxr-xr-x  src/python/test/test_time_delay.py                  |  43
-rwxr-xr-x  src/python/test/test_wasserstein_barycenter.py      |  46
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py        | 109
-rw-r--r--  src/python/test/test_weighted_rips.py               |  63
15 files changed, 635 insertions, 39 deletions
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index 3761fe16..77121302 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -40,7 +40,7 @@ def test_infinite_alpha():
assert simplex_tree.num_simplices() == 11
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -53,6 +53,7 @@ def test_infinite_alpha():
([0, 1, 2], 0.5),
([1, 2, 3], 0.5),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 0.25),
@@ -105,7 +106,7 @@ def test_filtered_alpha():
else:
assert False
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
diff --git a/src/python/test/test_cover_complex.py b/src/python/test/test_cover_complex.py
index 32bc5a26..260f6a5c 100755
--- a/src/python/test/test_cover_complex.py
+++ b/src/python/test/test_cover_complex.py
@@ -9,6 +9,7 @@
"""
from gudhi import CoverComplex
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2018 Inria"
@@ -24,7 +25,8 @@ def test_empty_constructor():
def test_non_existing_file_read():
# Try to open a non existing file
cover = CoverComplex()
- assert cover.read_point_cloud("pouetpouettralala.toubiloubabdou") == False
+ with pytest.raises(FileNotFoundError):
+ cover.read_point_cloud("pouetpouettralala.toubiloubabdou")
def test_files_creation():
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index 8c1b2600..5c59db8f 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -10,6 +10,7 @@
from gudhi import CubicalComplex, PeriodicCubicalComplex
import numpy as np
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -25,9 +26,8 @@ def test_empty_constructor():
def test_non_existing_perseus_file_constructor():
# Try to open a non existing file
- cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou")
- assert cub.__is_defined() == False
- assert cub.__is_persistence_defined() == False
+ with pytest.raises(FileNotFoundError):
+ cub = CubicalComplex(perseus_file="pouetpouettralala.toubiloubabdou")
def test_dimension_or_perseus_file_constructor():
@@ -147,3 +147,13 @@ def test_connected_sublevel_sets():
periodic_dimensions = periodic_dimensions)
assert cub.persistence() == [(0, (2.0, float("inf")))]
assert cub.betti_numbers() == [1, 0, 0]
+
+def test_cubical_generators():
+ cub = CubicalComplex(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ cub.persistence()
+ g = cub.cofaces_of_persistence_pairs()
+ assert len(g[0]) == 2
+ assert len(g[1]) == 1
+ assert np.array_equal(g[0][0], np.empty(shape=[0,2]))
+ assert np.array_equal(g[0][1], np.array([[7, 4]]))
+ assert np.array_equal(g[1][0], np.array([8]))
diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py
new file mode 100755
index 00000000..bff4c267
--- /dev/null
+++ b/src/python/test/test_dtm.py
@@ -0,0 +1,68 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.dtm import DistanceToMeasure
+import numpy
+import pytest
+import torch
+
+
+def test_dtm_compare_euclidean():
+ pts = numpy.random.rand(1000, 4)
+ k = 6
+ dtm = DistanceToMeasure(k, implementation="ckdtree")
+ r0 = dtm.fit_transform(pts)
+ dtm = DistanceToMeasure(k, implementation="sklearn")
+ r1 = dtm.fit_transform(pts)
+ assert r1 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="sklearn", algorithm="brute")
+ r2 = dtm.fit_transform(pts)
+ assert r2 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="hnsw")
+ r3 = dtm.fit_transform(pts)
+ assert r3 == pytest.approx(r0, rel=0.1)
+ from scipy.spatial.distance import cdist
+
+ d = cdist(pts, pts)
+ dtm = DistanceToMeasure(k, metric="precomputed")
+ r4 = dtm.fit_transform(d)
+ assert r4 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, metric="precomputed", n_jobs=2)
+ r4b = dtm.fit_transform(d)
+ assert r4b == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="keops")
+ r5 = dtm.fit_transform(pts)
+ assert r5 == pytest.approx(r0)
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="keops", enable_autodiff=True)
+ r6 = dtm.fit_transform(pts2)
+ assert r6.detach().numpy() == pytest.approx(r0)
+ r6.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True)
+ r7 = dtm.fit_transform(pts2)
+ assert r7.detach().numpy() == pytest.approx(r0)
+ r7.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+
+
+def test_dtm_precomputed():
+ dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]])
+ dtm = DistanceToMeasure(2, q=1, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 3, 1])
+
+ dist = numpy.array([[2.0, 2], [0, 1], [3, 4]])
+ dtm = DistanceToMeasure(2, q=2, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 0.707, 3.5355], rel=0.01)
diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py
index c18d2484..f3664d39 100755
--- a/src/python/test/test_euclidean_witness_complex.py
+++ b/src/python/test/test_euclidean_witness_complex.py
@@ -40,7 +40,7 @@ def test_witness_complex():
assert landmarks[1] == euclidean_witness_complex.get_point(1)
assert landmarks[2] == euclidean_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([0, 1], 0.0),
@@ -78,13 +78,13 @@ def test_strong_witness_complex():
assert landmarks[1] == euclidean_strong_witness_complex.get_point(1)
assert landmarks[2] == euclidean_strong_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
+ assert list(simplex_tree.get_filtration()) == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
simplex_tree = euclidean_strong_witness_complex.create_simplex_tree(
max_alpha_square=100.0
)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py
new file mode 100755
index 00000000..a87ec212
--- /dev/null
+++ b/src/python/test/test_knn.py
@@ -0,0 +1,130 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.knn import KNearestNeighbors
+import numpy as np
+import pytest
+
+
+def test_knn_explicit():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
+ knn = KNearestNeighbors(2, metric="manhattan", return_distance=True, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]]))
+ assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops", enable_autodiff=True)
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+ r = (
+ KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+
+ dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+ # Second time in parallel
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, n_jobs=2)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+
+
+def test_knn_compare():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = (
+ KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query)
+ )
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3)
+
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query)
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query)
+ )
+ assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0])
+ d0 = pytest.approx(r0[1])
+ assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0
+
+
+def test_knn_nop():
+ # This doesn't look super useful...
+ p = np.array([[0.0]])
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="sklearn"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="ckdtree"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="keops"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, metric="precomputed"
+ ).fit_transform(p)
diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py
index b02a68e1..b86e7498 100755
--- a/src/python/test/test_rips_complex.py
+++ b/src/python/test/test_rips_complex.py
@@ -32,7 +32,7 @@ def test_rips_from_points():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -44,6 +44,7 @@ def test_rips_from_points():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
@@ -95,7 +96,7 @@ def test_rips_from_distance_matrix():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -107,6 +108,7 @@ def test_rips_from_distance_matrix():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py
new file mode 100755
index 00000000..8a9b4844
--- /dev/null
+++ b/src/python/test/test_simplex_generators.py
@@ -0,0 +1,64 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi
+import numpy as np
+
+
+def test_flag_generators():
+ pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03], [100, 0], [100, 3.01], [103, 0], [103.02, 3.03]])
+ r = gudhi.RipsComplex(pts, max_edge_length=4)
+ st = r.create_simplex_tree(max_dimension=50)
+ st.persistence()
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], [[2, 2, 0], [1, 1, 0], [3, 3, 1], [6, 6, 4], [5, 5, 4], [7, 7, 5]])
+ assert len(g[1]) == 1
+ assert np.array_equal(g[1][0], [[3, 2, 2, 1]])
+ assert np.array_equal(g[2], [0, 4])
+ assert len(g[3]) == 1
+ assert np.array_equal(g[3][0], [[7, 6]])
+ # Compare trivial cases (where the simplex is the generator) with persistence_pairs.
+ # This still makes assumptions on the order of vertices in a simplex and could be more robust.
+ pairs = st.persistence_pairs()
+ assert {tuple(i) for i in g[0]} == {(i[0][0],) + tuple(i[1]) for i in pairs if len(i[0]) == 1 and len(i[1]) != 0}
+ assert {(i[0], i[1]) for i in g[1][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) != 0}
+ assert set(g[2]) == {i[0][0] for i in pairs if len(i[0]) == 1 and len(i[1]) == 0}
+ assert {(i[0], i[1]) for i in g[3][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) == 0}
+
+
+def test_lower_star_generators():
+ st = gudhi.SimplexTree()
+ st.insert([0, 1, 2], -10)
+ st.insert([0, 3], -10)
+ st.insert([1, 3], -10)
+ st.assign_filtration([2], -1)
+ st.assign_filtration([3], 0)
+ st.assign_filtration([0], 1)
+ st.assign_filtration([1], 2)
+ st.make_filtration_non_decreasing()
+ st.persistence(min_persistence=-1)
+ g = st.lower_star_persistence_generators()
+ assert len(g[0]) == 2
+ assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]])
+ assert np.array_equal(g[0][1], [[1, 1]])
+ assert len(g[1]) == 2
+ assert np.array_equal(g[1][0], [2])
+ assert np.array_equal(g[1][1], [1])
+
+
+def test_empty():
+ st = gudhi.SimplexTree()
+ st.persistence()
+ assert st.lower_star_persistence_generators() == ([], [])
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], np.empty((0, 3)))
+ assert g[1] == []
+ assert np.array_equal(g[2], [])
+ assert g[3] == []
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 1822c43b..2137d822 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -9,6 +9,7 @@
"""
from gudhi import SimplexTree
+import pytest
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -45,7 +46,6 @@ def test_insertion():
assert st.find([2, 3]) == False
# filtration test
- st.initialize_filtration()
assert st.filtration([0, 1, 2]) == 4.0
assert st.filtration([0, 2]) == 4.0
assert st.filtration([1, 2]) == 4.0
@@ -55,7 +55,7 @@ def test_insertion():
assert st.filtration([1]) == 0.0
# skeleton test
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1, 2], 4.0),
([0, 1], 0.0),
([0, 2], 4.0),
@@ -64,7 +64,7 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(1) == [
+ assert list(st.get_skeleton(1)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -72,12 +72,12 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(0) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
+ assert list(st.get_skeleton(0)) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
# remove_maximal_simplex test
assert st.get_cofaces([0, 1, 2], 1) == []
st.remove_maximal_simplex([0, 1, 2])
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -92,7 +92,6 @@ def test_insertion():
assert st.find([1]) == True
assert st.find([2]) == True
- st.initialize_filtration()
assert st.persistence(persistence_dim_max=True) == [
(1, (4.0, float("inf"))),
(0, (0.0, float("inf"))),
@@ -126,7 +125,8 @@ def test_expansion():
assert st.num_vertices() == 7
assert st.num_simplices() == 17
- assert st.get_filtration() == [
+
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -149,9 +149,8 @@ def test_expansion():
st.expansion(3)
assert st.num_vertices() == 7
assert st.num_simplices() == 22
- st.initialize_filtration()
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -248,3 +247,96 @@ def test_make_filtration_non_decreasing():
assert st.filtration([3, 4, 5]) == 2.0
assert st.filtration([3, 4]) == 2.0
assert st.filtration([4, 5]) == 2.0
+
+def test_extend_filtration():
+
+ # Inserted simplex:
+ # 5 4
+ # o o
+ # / \ /
+ # o o
+ # /2\ /3
+ # o o
+ # 1 0
+
+ st = SimplexTree()
+ st.insert([0,2])
+ st.insert([1,2])
+ st.insert([0,3])
+ st.insert([2,5])
+ st.insert([3,4])
+ st.insert([3,5])
+ st.assign_filtration([0], 1.)
+ st.assign_filtration([1], 2.)
+ st.assign_filtration([2], 3.)
+ st.assign_filtration([3], 4.)
+ st.assign_filtration([4], 5.)
+ st.assign_filtration([5], 6.)
+
+ assert list(st.get_filtration()) == [
+ ([0, 2], 0.0),
+ ([1, 2], 0.0),
+ ([0, 3], 0.0),
+ ([3, 4], 0.0),
+ ([2, 5], 0.0),
+ ([3, 5], 0.0),
+ ([0], 1.0),
+ ([1], 2.0),
+ ([2], 3.0),
+ ([3], 4.0),
+ ([4], 5.0),
+ ([5], 6.0)
+ ]
+
+ st.extend_filtration()
+
+ assert list(st.get_filtration()) == [
+ ([6], -3.0),
+ ([0], -2.0),
+ ([1], -1.8),
+ ([2], -1.6),
+ ([0, 2], -1.6),
+ ([1, 2], -1.6),
+ ([3], -1.4),
+ ([0, 3], -1.4),
+ ([4], -1.2),
+ ([3, 4], -1.2),
+ ([5], -1.0),
+ ([2, 5], -1.0),
+ ([3, 5], -1.0),
+ ([5, 6], 1.0),
+ ([4, 6], 1.2),
+ ([3, 6], 1.4),
+ ([3, 4, 6], 1.4),
+ ([3, 5, 6], 1.4),
+ ([2, 6], 1.6),
+ ([2, 5, 6], 1.6),
+ ([1, 6], 1.8),
+ ([1, 2, 6], 1.8),
+ ([0, 6], 2.0),
+ ([0, 2, 6], 2.0),
+ ([0, 3, 6], 2.0)
+ ]
+
+ dgms = st.extended_persistence(min_persistence=-1.)
+
+ assert dgms[0][0][1][0] == pytest.approx(2.)
+ assert dgms[0][0][1][1] == pytest.approx(3.)
+ assert dgms[1][0][1][0] == pytest.approx(5.)
+ assert dgms[1][0][1][1] == pytest.approx(4.)
+ assert dgms[2][0][1][0] == pytest.approx(1.)
+ assert dgms[2][0][1][1] == pytest.approx(6.)
+ assert dgms[3][0][1][0] == pytest.approx(6.)
+ assert dgms[3][0][1][1] == pytest.approx(1.)
+
+def test_simplices_iterator():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2], filtration=4.0) == True
+ assert st.insert([2, 3, 4], filtration=2.0) == True
+
+ for simplex in st.get_simplices():
+ print("simplex is: ", simplex[0])
+ assert st.find(simplex[0]) == True
+ print("filtration is: ", simplex[1])
+ assert st.filtration(simplex[0]) == simplex[1]
diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py
index fe0985fa..31f64e32 100755
--- a/src/python/test/test_subsampling.py
+++ b/src/python/test/test_subsampling.py
@@ -120,7 +120,6 @@ def test_simple_pick_n_random_points():
# Go furter than point set on purpose
for iter in range(1, 10):
sub_set = gudhi.pick_n_random_points(points=point_set, nb_points=iter)
- print(5)
for sub in sub_set:
found = False
for point in point_set:
diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py
index e650e99c..8668a2e0 100755
--- a/src/python/test/test_tangential_complex.py
+++ b/src/python/test/test_tangential_complex.py
@@ -37,7 +37,7 @@ def test_tangential():
assert st.num_simplices() == 6
assert st.num_vertices() == 4
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -45,6 +45,7 @@ def test_tangential():
([3], 0.0),
([1, 3], 0.0),
]
+
assert st.get_cofaces([0], 1) == [([0, 2], 0.0)]
assert point_list[0] == tc.get_point(0)
diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py
new file mode 100755
index 00000000..1ead9bca
--- /dev/null
+++ b/src/python/test/test_time_delay.py
@@ -0,0 +1,43 @@
+from gudhi.point_cloud.timedelay import TimeDelayEmbedding
+import numpy as np
+
+
+def test_normal():
+ # Sample array
+ ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ # Normal case.
+ prep = TimeDelayEmbedding()
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([2, 3, 4])).all()
+ assert (pointclouds[2] == np.array([3, 4, 5])).all()
+ assert (pointclouds[3] == np.array([4, 5, 6])).all()
+ assert (pointclouds[4] == np.array([5, 6, 7])).all()
+ assert (pointclouds[5] == np.array([6, 7, 8])).all()
+ assert (pointclouds[6] == np.array([7, 8, 9])).all()
+ assert (pointclouds[7] == np.array([8, 9, 10])).all()
+ # Delay = 3
+ prep = TimeDelayEmbedding(delay=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 4, 7])).all()
+ assert (pointclouds[1] == np.array([2, 5, 8])).all()
+ assert (pointclouds[2] == np.array([3, 6, 9])).all()
+ assert (pointclouds[3] == np.array([4, 7, 10])).all()
+ # Skip = 3
+ prep = TimeDelayEmbedding(skip=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([4, 5, 6])).all()
+ assert (pointclouds[2] == np.array([7, 8, 9])).all()
+ # Delay = 2 / Skip = 2
+ prep = TimeDelayEmbedding(delay=2, skip=2)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 3, 5])).all()
+ assert (pointclouds[1] == np.array([3, 5, 7])).all()
+ assert (pointclouds[2] == np.array([5, 7, 9])).all()
+
+ # Vector series
+ ts = np.arange(0, 10).reshape(-1, 2)
+ prep = TimeDelayEmbedding(dim=4)
+ prep.fit([ts])
+ assert (prep.transform([ts])[0] == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]).all()
diff --git a/src/python/test/test_wasserstein_barycenter.py b/src/python/test/test_wasserstein_barycenter.py
new file mode 100755
index 00000000..f68c748e
--- /dev/null
+++ b/src/python/test/test_wasserstein_barycenter.py
@@ -0,0 +1,46 @@
+from gudhi.wasserstein.barycenter import lagrangian_barycenter
+import numpy as np
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Theo Lacombe
+
+ Copyright (C) 2019 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Theo Lacombe"
+__copyright__ = "Copyright (C) 2019 Inria"
+__license__ = "MIT"
+
+
+def test_lagrangian_barycenter():
+
+ dg1 = np.array([[0.2, 0.5]])
+ dg2 = np.array([[0.2, 0.7]])
+ dg3 = np.array([[0.3, 0.6], [0.7, 0.8], [0.2, 0.3]])
+ dg4 = np.array([])
+ dg5 = np.array([])
+ dg6 = np.array([])
+ res = np.array([[0.27916667, 0.55416667], [0.7375, 0.7625], [0.2375, 0.2625]])
+
+ dg7 = np.array([[0.1, 0.15], [0.1, 0.7], [0.2, 0.22], [0.55, 0.84], [0.11, 0.91], [0.61, 0.75], [0.33, 0.46], [0.12, 0.41], [0.32, 0.48]])
+ dg8 = np.array([[0., 4.], [4, 8]])
+
+ # error crit.
+ eps = 1e-7
+
+
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg1, dg2, dg3, dg4],init=3, verbose=False) - res) < eps
+ assert np.array_equal(lagrangian_barycenter(pdiagset=[dg4, dg5, dg6], verbose=False), np.empty(shape=(0,2)))
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg7], verbose=False) - dg7) < eps
+ Y, log = lagrangian_barycenter(pdiagset=[dg4, dg8], verbose=True)
+ assert np.linalg.norm(Y - np.array([[1,3], [5, 7]])) < eps
+ assert np.abs(log["energy"] - 2) < eps
+ assert np.array_equal(log["groupings"][0] , np.array([[0, -1], [1, -1]]))
+ assert np.array_equal(log["groupings"][1] , np.array([[0, 0], [1, 1]]))
+ assert np.linalg.norm(lagrangian_barycenter(pdiagset=[dg8, dg4], init=np.array([[0.2, 0.6], [0.5, 0.7]]), verbose=False) - np.array([[1, 3], [5, 7]])) < eps
+ assert lagrangian_barycenter(pdiagset = []) is None
+
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 43dda77e..90d26809 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -1,6 +1,6 @@
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Theo Lacombe
+ Author(s): Theo Lacombe, Marc Glisse
Copyright (C) 2019 Inria
@@ -8,41 +8,116 @@
- YYYY/MM Author: Description of the modification
"""
-from gudhi.wasserstein import wasserstein_distance
+from gudhi.wasserstein.wasserstein import _proj_on_diag
+from gudhi.wasserstein import wasserstein_distance as pot
+from gudhi.hera import wasserstein_distance as hera
import numpy as np
+import pytest
__author__ = "Theo Lacombe"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
+def test_proj_on_diag():
+ dgm = np.array([[1., 1.], [1., 2.], [3., 5.]])
+ assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]])
+ empty = np.empty((0, 2))
+ assert np.array_equal(_proj_on_diag(empty), empty)
-def test_basic_wasserstein():
+def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
diag3 = np.array([[0, 2], [4, 6]])
diag4 = np.array([[0, 3], [4, 8]])
- emptydiag = np.array([[]])
+ emptydiag = np.array([])
+
+ # We just need to handle positive numbers here
+ def approx(x):
+ return pytest.approx(x, rel=delta)
assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=2.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=2.) == 0.
- assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == 2.
- assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == 4.
+ assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == approx(2.)
+ assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == approx(4.)
+
+ assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == approx(5.) # thank you Pythagorician triplets
+ assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == approx(2.5)
+ assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == approx(3.5355339059327378)
+
+ assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == approx(1.4453593023967701)
+ assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == approx(0.9772734057168739)
+
+ assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == approx(3.141592214572228)
+
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == approx(3.)
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == approx(3.) # no diag matching here
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == approx(np.sqrt(5))
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == approx(np.sqrt(5))
+ assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == approx(np.sqrt(5))
+
+ if test_infinity:
+ diag5 = np.array([[0, 3], [4, np.inf]])
+ diag6 = np.array([[7, 8], [4, 6], [3, np.inf]])
+
+ assert wasserstein_distance(diag4, diag5) == np.inf
+ assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.)
+
+
+ if test_matching:
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1]
+ assert np.array_equal(match , [[-1, 0], [-1, 1]])
+ match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match , [[0, -1], [1, -1]])
+ match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1]
+ assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]])
+
+
+
+def hera_wrap(**extra):
+ def fun(*kargs,**kwargs):
+ return hera(*kargs,**kwargs,**extra)
+ return fun
- assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == 5. # thank you Pythagorician triplets
- assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == 2.5
- assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == 3.5355339059327378
+def pot_wrap(**extra):
+ def fun(*kargs,**kwargs):
+ return pot(*kargs,**kwargs,**extra)
+ return fun
- assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == 1.4453593023967701
- assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == 0.9772734057168739
+def test_wasserstein_distance_pot():
+ _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True)
+ _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False)
- assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == 3.141592214572228
+def test_wasserstein_distance_hera():
+ _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
- assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == 3.
- assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == 3. # no diag matching here
- assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == np.sqrt(5)
- assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == np.sqrt(5)
- assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == np.sqrt(5)
+def test_wasserstein_distance_grad():
+ import torch
+ diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
+ diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ assert diag1.grad is None and diag2.grad is None and diag3.grad is None
+ dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
+ dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
+ dist12.backward()
+ dist30.backward()
+ assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
+ diag4 = torch.tensor([[0., 10.]], requires_grad=True)
+ diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+ dist45.backward()
+ assert np.array_equal(diag4.grad, [[-1., -1.]])
+ assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
+ diag6 = torch.tensor([[5., 10.]], requires_grad=True)
+ pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ # assert np.array_equal(diag6.grad, [[0., 0.]])
diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py
new file mode 100644
index 00000000..7ef48333
--- /dev/null
+++ b/src/python/test/test_weighted_rips.py
@@ -0,0 +1,63 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Yuichi Ike and Masatoshi Takenouchi
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.weighted_rips_complex import WeightedRipsComplex
+from gudhi.point_cloud.dtm import DistanceToMeasure
+import numpy as np
+from math import sqrt
+from scipy.spatial.distance import cdist
+import pytest
+
+def test_non_dtm_rips_complex():
+ dist = [[], [1]]
+ weights = [1, 100]
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ assert st.filtration([0,1]) == pytest.approx(200.0)
+
+def test_compatibility_with_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=42)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+ assert list(st.get_filtration()) == [
+ ([0], 0.0),
+ ([1], 0.0),
+ ([2], 0.0),
+ ([3], 0.0),
+ ([0, 1], 1.0),
+ ([0, 2], 1.0),
+ ([1, 3], 1.0),
+ ([2, 3], 1.0),
+ ([1, 2], sqrt(2)),
+ ([0, 3], sqrt(2)),
+ ]
+
+def test_compatibility_with_filtered_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=1.0)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+
+ assert st.__is_defined() == True
+ assert st.__is_persistence_defined() == False
+
+ assert st.num_simplices() == 8
+ assert st.num_vertices() == 4
+
+def test_dtm_rips_complex():
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dist = cdist(pts,pts)
+ dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+ r = dtm.fit_transform(dist)
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ st.persistence()
+ persistence_intervals0 = st.persistence_intervals_in_dimension(0)
+ assert persistence_intervals0 == pytest.approx(np.array([[3.16227766, 5.39834564],[3.16227766, 5.39834564], [3.16227766, float("inf")]]))
+