author    Gard Spreemann <gspr@nonempty.org>  2021-12-29 19:26:32 +0100
committer Gard Spreemann <gspr@nonempty.org>  2021-12-29 19:26:32 +0100
commit    367366a649f57a147456f11f7e803de12ced3b8f (patch)
tree      a900af1302f4a6923323d203ae8cc22550b59e8f /test
parent    88d850422a838c29d70ef757d04ab57707d7cd26 (diff)
parent    edab1c60630f95b38db430017585d06253c92817 (diff)
Merge branch 'dfsg/latest' into debian/sid
Diffstat (limited to 'test')
-rw-r--r--  test/conftest.py         12
-rw-r--r--  test/test_1d_solver.py   68
-rw-r--r--  test/test_backend.py     71
-rw-r--r--  test/test_bregman.py     46
-rw-r--r--  test/test_gromov.py     119
-rw-r--r--  test/test_optim.py        4
-rw-r--r--  test/test_ot.py          52
-rw-r--r--  test/test_sliced.py      57
-rw-r--r--  test/test_utils.py       20
9 files changed, 422 insertions, 27 deletions
diff --git a/test/conftest.py b/test/conftest.py
index 987d98e..c0db8ab 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -5,7 +5,7 @@
# License: MIT License
import pytest
-from ot.backend import jax
+from ot.backend import jax, tf
from ot.backend import get_backend_list
import functools
@@ -13,6 +13,10 @@ if jax:
from jax.config import config
config.update("jax_enable_x64", True)
+if tf:
+ from tensorflow.python.ops.numpy_ops import np_config
+ np_config.enable_numpy_behavior()
+
backend_list = get_backend_list()
@@ -24,16 +28,16 @@ def nx(request):
def skip_arg(arg, value, reason=None, getter=lambda x: x):
- if isinstance(arg, tuple) or isinstance(arg, list):
+ if isinstance(arg, (tuple, list)):
n = len(arg)
else:
arg = (arg, )
n = 1
- if n != 1 and (isinstance(value, tuple) or isinstance(value, list)):
+ if n != 1 and isinstance(value, (tuple, list)):
pass
else:
value = (value, )
- if isinstance(getter, tuple) or isinstance(value, list):
+ if isinstance(getter, (tuple, list)):
pass
else:
getter = [getter] * n
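
Note that the skip_arg hunk above also fixes a latent bug: the old code tested isinstance(value, list) where it meant to test getter. A minimal standalone sketch of the normalization pattern the refactor settles on (the helper name normalize_skip_args is hypothetical, not part of the test suite):

    def normalize_skip_args(arg, value, getter=lambda x: x):
        # Wrap scalars so that arg, value and getter all become sequences of
        # length n, mirroring skip_arg's logic after the refactor.
        if isinstance(arg, (tuple, list)):
            n = len(arg)
        else:
            arg = (arg,)
            n = 1
        if not (n != 1 and isinstance(value, (tuple, list))):
            value = (value,)
        if not isinstance(getter, (tuple, list)):
            getter = [getter] * n
        return arg, value, getter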
diff --git a/test/test_1d_solver.py b/test/test_1d_solver.py
index cb85cb9..6a42cfe 100644
--- a/test/test_1d_solver.py
+++ b/test/test_1d_solver.py
@@ -11,7 +11,7 @@ import pytest
import ot
from ot.lp import wasserstein_1d
-from ot.backend import get_backend_list
+from ot.backend import get_backend_list, tf
from scipy.stats import wasserstein_distance
backend_list = get_backend_list()
@@ -86,7 +86,6 @@ def test_wasserstein_1d(nx):
def test_wasserstein_1d_type_devices(nx):
-
rng = np.random.RandomState(0)
n = 10
@@ -108,6 +107,37 @@ def test_wasserstein_1d_type_devices(nx):
nx.assert_same_dtype_device(xb, res)
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_wasserstein_1d_device_tf():
+ if not tf:
+ return
+ nx = ot.backend.TensorflowBackend()
+ rng = np.random.RandomState(0)
+ n = 10
+ x = np.linspace(0, 5, n)
+ rho_u = np.abs(rng.randn(n))
+ rho_u /= rho_u.sum()
+ rho_v = np.abs(rng.randn(n))
+ rho_v /= rho_v.sum()
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ xb = nx.from_numpy(x)
+ rho_ub = nx.from_numpy(rho_u)
+ rho_vb = nx.from_numpy(rho_v)
+ res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
+ nx.assert_same_dtype_device(xb, res)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ xb = nx.from_numpy(x)
+ rho_ub = nx.from_numpy(rho_u)
+ rho_vb = nx.from_numpy(rho_v)
+ res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
+ nx.assert_same_dtype_device(xb, res)
+ assert nx.dtype_device(res)[1].startswith("GPU")
+
+
def test_emd_1d_emd2_1d():
# test emd1d gives similar results as emd
n = 20
@@ -148,7 +178,6 @@ def test_emd_1d_emd2_1d():
def test_emd1d_type_devices(nx):
-
rng = np.random.RandomState(0)
n = 10
@@ -170,3 +199,36 @@ def test_emd1d_type_devices(nx):
nx.assert_same_dtype_device(xb, emd)
nx.assert_same_dtype_device(xb, emd2)
+
+
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_emd1d_device_tf():
+ nx = ot.backend.TensorflowBackend()
+ rng = np.random.RandomState(0)
+ n = 10
+ x = np.linspace(0, 5, n)
+ rho_u = np.abs(rng.randn(n))
+ rho_u /= rho_u.sum()
+ rho_v = np.abs(rng.randn(n))
+ rho_v /= rho_v.sum()
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ xb = nx.from_numpy(x)
+ rho_ub = nx.from_numpy(rho_u)
+ rho_vb = nx.from_numpy(rho_v)
+ emd = ot.emd_1d(xb, xb, rho_ub, rho_vb)
+ emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb)
+ nx.assert_same_dtype_device(xb, emd)
+ nx.assert_same_dtype_device(xb, emd2)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ xb = nx.from_numpy(x)
+ rho_ub = nx.from_numpy(rho_u)
+ rho_vb = nx.from_numpy(rho_v)
+ emd = ot.emd_1d(xb, xb, rho_ub, rho_vb)
+ emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb)
+ nx.assert_same_dtype_device(xb, emd)
+ nx.assert_same_dtype_device(xb, emd2)
+ assert nx.dtype_device(emd)[1].startswith("GPU")
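
For reference, the wasserstein_1d call that the new TensorFlow device tests exercise also runs on plain NumPy inputs, with no backend conversion; a minimal sketch:

    import numpy as np
    from ot.lp import wasserstein_1d

    rng = np.random.RandomState(0)
    n = 10
    x = np.linspace(0, 5, n)
    rho_u = np.abs(rng.randn(n))
    rho_u /= rho_u.sum()
    rho_v = np.abs(rng.randn(n))
    rho_v /= rho_v.sum()

    # p=1 gives the 1D earth mover's distance between the two weighted samples
    d = wasserstein_1d(x, x, rho_u, rho_v, p=1)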
diff --git a/test/test_backend.py b/test/test_backend.py
index 1832b91..027c4cd 100644
--- a/test/test_backend.py
+++ b/test/test_backend.py
@@ -7,7 +7,7 @@
import ot
import ot.backend
-from ot.backend import torch, jax
+from ot.backend import torch, jax, cp, tf
import pytest
@@ -87,6 +87,34 @@ def test_get_backend():
with pytest.raises(ValueError):
get_backend(A, B2)
+ if cp:
+ A2 = cp.asarray(A)
+ B2 = cp.asarray(B)
+
+ nx = get_backend(A2)
+ assert nx.__name__ == 'cupy'
+
+ nx = get_backend(A2, B2)
+ assert nx.__name__ == 'cupy'
+
+ # test not unique types in input
+ with pytest.raises(ValueError):
+ get_backend(A, B2)
+
+ if tf:
+ A2 = tf.convert_to_tensor(A)
+ B2 = tf.convert_to_tensor(B)
+
+ nx = get_backend(A2)
+ assert nx.__name__ == 'tf'
+
+ nx = get_backend(A2, B2)
+ assert nx.__name__ == 'tf'
+
+ # test not unique types in input
+ with pytest.raises(ValueError):
+ get_backend(A, B2)
+
def test_convert_between_backends(nx):
@@ -228,6 +256,14 @@ def test_empty_backend():
nx.copy(M)
with pytest.raises(NotImplementedError):
nx.allclose(M, M)
+ with pytest.raises(NotImplementedError):
+ nx.squeeze(M)
+ with pytest.raises(NotImplementedError):
+ nx.bitsize(M)
+ with pytest.raises(NotImplementedError):
+ nx.device_type(M)
+ with pytest.raises(NotImplementedError):
+ nx._bench(lambda x: x, M, n_runs=1)
def test_func_backends(nx):
@@ -240,7 +276,7 @@ def test_func_backends(nx):
# Sparse tensors test
sp_row = np.array([0, 3, 1, 0, 3])
sp_col = np.array([0, 3, 1, 2, 2])
- sp_data = np.array([4, 5, 7, 9, 0])
+ sp_data = np.array([4, 5, 7, 9, 0], dtype=np.float64)
lst_tot = []
@@ -393,7 +429,8 @@ def test_func_backends(nx):
lst_b.append(nx.to_numpy(A))
lst_name.append('argsort')
- A = nx.searchsorted(Mb, Mb, 'right')
+ tmp = nx.sort(Mb)
+ A = nx.searchsorted(tmp, tmp, 'right')
lst_b.append(nx.to_numpy(A))
lst_name.append('searchsorted')
@@ -476,7 +513,7 @@ def test_func_backends(nx):
lst_name.append('coo_matrix')
assert not nx.issparse(Mb), 'Assert fail on: issparse (expected False)'
- assert nx.issparse(sp_Mb) or nx.__name__ == "jax", 'Assert fail on: issparse (expected True)'
+ assert nx.issparse(sp_Mb) or nx.__name__ in ("jax", "tf"), 'Assert fail on: issparse (expected True)'
A = nx.tocsr(sp_Mb)
lst_b.append(nx.to_numpy(nx.todense(A)))
@@ -501,6 +538,18 @@ def test_func_backends(nx):
assert nx.allclose(Mb, Mb), 'Assert fail on: allclose (expected True)'
assert not nx.allclose(2 * Mb, Mb), 'Assert fail on: allclose (expected False)'
+ A = nx.squeeze(nx.zeros((3, 1, 4, 1)))
+ assert tuple(A.shape) == (3, 4), 'Assert fail on: squeeze'
+
+ A = nx.bitsize(Mb)
+ lst_b.append(float(A))
+ lst_name.append("bitsize")
+
+ A = nx.device_type(Mb)
+ assert A in ("CPU", "GPU")
+
+ nx._bench(lambda x: x, M, n_runs=1)
+
lst_tot.append(lst_b)
lst_np = lst_tot[0]
@@ -575,3 +624,17 @@ def test_gradients_backends():
np.testing.assert_almost_equal(fun(v, c, e), c * np.sum(v ** 4) + e, decimal=4)
np.testing.assert_allclose(grad_val[0], v, atol=1e-4)
np.testing.assert_allclose(grad_val[2], 2 * e, atol=1e-4)
+
+ if tf:
+ nx = ot.backend.TensorflowBackend()
+ w = tf.Variable(tf.random.normal((3, 2)), name='w')
+ b = tf.Variable(tf.random.normal((2,), dtype=tf.float32), name='b')
+ x = tf.random.normal((1, 3), dtype=tf.float32)
+
+ with tf.GradientTape() as tape:
+ y = x @ w + b
+ loss = tf.reduce_mean(y ** 2)
+ manipulated_loss = nx.set_gradients(loss, (w, b), (w, b))
+ [dl_dw, dl_db] = tape.gradient(manipulated_loss, [w, b])
+ assert nx.allclose(dl_dw, w)
+ assert nx.allclose(dl_db, b)
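
The get_backend dispatch that the cupy and tf branches above extend can be sketched with the NumPy backend alone, no GPU or TensorFlow required (from_numpy/to_numpy/sqrt are standard backend methods, not part of this diff):

    import numpy as np
    from ot.backend import get_backend

    A = np.array([[1.0, 2.0], [3.0, 4.0]])
    B = np.array([[1.0, 0.0], [0.0, 1.0]])

    nx = get_backend(A, B)          # dispatch on the argument types
    assert nx.__name__ == "numpy"

    # round-trip through the unified backend API
    C = nx.to_numpy(nx.sqrt(nx.from_numpy(A)))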
diff --git a/test/test_bregman.py b/test/test_bregman.py
index 830052d..6e90aa4 100644
--- a/test/test_bregman.py
+++ b/test/test_bregman.py
@@ -12,7 +12,7 @@ import numpy as np
import pytest
import ot
-from ot.backend import torch
+from ot.backend import torch, tf
@pytest.mark.parametrize("verbose, warn", product([True, False], [True, False]))
@@ -248,6 +248,7 @@ def test_sinkhorn_empty():
ot.sinkhorn([], [], M, 1, method='greenkhorn', stopThr=1e-10, log=True)
+@pytest.skip_backend('tf')
@pytest.skip_backend("jax")
def test_sinkhorn_variants(nx):
# test sinkhorn
@@ -282,6 +283,8 @@ def test_sinkhorn_variants(nx):
"sinkhorn_epsilon_scaling",
"greenkhorn",
"sinkhorn_log"])
+@pytest.skip_arg(("nx", "method"), ("tf", "sinkhorn_epsilon_scaling"), reason="tf does not support sinkhorn_epsilon_scaling", getter=str)
+@pytest.skip_arg(("nx", "method"), ("tf", "greenkhorn"), reason="tf does not support greenkhorn", getter=str)
@pytest.skip_arg(("nx", "method"), ("jax", "sinkhorn_epsilon_scaling"), reason="jax does not support sinkhorn_epsilon_scaling", getter=str)
@pytest.skip_arg(("nx", "method"), ("jax", "greenkhorn"), reason="jax does not support greenkhorn", getter=str)
def test_sinkhorn_variants_dtype_device(nx, method):
@@ -323,6 +326,36 @@ def test_sinkhorn2_variants_dtype_device(nx, method):
nx.assert_same_dtype_device(Mb, lossb)
+@pytest.mark.skipif(not tf, reason="tf not installed")
+@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"])
+def test_sinkhorn2_variants_device_tf(method):
+ nx = ot.backend.TensorflowBackend()
+ n = 100
+ x = np.random.randn(n, 2)
+ u = ot.utils.unif(n)
+ M = ot.dist(x, x)
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ ub = nx.from_numpy(u)
+ Mb = nx.from_numpy(M)
+ Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
+ lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
+ nx.assert_same_dtype_device(Mb, Gb)
+ nx.assert_same_dtype_device(Mb, lossb)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ ub = nx.from_numpy(u)
+ Mb = nx.from_numpy(M)
+ Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
+ lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
+ nx.assert_same_dtype_device(Mb, Gb)
+ nx.assert_same_dtype_device(Mb, lossb)
+ assert nx.dtype_device(Gb)[1].startswith("GPU")
+
+
+@pytest.skip_backend('tf')
@pytest.skip_backend("jax")
def test_sinkhorn_variants_multi_b(nx):
# test sinkhorn
@@ -352,6 +385,7 @@ def test_sinkhorn_variants_multi_b(nx):
np.testing.assert_allclose(G0, Gs, atol=1e-05)
+@pytest.skip_backend('tf')
@pytest.skip_backend("jax")
def test_sinkhorn2_variants_multi_b(nx):
# test sinkhorn
@@ -454,7 +488,7 @@ def test_barycenter(nx, method, verbose, warn):
weights_nx = nx.from_numpy(weights)
reg = 1e-2
- if nx.__name__ == "jax" and method == "sinkhorn_log":
+ if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.barycenter(A_nx, M_nx, reg, weights, method=method)
else:
@@ -495,7 +529,7 @@ def test_barycenter_debiased(nx, method, verbose, warn):
# wasserstein
reg = 1e-2
- if nx.__name__ == "jax" and method == "sinkhorn_log":
+ if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.barycenter_debiased(A_nx, M_nx, reg, weights, method=method)
else:
@@ -597,7 +631,7 @@ def test_wasserstein_bary_2d(nx, method):
# wasserstein
reg = 1e-2
- if nx.__name__ == "jax" and method == "sinkhorn_log":
+ if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.convolutional_barycenter2d(A_nx, reg, method=method)
else:
@@ -629,7 +663,7 @@ def test_wasserstein_bary_2d_debiased(nx, method):
# wasserstein
reg = 1e-2
- if nx.__name__ == "jax" and method == "sinkhorn_log":
+ if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log":
with pytest.raises(NotImplementedError):
ot.bregman.convolutional_barycenter2d_debiased(A_nx, reg, method=method)
else:
@@ -888,6 +922,8 @@ def test_implemented_methods():
ot.bregman.sinkhorn2(a, b, M, epsilon, method=method)
+@pytest.skip_backend('tf')
+@pytest.skip_backend("cupy")
@pytest.skip_backend("jax")
@pytest.mark.filterwarnings("ignore:Bottleneck")
def test_screenkhorn(nx):
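
A NumPy-only sketch of the sinkhorn/sinkhorn2 call pattern that the new TF device test repeats on CPU and, when available, GPU:

    import numpy as np
    import ot

    n = 100
    x = np.random.randn(n, 2)
    u = ot.utils.unif(n)            # uniform histogram summing to 1
    M = ot.dist(x, x)               # pairwise squared Euclidean costs

    G = ot.sinkhorn(u, u, M, 1, method="sinkhorn", stopThr=1e-10)       # transport plan
    loss = ot.sinkhorn2(u, u, M, 1, method="sinkhorn", stopThr=1e-10)   # scalar loss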
diff --git a/test/test_gromov.py b/test/test_gromov.py
index c4bc04c..4b995d5 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -9,7 +9,7 @@
import numpy as np
import ot
from ot.backend import NumpyBackend
-from ot.backend import torch
+from ot.backend import torch, tf
import pytest
@@ -54,9 +54,12 @@ def test_gromov(nx):
gw, log = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=True)
gwb, logb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=True)
+ gwb = nx.to_numpy(gwb)
gw_val = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=False)
- gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=False)
+ gw_valb = nx.to_numpy(
+ ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=False)
+ )
G = log['T']
Gb = nx.to_numpy(logb['T'])
@@ -110,6 +113,45 @@ def test_gromov_dtype_device(nx):
nx.assert_same_dtype_device(C1b, gw_valb)
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_gromov_device_tf():
+ nx = ot.backend.TensorflowBackend()
+ n_samples = 50 # nb samples
+ mu_s = np.array([0, 0])
+ cov_s = np.array([[1, 0], [0, 1]])
+ xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
+ xt = xs[::-1].copy()
+ p = ot.unif(n_samples)
+ q = ot.unif(n_samples)
+ C1 = ot.dist(xs, xs)
+ C2 = ot.dist(xt, xt)
+ C1 /= C1.max()
+ C2 /= C2.max()
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ C1b = nx.from_numpy(C1)
+ C2b = nx.from_numpy(C2)
+ pb = nx.from_numpy(p)
+ qb = nx.from_numpy(q)
+ Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', verbose=True)
+ gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=False)
+ nx.assert_same_dtype_device(C1b, Gb)
+ nx.assert_same_dtype_device(C1b, gw_valb)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ C1b = nx.from_numpy(C1)
+ C2b = nx.from_numpy(C2)
+ pb = nx.from_numpy(p)
+ qb = nx.from_numpy(q)
+ Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', verbose=True)
+ gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=False)
+ nx.assert_same_dtype_device(C1b, Gb)
+ nx.assert_same_dtype_device(C1b, gw_valb)
+ assert nx.dtype_device(Gb)[1].startswith("GPU")
+
+
def test_gromov2_gradients():
n_samples = 50 # nb samples
@@ -147,6 +189,7 @@ def test_gromov2_gradients():
@pytest.skip_backend("jax", reason="test very slow with jax backend")
+@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_gromov(nx):
n_samples = 50 # nb samples
@@ -188,6 +231,7 @@ def test_entropic_gromov(nx):
C1, C2, p, q, 'kl_loss', epsilon=1e-2, log=True)
gwb, logb = ot.gromov.entropic_gromov_wasserstein2(
C1b, C2b, pb, qb, 'kl_loss', epsilon=1e-2, log=True)
+ gwb = nx.to_numpy(gwb)
G = log['T']
Gb = nx.to_numpy(logb['T'])
@@ -204,6 +248,7 @@ def test_entropic_gromov(nx):
@pytest.skip_backend("jax", reason="test very slow with jax backend")
+@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_gromov_dtype_device(nx):
# setup
n_samples = 50 # nb samples
@@ -287,8 +332,8 @@ def test_pointwise_gromov(nx):
np.testing.assert_allclose(
q, Gb.sum(0), atol=1e-04) # cf convergence gromov
- np.testing.assert_allclose(logb['gw_dist_estimated'], 0.0, atol=1e-08)
- np.testing.assert_allclose(logb['gw_dist_std'], 0.0, atol=1e-08)
+ np.testing.assert_allclose(float(logb['gw_dist_estimated']), 0.0, atol=1e-08)
+ np.testing.assert_allclose(float(logb['gw_dist_std']), 0.0, atol=1e-08)
G, log = ot.gromov.pointwise_gromov_wasserstein(
C1, C2, p, q, loss, max_iter=100, alpha=0.1, log=True, verbose=True, random_state=42)
@@ -298,10 +343,11 @@ def test_pointwise_gromov(nx):
Gb = nx.to_numpy(nx.todense(Gb))
np.testing.assert_allclose(G, Gb, atol=1e-06)
- np.testing.assert_allclose(logb['gw_dist_estimated'], 0.10342276348494964, atol=1e-8)
- np.testing.assert_allclose(logb['gw_dist_std'], 0.0015952535464736394, atol=1e-8)
+ np.testing.assert_allclose(float(logb['gw_dist_estimated']), 0.10342276348494964, atol=1e-8)
+ np.testing.assert_allclose(float(logb['gw_dist_std']), 0.0015952535464736394, atol=1e-8)
+@pytest.skip_backend("tf", reason="test very slow with tf backend")
@pytest.skip_backend("jax", reason="test very slow with jax backend")
def test_sampled_gromov(nx):
n_samples = 50 # nb samples
@@ -346,8 +392,8 @@ def test_sampled_gromov(nx):
np.testing.assert_allclose(
q, Gb.sum(0), atol=1e-04) # cf convergence gromov
- np.testing.assert_allclose(logb['gw_dist_estimated'], 0.05679474884977278, atol=1e-08)
- np.testing.assert_allclose(logb['gw_dist_std'], 0.0005986592106971995, atol=1e-08)
+ np.testing.assert_allclose(float(logb['gw_dist_estimated']), 0.05679474884977278, atol=1e-08)
+ np.testing.assert_allclose(float(logb['gw_dist_std']), 0.0005986592106971995, atol=1e-08)
def test_gromov_barycenter(nx):
@@ -381,6 +427,20 @@ def test_gromov_barycenter(nx):
np.testing.assert_allclose(Cb, Cbb, atol=1e-06)
np.testing.assert_allclose(Cbb.shape, (n_samples, n_samples))
+ # test of gromov_barycenters with `log` on
+ Cb_, err_ = ot.gromov.gromov_barycenters(
+ n_samples, [C1, C2], [p1, p2], p, [.5, .5],
+ 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cbb_, errb_ = ot.gromov.gromov_barycenters(
+ n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
+ 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cbb_ = nx.to_numpy(Cbb_)
+ np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
+ np.testing.assert_array_almost_equal(err_['err'], errb_['err'])
+ np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))
+
Cb2 = ot.gromov.gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
'kl_loss', max_iter=100, tol=1e-3, random_state=42
@@ -392,6 +452,20 @@ def test_gromov_barycenter(nx):
np.testing.assert_allclose(Cb2, Cb2b, atol=1e-06)
np.testing.assert_allclose(Cb2b.shape, (n_samples, n_samples))
+ # test of gromov_barycenters with `log` on
+ Cb2_, err2_ = ot.gromov.gromov_barycenters(
+ n_samples, [C1, C2], [p1, p2], p, [.5, .5],
+ 'kl_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cb2b_, err2b_ = ot.gromov.gromov_barycenters(
+ n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
+ 'kl_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cb2b_ = nx.to_numpy(Cb2b_)
+ np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
+ np.testing.assert_array_almost_equal(err2_['err'], err2b_['err'])
+ np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))
+
@pytest.mark.filterwarnings("ignore:divide")
def test_gromov_entropic_barycenter(nx):
@@ -425,6 +499,20 @@ def test_gromov_entropic_barycenter(nx):
np.testing.assert_allclose(Cb, Cbb, atol=1e-06)
np.testing.assert_allclose(Cbb.shape, (n_samples, n_samples))
+ # test of entropic_gromov_barycenters with `log` on
+ Cb_, err_ = ot.gromov.entropic_gromov_barycenters(
+ n_samples, [C1, C2], [p1, p2], p, [.5, .5],
+ 'square_loss', 1e-3, max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cbb_, errb_ = ot.gromov.entropic_gromov_barycenters(
+ n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
+ 'square_loss', 1e-3, max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cbb_ = nx.to_numpy(Cbb_)
+ np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
+ np.testing.assert_array_almost_equal(err_['err'], errb_['err'])
+ np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))
+
Cb2 = ot.gromov.entropic_gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
'kl_loss', 1e-3, max_iter=100, tol=1e-3, random_state=42
@@ -436,6 +524,20 @@ def test_gromov_entropic_barycenter(nx):
np.testing.assert_allclose(Cb2, Cb2b, atol=1e-06)
np.testing.assert_allclose(Cb2b.shape, (n_samples, n_samples))
+ # test of entropic_gromov_barycenters with `log` on
+ Cb2_, err2_ = ot.gromov.entropic_gromov_barycenters(
+ n_samples, [C1, C2], [p1, p2], p, [.5, .5],
+ 'kl_loss', 1e-3, max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cb2b_, err2b_ = ot.gromov.entropic_gromov_barycenters(
+ n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
+ 'kl_loss', 1e-3, max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ )
+ Cb2b_ = nx.to_numpy(Cb2b_)
+ np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
+ np.testing.assert_array_almost_equal(err2_['err'], err2b_['err'])
+ np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))
+
def test_fgw(nx):
n_samples = 50 # nb samples
@@ -486,6 +588,7 @@ def test_fgw(nx):
fgw, log = ot.gromov.fused_gromov_wasserstein2(M, C1, C2, p, q, 'square_loss', alpha=0.5, log=True)
fgwb, logb = ot.gromov.fused_gromov_wasserstein2(Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5, log=True)
+ fgwb = nx.to_numpy(fgwb)
G = log['T']
Gb = nx.to_numpy(logb['T'])
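
The plain (non-entropic) Gromov-Wasserstein call that test_gromov_device_tf above wraps in device contexts, as a NumPy-only sketch:

    import numpy as np
    import ot

    n_samples = 50
    mu_s = np.array([0, 0])
    cov_s = np.eye(2)
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
    xt = xs[::-1].copy()
    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss')            # coupling
    gw = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=False)   # scalar value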
diff --git a/test/test_optim.py b/test/test_optim.py
index 4efd9b1..41f9cbe 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -142,7 +142,7 @@ def test_line_search_armijo(nx):
pk = np.array([[-0.25, 0.25], [0.25, -0.25]])
gfk = np.array([[23.04273441, 23.0449082], [23.04273441, 23.0449082]])
old_fval = -123
- # Should not throw an exception and return None for alpha
+ # Should not throw an exception and return 0. for alpha
alpha, a, b = ot.optim.line_search_armijo(
lambda x: 1, nx.from_numpy(xk), nx.from_numpy(pk), nx.from_numpy(gfk), old_fval
)
@@ -151,7 +151,7 @@ def test_line_search_armijo(nx):
)
assert a == anp
assert b == bnp
- assert alpha is None
+ assert alpha == 0.
# check line search armijo
def f(x):
diff --git a/test/test_ot.py b/test/test_ot.py
index 92f26a7..53edf4f 100644
--- a/test/test_ot.py
+++ b/test/test_ot.py
@@ -11,7 +11,7 @@ import pytest
import ot
from ot.datasets import make_1D_gauss as gauss
-from ot.backend import torch
+from ot.backend import torch, tf
def test_emd_dimension_and_mass_mismatch():
@@ -101,6 +101,40 @@ def test_emd_emd2_types_devices(nx):
nx.assert_same_dtype_device(Mb, w)
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_emd_emd2_devices_tf():
+ if not tf:
+ return
+ nx = ot.backend.TensorflowBackend()
+
+ n_samples = 100
+ n_features = 2
+ rng = np.random.RandomState(0)
+ x = rng.randn(n_samples, n_features)
+ y = rng.randn(n_samples, n_features)
+ a = ot.utils.unif(n_samples)
+ M = ot.dist(x, y)
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ ab = nx.from_numpy(a)
+ Mb = nx.from_numpy(M)
+ Gb = ot.emd(ab, ab, Mb)
+ w = ot.emd2(ab, ab, Mb)
+ nx.assert_same_dtype_device(Mb, Gb)
+ nx.assert_same_dtype_device(Mb, w)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ ab = nx.from_numpy(a)
+ Mb = nx.from_numpy(M)
+ Gb = ot.emd(ab, ab, Mb)
+ w = ot.emd2(ab, ab, Mb)
+ nx.assert_same_dtype_device(Mb, Gb)
+ nx.assert_same_dtype_device(Mb, w)
+ assert nx.dtype_device(Gb)[1].startswith("GPU")
+
+
def test_emd2_gradients():
n_samples = 100
n_features = 2
@@ -126,6 +160,22 @@ def test_emd2_gradients():
assert b1.shape == b1.grad.shape
assert M1.shape == M1.grad.shape
+ # Testing for bug #309, checking for scaling of gradient
+ a2 = torch.tensor(a, requires_grad=True)
+ b2 = torch.tensor(a, requires_grad=True)
+ M2 = torch.tensor(M, requires_grad=True)
+
+ val = 10.0 * ot.emd2(a2, b2, M2)
+
+ val.backward()
+
+ assert np.allclose(10.0 * a1.grad.cpu().detach().numpy(),
+ a2.grad.cpu().detach().numpy())
+ assert np.allclose(10.0 * b1.grad.cpu().detach().numpy(),
+ b2.grad.cpu().detach().numpy())
+ assert np.allclose(10.0 * M1.grad.cpu().detach().numpy(),
+ M2.grad.cpu().detach().numpy())
+
def test_emd_emd2():
# test emd and emd2 for simple identity
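
The new block at the end of test_emd2_gradients checks the fix for bug #309 via the chain rule: multiplying the emd2 loss by 10 before backward must multiply every gradient by 10. A self-contained sketch of that property (requires torch):

    import numpy as np
    import torch
    import ot

    rng = np.random.RandomState(0)
    n = 20
    a = ot.utils.unif(n)
    M = ot.dist(rng.randn(n, 2), rng.randn(n, 2))

    a1 = torch.tensor(a, requires_grad=True)
    b1 = torch.tensor(a, requires_grad=True)
    M1 = torch.tensor(M, requires_grad=True)
    ot.emd2(a1, b1, M1).backward()

    a2 = torch.tensor(a, requires_grad=True)
    b2 = torch.tensor(a, requires_grad=True)
    M2 = torch.tensor(M, requires_grad=True)
    (10.0 * ot.emd2(a2, b2, M2)).backward()

    # chain rule: gradients of the scaled loss are 10x the unscaled ones
    assert np.allclose(10.0 * M1.grad.numpy(), M2.grad.numpy())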
diff --git a/test/test_sliced.py b/test/test_sliced.py
index 245202c..91e0961 100644
--- a/test/test_sliced.py
+++ b/test/test_sliced.py
@@ -10,6 +10,7 @@ import pytest
import ot
from ot.sliced import get_random_projections
+from ot.backend import tf
def test_get_random_projections():
@@ -161,6 +162,34 @@ def test_sliced_backend_type_devices(nx):
nx.assert_same_dtype_device(xb, valb)
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_sliced_backend_device_tf():
+ nx = ot.backend.TensorflowBackend()
+ n = 100
+ rng = np.random.RandomState(0)
+ x = rng.randn(n, 2)
+ y = rng.randn(2 * n, 2)
+ P = rng.randn(2, 20)
+ P = P / np.sqrt((P**2).sum(0, keepdims=True))
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ xb = nx.from_numpy(x)
+ yb = nx.from_numpy(y)
+ Pb = nx.from_numpy(P)
+ valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb)
+ nx.assert_same_dtype_device(xb, valb)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ xb = nx.from_numpy(x)
+ yb = nx.from_numpy(y)
+ Pb = nx.from_numpy(P)
+ valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb)
+ nx.assert_same_dtype_device(xb, valb)
+ assert nx.dtype_device(valb)[1].startswith("GPU")
+
+
def test_max_sliced_backend(nx):
n = 100
@@ -211,3 +240,31 @@ def test_max_sliced_backend_type_devices(nx):
valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
nx.assert_same_dtype_device(xb, valb)
+
+
+@pytest.mark.skipif(not tf, reason="tf not installed")
+def test_max_sliced_backend_device_tf():
+ nx = ot.backend.TensorflowBackend()
+ n = 100
+ rng = np.random.RandomState(0)
+ x = rng.randn(n, 2)
+ y = rng.randn(2 * n, 2)
+ P = rng.randn(2, 20)
+ P = P / np.sqrt((P**2).sum(0, keepdims=True))
+
+ # Check that everything stays on the CPU
+ with tf.device("/CPU:0"):
+ xb = nx.from_numpy(x)
+ yb = nx.from_numpy(y)
+ Pb = nx.from_numpy(P)
+ valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
+ nx.assert_same_dtype_device(xb, valb)
+
+ if len(tf.config.list_physical_devices('GPU')) > 0:
+ # Check that everything happens on the GPU
+ xb = nx.from_numpy(x)
+ yb = nx.from_numpy(y)
+ Pb = nx.from_numpy(P)
+ valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
+ nx.assert_same_dtype_device(xb, valb)
+ assert nx.dtype_device(valb)[1].startswith("GPU")
diff --git a/test/test_utils.py b/test/test_utils.py
index 40f4e49..6b476b2 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -117,6 +117,26 @@ def test_dist():
np.testing.assert_allclose(D, D2, atol=1e-14)
np.testing.assert_allclose(D, D3, atol=1e-14)
+ # tests that every metric runs correctly
+ metrics_w = [
+ 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice',
+ 'euclidean', 'hamming', 'jaccard', 'kulsinski',
+ 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
+ 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'
+ ] # those that support weights
+ metrics = ['mahalanobis', 'seuclidean'] # do not support weights depending on scipy's version
+
+ for metric in metrics_w:
+ print(metric)
+ ot.dist(x, x, metric=metric, p=3, w=np.random.random((2, )))
+ for metric in metrics:
+ print(metric)
+ ot.dist(x, x, metric=metric, p=3)
+
+ # weighted minkowski but with no weights
+ with pytest.raises(ValueError):
+ ot.dist(x, x, metric="wminkowski")
+
def test_dist_backends(nx):
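
The metric loop added to test_dist exercises per-feature weights; a minimal sketch for a single weighted metric, assuming the w keyword shown in the test above:

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    x = rng.randn(10, 2)
    w = rng.uniform(size=2)                            # one weight per feature
    D = ot.dist(x, x, metric="minkowski", p=3, w=w)    # weighted Minkowski costs
    assert D.shape == (10, 10)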