author    Nathan Cassereau <84033440+ncassereau-idris@users.noreply.github.com>    2022-03-24 10:53:47 +0100
committer GitHub <noreply@github.com>    2022-03-24 10:53:47 +0100
commit    767171593f2a98a26b9a39bf110a45085e3b982e (patch)
tree      4eb4bcc657efc53a65c3fb4439bd0e0e106b6745 /test
parent    9b9d2221d257f40ea3eb58b279b30d69162d62bb (diff)
[MRG] Domain adaptation and unbalanced solvers with backend support (#343)
* First draft
* Add matrix inverse and square root to backend
* Eigen decomposition for older versions of pytorch (1.8.1 and older)
* Corrected eigen decomposition for pytorch 1.8.1 and older
* Spectral theorem is a thing
* Optimization
* small optimization
* More functions converted
* pep8
* remove a warning and prepare torch meshgrid for future torch release (which will change default indexing)
* dots and pep8
* Meshgrid corrected for older version and prepared for future versions changes
* New backend functions
* Base transport
* LinearTransport
* All transport classes + pep8
* PR added to release file
* Jcpot barycenter test
* unbalanced with backend
* pep8
* bug solve
* test of domain adaptation with backends
* solve bug for tic toc & macos
* solving scipy deprecation warning
* solving scipy deprecation warning attempt2
* solving scipy deprecation warning attempt3
* A warning is triggered when a float->int conversion is detected
* bug solve
* docs
* release file updated
* Better handling of float->int conversion in EMD
* Corrected test for is_floating_point
* docs
* release file updated
* cupy does not allow implicit cast
* fromnumpy
* added test
* test da tf jax
* test unbalanced with no provided histogram
* using type_as argument in unif function correctly
* pep8
* transport plan cast in emd changed behaviour, now trying to cast as histogram's dtype, defaulting to cost matrix

Co-authored-by: Rémi Flamary <remi.flamary@gmail.com>
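
The change applied throughout this commit is that nx.from_numpy accepts several arrays in a single call, with an optional type_as argument casting all of them to a reference dtype/device. A minimal sketch of the pattern, assuming a POT version that includes this commit:

    import numpy as np
    from ot.backend import get_backend_list

    nx = get_backend_list()[0]  # the NumPy backend is always available

    rng = np.random.RandomState(0)
    x = rng.randn(50)
    a = rng.rand(50)
    a /= a.sum()
    M = rng.rand(50, 50)

    # one call converts several arrays at once, instead of one call per array
    xb, ab, Mb = nx.from_numpy(x, a, M)

    # type_as casts every converted array to the dtype/device of a reference tensor
    for tp in nx.__type_list__:
        xb, ab, Mb = nx.from_numpy(x, a, M, type_as=tp)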
Diffstat (limited to 'test')
-rw-r--r--test/test_1d_solver.py28
-rw-r--r--test/test_backend.py66
-rw-r--r--test/test_bregman.py81
-rw-r--r--test/test_da.py307
-rw-r--r--test/test_gromov.py147
-rw-r--r--test/test_optim.py17
-rw-r--r--test/test_ot.py19
-rw-r--r--test/test_sliced.py32
-rw-r--r--test/test_unbalanced.py157
-rw-r--r--test/test_weak.py4
10 files changed, 414 insertions, 444 deletions
diff --git a/test/test_1d_solver.py b/test/test_1d_solver.py
index 6a42cfe..20f307a 100644
--- a/test/test_1d_solver.py
+++ b/test/test_1d_solver.py
@@ -66,9 +66,7 @@ def test_wasserstein_1d(nx):
rho_v = np.abs(rng.randn(n))
rho_v /= rho_v.sum()
- xb = nx.from_numpy(x)
- rho_ub = nx.from_numpy(rho_u)
- rho_vb = nx.from_numpy(rho_v)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
# test 1 : wasserstein_1d should be close to scipy W_1 implementation
np.testing.assert_almost_equal(wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1),
@@ -98,9 +96,7 @@ def test_wasserstein_1d_type_devices(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- xb = nx.from_numpy(x, type_as=tp)
- rho_ub = nx.from_numpy(rho_u, type_as=tp)
- rho_vb = nx.from_numpy(rho_v, type_as=tp)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v, type_as=tp)
res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
@@ -122,17 +118,13 @@ def test_wasserstein_1d_device_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- xb = nx.from_numpy(x)
- rho_ub = nx.from_numpy(rho_u)
- rho_vb = nx.from_numpy(rho_v)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
nx.assert_same_dtype_device(xb, res)
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- xb = nx.from_numpy(x)
- rho_ub = nx.from_numpy(rho_u)
- rho_vb = nx.from_numpy(rho_v)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
nx.assert_same_dtype_device(xb, res)
assert nx.dtype_device(res)[1].startswith("GPU")
@@ -190,9 +182,7 @@ def test_emd1d_type_devices(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- xb = nx.from_numpy(x, type_as=tp)
- rho_ub = nx.from_numpy(rho_u, type_as=tp)
- rho_vb = nx.from_numpy(rho_v, type_as=tp)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v, type_as=tp)
emd = ot.emd_1d(xb, xb, rho_ub, rho_vb)
emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb)
@@ -214,9 +204,7 @@ def test_emd1d_device_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- xb = nx.from_numpy(x)
- rho_ub = nx.from_numpy(rho_u)
- rho_vb = nx.from_numpy(rho_v)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
emd = ot.emd_1d(xb, xb, rho_ub, rho_vb)
emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb)
nx.assert_same_dtype_device(xb, emd)
@@ -224,9 +212,7 @@ def test_emd1d_device_tf():
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- xb = nx.from_numpy(x)
- rho_ub = nx.from_numpy(rho_u)
- rho_vb = nx.from_numpy(rho_v)
+ xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
emd = ot.emd_1d(xb, xb, rho_ub, rho_vb)
emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb)
nx.assert_same_dtype_device(xb, emd)
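
For illustration, a minimal sketch of the 1D solvers on backend arrays, using the NumPy backend (not part of the commit; the tests above run the same calls on every backend and device):

    import numpy as np
    import ot
    from ot.backend import NumpyBackend

    nx = NumpyBackend()
    rng = np.random.RandomState(0)
    x = rng.randn(30)
    rho_u = np.abs(rng.randn(30))
    rho_u /= rho_u.sum()
    rho_v = np.abs(rng.randn(30))
    rho_v /= rho_v.sum()

    xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v)
    plan = ot.emd_1d(xb, xb, rho_ub, rho_vb)   # transport plan, same backend as inputs
    cost = ot.emd2_1d(xb, xb, rho_ub, rho_vb)  # scalar cost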
diff --git a/test/test_backend.py b/test/test_backend.py
index 027c4cd..311c075 100644
--- a/test/test_backend.py
+++ b/test/test_backend.py
@@ -218,6 +218,8 @@ def test_empty_backend():
with pytest.raises(NotImplementedError):
nx.argmax(M)
with pytest.raises(NotImplementedError):
+ nx.argmin(M)
+ with pytest.raises(NotImplementedError):
nx.mean(M)
with pytest.raises(NotImplementedError):
nx.std(M)
@@ -264,12 +266,27 @@ def test_empty_backend():
nx.device_type(M)
with pytest.raises(NotImplementedError):
nx._bench(lambda x: x, M, n_runs=1)
+ with pytest.raises(NotImplementedError):
+ nx.solve(M, v)
+ with pytest.raises(NotImplementedError):
+ nx.trace(M)
+ with pytest.raises(NotImplementedError):
+ nx.inv(M)
+ with pytest.raises(NotImplementedError):
+ nx.sqrtm(M)
+ with pytest.raises(NotImplementedError):
+ nx.isfinite(M)
+ with pytest.raises(NotImplementedError):
+ nx.array_equal(M, M)
+ with pytest.raises(NotImplementedError):
+ nx.is_floating_point(M)
def test_func_backends(nx):
rnd = np.random.RandomState(0)
M = rnd.randn(10, 3)
+ SquareM = rnd.randn(10, 10)
v = rnd.randn(3)
val = np.array([1.0])
@@ -288,6 +305,7 @@ def test_func_backends(nx):
lst_name = []
Mb = nx.from_numpy(M)
+ SquareMb = nx.from_numpy(SquareM)
vb = nx.from_numpy(v)
val = nx.from_numpy(val)
@@ -467,6 +485,10 @@ def test_func_backends(nx):
lst_b.append(nx.to_numpy(A))
lst_name.append('argmax')
+ A = nx.argmin(Mb)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append('argmin')
+
A = nx.mean(Mb)
lst_b.append(nx.to_numpy(A))
lst_name.append('mean')
@@ -529,7 +551,11 @@ def test_func_backends(nx):
A = nx.where(Mb >= nx.stack([nx.linspace(0, 1, 10)] * 3, axis=1), Mb, 0.0)
lst_b.append(nx.to_numpy(A))
- lst_name.append('where')
+ lst_name.append('where (cond, x, y)')
+
+ A = nx.where(nx.from_numpy(np.array([True, False])))
+ lst_b.append(nx.to_numpy(nx.stack(A)))
+ lst_name.append('where (cond)')
A = nx.copy(Mb)
lst_b.append(nx.to_numpy(A))
@@ -550,15 +576,47 @@ def test_func_backends(nx):
nx._bench(lambda x: x, M, n_runs=1)
+ A = nx.solve(SquareMb, Mb)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append('solve')
+
+ A = nx.trace(SquareMb)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append('trace')
+
+ A = nx.inv(SquareMb)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append('matrix inverse')
+
+ A = nx.sqrtm(SquareMb.T @ SquareMb)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append("matrix square root")
+
+ A = nx.concatenate([vb, nx.from_numpy(np.array([np.inf, np.nan]))], axis=0)
+ A = nx.isfinite(A)
+ lst_b.append(nx.to_numpy(A))
+ lst_name.append("isfinite")
+
+ assert not nx.array_equal(Mb, vb), "array_equal (shape)"
+ assert nx.array_equal(Mb, Mb), "array_equal (elements) - expected true"
+ assert not nx.array_equal(
+ Mb, Mb + nx.eye(*list(Mb.shape))
+ ), "array_equal (elements) - expected false"
+
+ assert nx.is_floating_point(Mb), "is_floating_point - expected true"
+ assert not nx.is_floating_point(
+ nx.from_numpy(np.array([0, 1, 2], dtype=int))
+ ), "is_floating_point - expected false"
+
lst_tot.append(lst_b)
lst_np = lst_tot[0]
lst_b = lst_tot[1]
for a1, a2, name in zip(lst_np, lst_b, lst_name):
- if not np.allclose(a1, a2):
- print('Assert fail on: ', name)
- assert np.allclose(a1, a2, atol=1e-7)
+ np.testing.assert_allclose(
+ a2, a1, atol=1e-7, err_msg=f'ASSERT FAILED ON: {name}'
+ )
def test_random_backends(nx):
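
The backend methods added by this commit and exercised above (solve, trace, inv, sqrtm, isfinite, array_equal, is_floating_point, argmin) share one calling convention across backends. A minimal sketch on the NumPy backend:

    import numpy as np
    from ot.backend import NumpyBackend

    nx = NumpyBackend()
    rng = np.random.RandomState(0)
    M = rng.randn(5, 5)
    S = M.T @ M  # symmetric positive definite, so sqrtm is well defined

    tr = nx.trace(S)              # trace
    S_inv = nx.inv(S)             # matrix inverse
    x = nx.solve(S, np.ones(5))   # solve S x = b
    R = nx.sqrtm(S)               # matrix square root, R @ R is close to S
    i = nx.argmin(M)              # index of the flattened minimum

    assert nx.array_equal(S, S)
    assert nx.is_floating_point(S)
    assert bool(nx.isfinite(S).all())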
diff --git a/test/test_bregman.py b/test/test_bregman.py
index 1419f9b..6c37984 100644
--- a/test/test_bregman.py
+++ b/test/test_bregman.py
@@ -155,8 +155,7 @@ def test_sinkhorn_backends(nx):
G = ot.sinkhorn(a, a, M, 1)
- ab = nx.from_numpy(a)
- M_nx = nx.from_numpy(M)
+ ab, M_nx = nx.from_numpy(a, M)
Gb = ot.sinkhorn(ab, ab, M_nx, 1)
@@ -176,8 +175,7 @@ def test_sinkhorn2_backends(nx):
G = ot.sinkhorn(a, a, M, 1)
- ab = nx.from_numpy(a)
- M_nx = nx.from_numpy(M)
+ ab, M_nx = nx.from_numpy(a, M)
Gb = ot.sinkhorn2(ab, ab, M_nx, 1)
@@ -260,8 +258,7 @@ def test_sinkhorn_variants(nx):
M = ot.dist(x, x)
- ub = nx.from_numpy(u)
- M_nx = nx.from_numpy(M)
+ ub, M_nx = nx.from_numpy(u, M)
G = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn(ub, ub, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
@@ -298,8 +295,7 @@ def test_sinkhorn_variants_dtype_device(nx, method):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- ub = nx.from_numpy(u, type_as=tp)
- Mb = nx.from_numpy(M, type_as=tp)
+ ub, Mb = nx.from_numpy(u, M, type_as=tp)
Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
@@ -318,8 +314,7 @@ def test_sinkhorn2_variants_dtype_device(nx, method):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- ub = nx.from_numpy(u, type_as=tp)
- Mb = nx.from_numpy(M, type_as=tp)
+ ub, Mb = nx.from_numpy(u, M, type_as=tp)
lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
@@ -337,8 +332,7 @@ def test_sinkhorn2_variants_device_tf(method):
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- ub = nx.from_numpy(u)
- Mb = nx.from_numpy(M)
+ ub, Mb = nx.from_numpy(u, M)
Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
nx.assert_same_dtype_device(Mb, Gb)
@@ -346,8 +340,7 @@ def test_sinkhorn2_variants_device_tf(method):
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- ub = nx.from_numpy(u)
- Mb = nx.from_numpy(M)
+ ub, Mb = nx.from_numpy(u, M)
Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
nx.assert_same_dtype_device(Mb, Gb)
@@ -370,9 +363,7 @@ def test_sinkhorn_variants_multi_b(nx):
M = ot.dist(x, x)
- ub = nx.from_numpy(u)
- bb = nx.from_numpy(b)
- M_nx = nx.from_numpy(M)
+ ub, bb, M_nx = nx.from_numpy(u, b, M)
G = ot.sinkhorn(u, b, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn(ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
@@ -400,9 +391,7 @@ def test_sinkhorn2_variants_multi_b(nx):
M = ot.dist(x, x)
- ub = nx.from_numpy(u)
- bb = nx.from_numpy(b)
- M_nx = nx.from_numpy(M)
+ ub, bb, M_nx = nx.from_numpy(u, b, M)
G = ot.sinkhorn2(u, b, M, 1, method='sinkhorn', stopThr=1e-10)
Gl = nx.to_numpy(ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
@@ -483,9 +472,7 @@ def test_barycenter(nx, method, verbose, warn):
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
- A_nx = nx.from_numpy(A)
- M_nx = nx.from_numpy(M)
- weights_nx = nx.from_numpy(weights)
+ A_nx, M_nx, weights_nx = nx.from_numpy(A, M, weights)
reg = 1e-2
if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log":
@@ -523,9 +510,7 @@ def test_barycenter_debiased(nx, method, verbose, warn):
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
- A_nx = nx.from_numpy(A)
- M_nx = nx.from_numpy(M)
- weights_nx = nx.from_numpy(weights)
+ A_nx, M_nx, weights_nx = nx.from_numpy(A, M, weights)
# wasserstein
reg = 1e-2
@@ -594,9 +579,7 @@ def test_barycenter_stabilization(nx):
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
- A_nx = nx.from_numpy(A)
- M_nx = nx.from_numpy(M)
- weights_b = nx.from_numpy(weights)
+ A_nx, M_nx, weights_b = nx.from_numpy(A, M, weights)
# wasserstein
reg = 1e-2
@@ -697,11 +680,7 @@ def test_unmix(nx):
M0 /= M0.max()
h0 = ot.unif(2)
- ab = nx.from_numpy(a)
- Db = nx.from_numpy(D)
- M_nx = nx.from_numpy(M)
- M0b = nx.from_numpy(M0)
- h0b = nx.from_numpy(h0)
+ ab, Db, M_nx, M0b, h0b = nx.from_numpy(a, D, M, M0, h0)
# wasserstein
reg = 1e-3
@@ -727,12 +706,7 @@ def test_empirical_sinkhorn(nx):
M = ot.dist(X_s, X_t)
M_m = ot.dist(X_s, X_t, metric='euclidean')
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- X_sb = nx.from_numpy(X_s)
- X_tb = nx.from_numpy(X_t)
- M_nx = nx.from_numpy(M, type_as=ab)
- M_mb = nx.from_numpy(M_m, type_as=ab)
+ ab, bb, X_sb, X_tb, M_nx, M_mb = nx.from_numpy(a, b, X_s, X_t, M, M_m)
G_sqe = nx.to_numpy(ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1))
sinkhorn_sqe = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1))
@@ -776,12 +750,7 @@ def test_lazy_empirical_sinkhorn(nx):
M = ot.dist(X_s, X_t)
M_m = ot.dist(X_s, X_t, metric='euclidean')
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- X_sb = nx.from_numpy(X_s)
- X_tb = nx.from_numpy(X_t)
- M_nx = nx.from_numpy(M, type_as=ab)
- M_mb = nx.from_numpy(M_m, type_as=ab)
+ ab, bb, X_sb, X_tb, M_nx, M_mb = nx.from_numpy(a, b, X_s, X_t, M, M_m)
f, g = ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1, numIterMax=numIterMax, isLazy=True, batchSize=(1, 3), verbose=True)
f, g = nx.to_numpy(f), nx.to_numpy(g)
@@ -825,19 +794,13 @@ def test_empirical_sinkhorn_divergence(nx):
a = np.linspace(1, n, n)
a /= a.sum()
b = ot.unif(n)
- X_s = np.reshape(np.arange(n), (n, 1))
- X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1))
+ X_s = np.reshape(np.arange(n, dtype=np.float64), (n, 1))
+ X_t = np.reshape(np.arange(0, n * 2, 2, dtype=np.float64), (n, 1))
M = ot.dist(X_s, X_t)
M_s = ot.dist(X_s, X_s)
M_t = ot.dist(X_t, X_t)
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- X_sb = nx.from_numpy(X_s)
- X_tb = nx.from_numpy(X_t)
- M_nx = nx.from_numpy(M, type_as=ab)
- M_sb = nx.from_numpy(M_s, type_as=ab)
- M_tb = nx.from_numpy(M_t, type_as=ab)
+ ab, bb, X_sb, X_tb, M_nx, M_sb, M_tb = nx.from_numpy(a, b, X_s, X_t, M, M_s, M_t)
emp_sinkhorn_div = nx.to_numpy(ot.bregman.empirical_sinkhorn_divergence(X_sb, X_tb, 1, a=ab, b=bb))
sinkhorn_div = nx.to_numpy(
@@ -872,9 +835,7 @@ def test_stabilized_vs_sinkhorn_multidim(nx):
M /= np.median(M)
epsilon = 0.1
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- M_nx = nx.from_numpy(M, type_as=ab)
+ ab, bb, M_nx = nx.from_numpy(a, b, M)
G_np, _ = ot.bregman.sinkhorn(a, b, M, reg=epsilon, method="sinkhorn", log=True)
G, log = ot.bregman.sinkhorn(ab, bb, M_nx, reg=epsilon,
@@ -936,9 +897,7 @@ def test_screenkhorn(nx):
x = rng.randn(n, 2)
M = ot.dist(x, x)
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- M_nx = nx.from_numpy(M, type_as=ab)
+ ab, bb, M_nx = nx.from_numpy(a, b, M)
# sinkhorn
G_sink = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1e-1))
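
For illustration, a minimal sketch of the empirical Sinkhorn solvers on backend arrays, using the NumPy backend (an assumption for brevity; the tests above repeat the same calls on every backend):

    import numpy as np
    import ot
    from ot.backend import NumpyBackend

    nx = NumpyBackend()
    rng = np.random.RandomState(42)
    X_s = rng.randn(10, 2)
    X_t = rng.randn(10, 2)
    ab, bb, X_sb, X_tb = nx.from_numpy(ot.unif(10), ot.unif(10), X_s, X_t)

    # the cost matrix is built internally, on the same backend as the samples
    G = ot.bregman.empirical_sinkhorn(X_sb, X_tb, reg=1.)
    div = ot.bregman.empirical_sinkhorn_divergence(X_sb, X_tb, 1, a=ab, b=bb)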
diff --git a/test/test_da.py b/test/test_da.py
index 9f2bb50..4bf0ab1 100644
--- a/test/test_da.py
+++ b/test/test_da.py
@@ -19,7 +19,32 @@ except ImportError:
nosklearn = True
-def test_sinkhorn_lpl1_transport_class():
+def test_class_jax_tf():
+ backends = []
+ from ot.backend import jax, tf
+ if jax:
+ backends.append(ot.backend.JaxBackend())
+ if tf:
+ backends.append(ot.backend.TensorflowBackend())
+
+ for nx in backends:
+ ns = 150
+ nt = 200
+
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
+
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
+ otda = ot.da.SinkhornLpl1Transport()
+
+ with pytest.raises(TypeError):
+ otda.fit(Xs=Xs, ys=ys, Xt=Xt)
+
+
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_sinkhorn_lpl1_transport_class(nx):
"""test_sinkhorn_transport
"""
@@ -29,6 +54,8 @@ def test_sinkhorn_lpl1_transport_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.SinkhornLpl1Transport()
# test its computed
@@ -44,15 +71,15 @@ def test_sinkhorn_lpl1_transport_class():
mu_s = unif(ns)
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -62,7 +89,7 @@ def test_sinkhorn_lpl1_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -85,24 +112,26 @@ def test_sinkhorn_lpl1_transport_class():
# test unsupervised vs semi-supervised mode
otda_unsup = ot.da.SinkhornLpl1Transport()
otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)
- n_unsup = np.sum(otda_unsup.cost_)
+ n_unsup = nx.sum(otda_unsup.cost_)
otda_semi = ot.da.SinkhornLpl1Transport()
otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
- n_semisup = np.sum(otda_semi.cost_)
+ n_semisup = nx.sum(otda_semi.cost_)
# check that the cost matrix norms are indeed different
assert n_unsup != n_semisup, "semisupervised mode not working"
# check that the coupling forbids mass transport between labeled source
# and labeled target samples
- mass_semi = np.sum(
+ mass_semi = nx.sum(
otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])
assert mass_semi == 0, "semisupervised mode not working"
-def test_sinkhorn_l1l2_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_sinkhorn_l1l2_transport_class(nx):
"""test_sinkhorn_transport
"""
@@ -112,6 +141,8 @@ def test_sinkhorn_l1l2_transport_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.SinkhornL1l2Transport()
# test its computed
@@ -128,15 +159,15 @@ def test_sinkhorn_l1l2_transport_class():
mu_s = unif(ns)
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -156,7 +187,7 @@ def test_sinkhorn_l1l2_transport_class():
assert_equal(transp_ys.shape[0], ys.shape[0])
assert_equal(transp_ys.shape[1], len(np.unique(yt)))
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -169,22 +200,22 @@ def test_sinkhorn_l1l2_transport_class():
# test unsupervised vs semi-supervised mode
otda_unsup = ot.da.SinkhornL1l2Transport()
otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)
- n_unsup = np.sum(otda_unsup.cost_)
+ n_unsup = nx.sum(otda_unsup.cost_)
otda_semi = ot.da.SinkhornL1l2Transport()
otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
- n_semisup = np.sum(otda_semi.cost_)
+ n_semisup = nx.sum(otda_semi.cost_)
# check that the cost matrix norms are indeed different
assert n_unsup != n_semisup, "semisupervised mode not working"
# check that the coupling forbids mass transport between labeled source
# and labeled target samples
- mass_semi = np.sum(
+ mass_semi = nx.sum(
otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])
mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]
- assert_allclose(mass_semi, np.zeros_like(mass_semi),
+ assert_allclose(nx.to_numpy(mass_semi), np.zeros(list(mass_semi.shape)),
rtol=1e-9, atol=1e-9)
# check everything runs well with log=True
@@ -193,7 +224,9 @@ def test_sinkhorn_l1l2_transport_class():
assert len(otda.log_.keys()) != 0
-def test_sinkhorn_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_sinkhorn_transport_class(nx):
"""test_sinkhorn_transport
"""
@@ -203,6 +236,8 @@ def test_sinkhorn_transport_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.SinkhornTransport()
# test its computed
@@ -219,15 +254,15 @@ def test_sinkhorn_transport_class():
mu_s = unif(ns)
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -247,7 +282,7 @@ def test_sinkhorn_transport_class():
assert_equal(transp_ys.shape[0], ys.shape[0])
assert_equal(transp_ys.shape[1], len(np.unique(yt)))
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -260,19 +295,19 @@ def test_sinkhorn_transport_class():
# test unsupervised vs semi-supervised mode
otda_unsup = ot.da.SinkhornTransport()
otda_unsup.fit(Xs=Xs, Xt=Xt)
- n_unsup = np.sum(otda_unsup.cost_)
+ n_unsup = nx.sum(otda_unsup.cost_)
otda_semi = ot.da.SinkhornTransport()
otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
- n_semisup = np.sum(otda_semi.cost_)
+ n_semisup = nx.sum(otda_semi.cost_)
# check that the cost matrix norms are indeed different
assert n_unsup != n_semisup, "semisupervised mode not working"
# check that the coupling forbids mass transport between labeled source
# and labeled target samples
- mass_semi = np.sum(
+ mass_semi = nx.sum(
otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])
assert mass_semi == 0, "semisupervised mode not working"
@@ -282,7 +317,9 @@ def test_sinkhorn_transport_class():
assert len(otda.log_.keys()) != 0
-def test_unbalanced_sinkhorn_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_unbalanced_sinkhorn_transport_class(nx):
"""test_sinkhorn_transport
"""
@@ -292,6 +329,8 @@ def test_unbalanced_sinkhorn_transport_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.UnbalancedSinkhornTransport()
# test its computed
@@ -318,7 +357,7 @@ def test_unbalanced_sinkhorn_transport_class():
assert_equal(transp_ys.shape[0], ys.shape[0])
assert_equal(transp_ys.shape[1], len(np.unique(yt)))
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -328,7 +367,7 @@ def test_unbalanced_sinkhorn_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -341,12 +380,12 @@ def test_unbalanced_sinkhorn_transport_class():
# test unsupervised vs semi-supervised mode
otda_unsup = ot.da.SinkhornTransport()
otda_unsup.fit(Xs=Xs, Xt=Xt)
- n_unsup = np.sum(otda_unsup.cost_)
+ n_unsup = nx.sum(otda_unsup.cost_)
otda_semi = ot.da.SinkhornTransport()
otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
- n_semisup = np.sum(otda_semi.cost_)
+ n_semisup = nx.sum(otda_semi.cost_)
# check that the cost matrix norms are indeed different
assert n_unsup != n_semisup, "semisupervised mode not working"
@@ -357,7 +396,9 @@ def test_unbalanced_sinkhorn_transport_class():
assert len(otda.log_.keys()) != 0
-def test_emd_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_emd_transport_class(nx):
"""test_sinkhorn_transport
"""
@@ -367,6 +408,8 @@ def test_emd_transport_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.EMDTransport()
# test its computed
@@ -382,15 +425,15 @@ def test_emd_transport_class():
mu_s = unif(ns)
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -410,7 +453,7 @@ def test_emd_transport_class():
assert_equal(transp_ys.shape[0], ys.shape[0])
assert_equal(transp_ys.shape[1], len(np.unique(yt)))
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -423,28 +466,32 @@ def test_emd_transport_class():
# test unsupervised vs semi-supervised mode
otda_unsup = ot.da.EMDTransport()
otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)
- n_unsup = np.sum(otda_unsup.cost_)
+ n_unsup = nx.sum(otda_unsup.cost_)
otda_semi = ot.da.EMDTransport()
otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
- n_semisup = np.sum(otda_semi.cost_)
+ n_semisup = nx.sum(otda_semi.cost_)
# check that the cost matrix norms are indeed different
assert n_unsup != n_semisup, "semisupervised mode not working"
# check that the coupling forbids mass transport between labeled source
# and labeled target samples
- mass_semi = np.sum(
+ mass_semi = nx.sum(
otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])
mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]
# we need to use a small tolerance here, otherwise the test breaks
- assert_allclose(mass_semi, np.zeros_like(mass_semi),
+ assert_allclose(nx.to_numpy(mass_semi), np.zeros(list(mass_semi.shape)),
rtol=1e-2, atol=1e-2)
-def test_mapping_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+@pytest.mark.parametrize("kernel", ["linear", "gaussian"])
+@pytest.mark.parametrize("bias", ["unbiased", "biased"])
+def test_mapping_transport_class(nx, kernel, bias):
"""test_mapping_transport
"""
@@ -455,101 +502,29 @@ def test_mapping_transport_class():
Xt, yt = make_data_classif('3gauss2', nt)
Xs_new, _ = make_data_classif('3gauss', ns + 1)
- ##########################################################################
- # kernel == linear mapping tests
- ##########################################################################
+ Xs, Xt, Xs_new = nx.from_numpy(Xs, Xt, Xs_new)
- # check computation and dimensions if bias == False
- otda = ot.da.MappingTransport(kernel="linear", bias=False)
+ # Mapping tests
+ bias = bias == "biased"
+ otda = ot.da.MappingTransport(kernel=kernel, bias=bias)
otda.fit(Xs=Xs, Xt=Xt)
assert hasattr(otda, "coupling_")
assert hasattr(otda, "mapping_")
assert hasattr(otda, "log_")
assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
- assert_equal(otda.mapping_.shape, ((Xs.shape[1], Xt.shape[1])))
+ S = Xs.shape[0] if kernel == "gaussian" else Xs.shape[1] # if linear
+ if bias:
+ S += 1
+ assert_equal(otda.mapping_.shape, ((S, Xt.shape[1])))
# test margin constraints
mu_s = unif(ns)
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
-
- # test transform
- transp_Xs = otda.transform(Xs=Xs)
- assert_equal(transp_Xs.shape, Xs.shape)
-
- transp_Xs_new = otda.transform(Xs_new)
-
- # check that the oos method is working
- assert_equal(transp_Xs_new.shape, Xs_new.shape)
-
- # check computation and dimensions if bias == True
- otda = ot.da.MappingTransport(kernel="linear", bias=True)
- otda.fit(Xs=Xs, Xt=Xt)
- assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
- assert_equal(otda.mapping_.shape, ((Xs.shape[1] + 1, Xt.shape[1])))
-
- # test margin constraints
- mu_s = unif(ns)
- mu_t = unif(nt)
- assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
- assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
-
- # test transform
- transp_Xs = otda.transform(Xs=Xs)
- assert_equal(transp_Xs.shape, Xs.shape)
-
- transp_Xs_new = otda.transform(Xs_new)
-
- # check that the oos method is working
- assert_equal(transp_Xs_new.shape, Xs_new.shape)
-
- ##########################################################################
- # kernel == gaussian mapping tests
- ##########################################################################
-
- # check computation and dimensions if bias == False
- otda = ot.da.MappingTransport(kernel="gaussian", bias=False)
- otda.fit(Xs=Xs, Xt=Xt)
-
- assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
- assert_equal(otda.mapping_.shape, ((Xs.shape[0], Xt.shape[1])))
-
- # test margin constraints
- mu_s = unif(ns)
- mu_t = unif(nt)
- assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
- assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
-
- # test transform
- transp_Xs = otda.transform(Xs=Xs)
- assert_equal(transp_Xs.shape, Xs.shape)
-
- transp_Xs_new = otda.transform(Xs_new)
-
- # check that the oos method is working
- assert_equal(transp_Xs_new.shape, Xs_new.shape)
-
- # check computation and dimensions if bias == True
- otda = ot.da.MappingTransport(kernel="gaussian", bias=True)
- otda.fit(Xs=Xs, Xt=Xt)
- assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
- assert_equal(otda.mapping_.shape, ((Xs.shape[0] + 1, Xt.shape[1])))
-
- # test margin constraints
- mu_s = unif(ns)
- mu_t = unif(nt)
- assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
- assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
@@ -561,29 +536,39 @@ def test_mapping_transport_class():
assert_equal(transp_Xs_new.shape, Xs_new.shape)
# check everything runs well with log=True
- otda = ot.da.MappingTransport(kernel="gaussian", log=True)
+ otda = ot.da.MappingTransport(kernel=kernel, bias=bias, log=True)
otda.fit(Xs=Xs, Xt=Xt)
assert len(otda.log_.keys()) != 0
+
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_mapping_transport_class_specific_seed(nx):
# check that it does not crash when derphi is very close to 0
+ ns = 20
+ nt = 30
np.random.seed(39)
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
otda = ot.da.MappingTransport(kernel="gaussian", bias=False)
- otda.fit(Xs=Xs, Xt=Xt)
+ otda.fit(Xs=nx.from_numpy(Xs), Xt=nx.from_numpy(Xt))
np.random.seed(None)
-def test_linear_mapping():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_linear_mapping(nx):
ns = 150
nt = 200
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
- A, b = ot.da.OT_mapping_linear(Xs, Xt)
+ Xsb, Xtb = nx.from_numpy(Xs, Xt)
- Xst = Xs.dot(A) + b
+ A, b = ot.da.OT_mapping_linear(Xsb, Xtb)
+
+ Xst = nx.to_numpy(nx.dot(Xsb, A) + b)
Ct = np.cov(Xt.T)
Cst = np.cov(Xst.T)
@@ -591,22 +576,26 @@ def test_linear_mapping():
np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)
-def test_linear_mapping_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_linear_mapping_class(nx):
ns = 150
nt = 200
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xsb, Xtb = nx.from_numpy(Xs, Xt)
+
otmap = ot.da.LinearTransport()
- otmap.fit(Xs=Xs, Xt=Xt)
+ otmap.fit(Xs=Xsb, Xt=Xtb)
assert hasattr(otmap, "A_")
assert hasattr(otmap, "B_")
assert hasattr(otmap, "A1_")
assert hasattr(otmap, "B1_")
- Xst = otmap.transform(Xs=Xs)
+ Xst = nx.to_numpy(otmap.transform(Xs=Xsb))
Ct = np.cov(Xt.T)
Cst = np.cov(Xst.T)
@@ -614,7 +603,9 @@ def test_linear_mapping_class():
np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)
-def test_jcpot_transport_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_jcpot_transport_class(nx):
"""test_jcpot_transport
"""
@@ -627,6 +618,8 @@ def test_jcpot_transport_class():
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs1, ys1, Xs2, ys2, Xt, yt = nx.from_numpy(Xs1, ys1, Xs2, ys2, Xt, yt)
+
Xs = [Xs1, Xs2]
ys = [ys1, ys2]
@@ -649,19 +642,24 @@ def test_jcpot_transport_class():
for i in range(len(Xs)):
# test margin constraints w.r.t. uniform target weights for each coupling matrix
assert_allclose(
- np.sum(otda.coupling_[i], axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_[i], axis=0)), mu_t, rtol=1e-3, atol=1e-3)
# test margin constraints w.r.t. modified source weights for each source domain
assert_allclose(
- np.dot(otda.log_['D1'][i], np.sum(otda.coupling_[i], axis=1)), otda.proportions_, rtol=1e-3,
- atol=1e-3)
+ nx.to_numpy(
+ nx.dot(otda.log_['D1'][i], nx.sum(otda.coupling_[i], axis=1))
+ ),
+ nx.to_numpy(otda.proportions_),
+ rtol=1e-3,
+ atol=1e-3
+ )
# test transform
transp_Xs = otda.transform(Xs=Xs)
[assert_equal(x.shape, y.shape) for x, y in zip(transp_Xs, Xs)]
- Xs_new, _ = make_data_classif('3gauss', ns1 + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns1 + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -670,15 +668,16 @@ def test_jcpot_transport_class():
# check label propagation
transp_yt = otda.transform_labels(ys)
assert_equal(transp_yt.shape[0], yt.shape[0])
- assert_equal(transp_yt.shape[1], len(np.unique(ys)))
+ assert_equal(transp_yt.shape[1], len(np.unique(nx.to_numpy(*ys))))
# check inverse label propagation
transp_ys = otda.inverse_transform_labels(yt)
- [assert_equal(x.shape[0], y.shape[0]) for x, y in zip(transp_ys, ys)]
- [assert_equal(x.shape[1], len(np.unique(y))) for x, y in zip(transp_ys, ys)]
+ for x, y in zip(transp_ys, ys):
+ assert_equal(x.shape[0], y.shape[0])
+ assert_equal(x.shape[1], len(np.unique(nx.to_numpy(y))))
-def test_jcpot_barycenter():
+def test_jcpot_barycenter(nx):
"""test_jcpot_barycenter
"""
@@ -695,19 +694,23 @@ def test_jcpot_barycenter():
Xs1, ys1 = make_data_classif('2gauss_prop', ns1, nz=sigma, p=ps1)
Xs2, ys2 = make_data_classif('2gauss_prop', ns2, nz=sigma, p=ps2)
- Xt, yt = make_data_classif('2gauss_prop', nt, nz=sigma, p=pt)
+ Xt, _ = make_data_classif('2gauss_prop', nt, nz=sigma, p=pt)
- Xs = [Xs1, Xs2]
- ys = [ys1, ys2]
+ Xs1b, ys1b, Xs2b, ys2b, Xtb = nx.from_numpy(Xs1, ys1, Xs2, ys2, Xt)
- prop = ot.bregman.jcpot_barycenter(Xs, ys, Xt, reg=.5, metric='sqeuclidean',
+ Xsb = [Xs1b, Xs2b]
+ ysb = [ys1b, ys2b]
+
+ prop = ot.bregman.jcpot_barycenter(Xsb, ysb, Xtb, reg=.5, metric='sqeuclidean',
numItermax=10000, stopThr=1e-9, verbose=False, log=False)
- np.testing.assert_allclose(prop, [1 - pt, pt], rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(nx.to_numpy(prop), [1 - pt, pt], rtol=1e-3, atol=1e-3)
@pytest.mark.skipif(nosklearn, reason="No sklearn available")
-def test_emd_laplace_class():
+@pytest.skip_backend("jax")
+@pytest.skip_backend("tf")
+def test_emd_laplace_class(nx):
"""test_emd_laplace_transport
"""
ns = 150
@@ -716,6 +719,8 @@ def test_emd_laplace_class():
Xs, ys = make_data_classif('3gauss', ns)
Xt, yt = make_data_classif('3gauss2', nt)
+ Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt)
+
otda = ot.da.EMDLaplaceTransport(reg_lap=0.01, max_iter=1000, tol=1e-9, verbose=False, log=True)
# test its computed
@@ -732,15 +737,15 @@ def test_emd_laplace_class():
mu_t = unif(nt)
assert_allclose(
- np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3)
assert_allclose(
- np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)
+ nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3)
# test transform
transp_Xs = otda.transform(Xs=Xs)
[assert_equal(x.shape, y.shape) for x, y in zip(transp_Xs, Xs)]
- Xs_new, _ = make_data_classif('3gauss', ns + 1)
+ Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0])
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -750,7 +755,7 @@ def test_emd_laplace_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = make_data_classif('3gauss2', nt + 1)
+ Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0])
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -763,9 +768,9 @@ def test_emd_laplace_class():
# check label propagation
transp_yt = otda.transform_labels(ys)
assert_equal(transp_yt.shape[0], yt.shape[0])
- assert_equal(transp_yt.shape[1], len(np.unique(ys)))
+ assert_equal(transp_yt.shape[1], len(np.unique(nx.to_numpy(ys))))
# check inverse label propagation
transp_ys = otda.inverse_transform_labels(yt)
assert_equal(transp_ys.shape[0], ys.shape[0])
- assert_equal(transp_ys.shape[1], len(np.unique(yt)))
+ assert_equal(transp_ys.shape[1], len(np.unique(nx.to_numpy(yt))))
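
For illustration, a minimal sketch of a domain-adaptation class consuming backend arrays end to end, assuming the PyTorch backend is installed (jax and tf are skipped above because the DA classes do not support them):

    import torch  # assumption: PyTorch backend available
    import ot
    from ot.datasets import make_data_classif

    Xs, ys = make_data_classif('3gauss', 150)
    Xt, yt = make_data_classif('3gauss2', 200)
    Xs, ys, Xt, yt = map(torch.from_numpy, (Xs, ys, Xt, yt))

    # transport classes now accept and return backend arrays directly
    otda = ot.da.SinkhornTransport(reg_e=1e-1)
    otda.fit(Xs=Xs, ys=ys, Xt=Xt)
    transp_Xs = otda.transform(Xs=Xs)  # a torch tensor, like the inputs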
diff --git a/test/test_gromov.py b/test/test_gromov.py
index 0dcf2da..12fd2b9 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -35,11 +35,7 @@ def test_gromov(nx):
C1 /= C1.max()
C2 /= C2.max()
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
- G0b = nx.from_numpy(G0)
+ C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)
G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss', G0=G0, verbose=True)
Gb = nx.to_numpy(ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', G0=G0b, verbose=True))
@@ -105,11 +101,7 @@ def test_gromov_dtype_device(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- C1b = nx.from_numpy(C1, type_as=tp)
- C2b = nx.from_numpy(C2, type_as=tp)
- pb = nx.from_numpy(p, type_as=tp)
- qb = nx.from_numpy(q, type_as=tp)
- G0b = nx.from_numpy(G0, type_as=tp)
+ C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0, type_as=tp)
Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', G0=G0b, verbose=True)
gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', G0=G0b, log=False)
@@ -136,11 +128,7 @@ def test_gromov_device_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
- G0b = nx.from_numpy(G0)
+ C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)
Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', G0=G0b, verbose=True)
gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', G0=G0b, log=False)
nx.assert_same_dtype_device(C1b, Gb)
@@ -148,11 +136,7 @@ def test_gromov_device_tf():
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
- G0b = nx.from_numpy(G0b)
+ C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)
Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', verbose=True)
gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', log=False)
nx.assert_same_dtype_device(C1b, Gb)
@@ -222,10 +206,7 @@ def test_entropic_gromov(nx):
C1 /= C1.max()
C2 /= C2.max()
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
+ C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)
G = ot.gromov.entropic_gromov_wasserstein(
C1, C2, p, q, 'square_loss', epsilon=5e-4, verbose=True)
@@ -285,10 +266,7 @@ def test_entropic_gromov_dtype_device(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- C1b = nx.from_numpy(C1, type_as=tp)
- C2b = nx.from_numpy(C2, type_as=tp)
- pb = nx.from_numpy(p, type_as=tp)
- qb = nx.from_numpy(q, type_as=tp)
+ C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q, type_as=tp)
Gb = ot.gromov.entropic_gromov_wasserstein(
C1b, C2b, pb, qb, 'square_loss', epsilon=5e-4, verbose=True
@@ -320,10 +298,7 @@ def test_pointwise_gromov(nx):
C1 /= C1.max()
C2 /= C2.max()
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
+ C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)
def loss(x, y):
return np.abs(x - y)
@@ -381,10 +356,7 @@ def test_sampled_gromov(nx):
C1 /= C1.max()
C2 /= C2.max()
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
+ C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)
def loss(x, y):
return np.abs(x - y)
@@ -423,19 +395,15 @@ def test_gromov_barycenter(nx):
n_samples = 3
p = ot.unif(n_samples)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- p1b = nx.from_numpy(p1)
- p2b = nx.from_numpy(p2)
- pb = nx.from_numpy(p)
+ C1b, C2b, p1b, p2b, pb = nx.from_numpy(C1, C2, p1, p2, p)
Cb = ot.gromov.gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
- 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42
+ 'square_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42
)
Cbb = nx.to_numpy(ot.gromov.gromov_barycenters(
n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
- 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42
+ 'square_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42
))
np.testing.assert_allclose(Cb, Cbb, atol=1e-06)
np.testing.assert_allclose(Cbb.shape, (n_samples, n_samples))
@@ -443,15 +411,15 @@ def test_gromov_barycenter(nx):
# test of gromov_barycenters with `log` on
Cb_, err_ = ot.gromov.gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
- 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ 'square_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42, log=True
)
Cbb_, errb_ = ot.gromov.gromov_barycenters(
n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
- 'square_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ 'square_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42, log=True
)
Cbb_ = nx.to_numpy(Cbb_)
np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
- np.testing.assert_array_almost_equal(err_['err'], errb_['err'])
+ np.testing.assert_array_almost_equal(err_['err'], nx.to_numpy(*errb_['err']))
np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))
Cb2 = ot.gromov.gromov_barycenters(
@@ -468,15 +436,15 @@ def test_gromov_barycenter(nx):
# test of gromov_barycenters with `log` on
Cb2_, err2_ = ot.gromov.gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
- 'kl_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ 'kl_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42, log=True
)
Cb2b_, err2b_ = ot.gromov.gromov_barycenters(
n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5],
- 'kl_loss', max_iter=100, tol=1e-3, verbose=True, random_state=42, log=True
+ 'kl_loss', max_iter=100, tol=1e-3, verbose=False, random_state=42, log=True
)
Cb2b_ = nx.to_numpy(Cb2b_)
np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
- np.testing.assert_array_almost_equal(err2_['err'], err2_['err'])
+ np.testing.assert_array_almost_equal(err2_['err'], nx.to_numpy(*err2b_['err']))
np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))
@@ -495,11 +463,7 @@ def test_gromov_entropic_barycenter(nx):
n_samples = 2
p = ot.unif(n_samples)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- p1b = nx.from_numpy(p1)
- p2b = nx.from_numpy(p2)
- pb = nx.from_numpy(p)
+ C1b, C2b, p1b, p2b, pb = nx.from_numpy(C1, C2, p1, p2, p)
Cb = ot.gromov.entropic_gromov_barycenters(
n_samples, [C1, C2], [p1, p2], p, [.5, .5],
@@ -523,7 +487,7 @@ def test_gromov_entropic_barycenter(nx):
)
Cbb_ = nx.to_numpy(Cbb_)
np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
- np.testing.assert_array_almost_equal(err_['err'], errb_['err'])
+ np.testing.assert_array_almost_equal(err_['err'], nx.to_numpy(*errb_['err']))
np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))
Cb2 = ot.gromov.entropic_gromov_barycenters(
@@ -548,7 +512,7 @@ def test_gromov_entropic_barycenter(nx):
)
Cb2b_ = nx.to_numpy(Cb2b_)
np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
- np.testing.assert_array_almost_equal(err2_['err'], err2_['err'])
+ np.testing.assert_array_almost_equal(err2_['err'], nx.to_numpy(*err2b_['err']))
np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))
@@ -578,12 +542,7 @@ def test_fgw(nx):
M = ot.dist(ys, yt)
M /= M.max()
- Mb = nx.from_numpy(M)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- pb = nx.from_numpy(p)
- qb = nx.from_numpy(q)
- G0b = nx.from_numpy(G0)
+ Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)
G, log = ot.gromov.fused_gromov_wasserstein(M, C1, C2, p, q, 'square_loss', alpha=0.5, G0=G0, log=True)
Gb, logb = ot.gromov.fused_gromov_wasserstein(Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5, G0=G0b, log=True)
@@ -681,13 +640,7 @@ def test_fgw_barycenter(nx):
n_samples = 3
p = ot.unif(n_samples)
- ysb = nx.from_numpy(ys)
- ytb = nx.from_numpy(yt)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- p1b = nx.from_numpy(p1)
- p2b = nx.from_numpy(p2)
- pb = nx.from_numpy(p)
+ ysb, ytb, C1b, C2b, p1b, p2b, pb = nx.from_numpy(ys, yt, C1, C2, p1, p2, p)
Xb, Cb = ot.gromov.fgw_barycenters(
n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], [.5, .5], 0.5, fixed_structure=False,
@@ -731,10 +684,8 @@ def test_gromov_wasserstein_linear_unmixing(nx):
Cdict = np.stack([C1, C2])
p = ot.unif(n)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- Cdictb = nx.from_numpy(Cdict)
- pb = nx.from_numpy(p)
+ C1b, C2b, Cdictb, pb = nx.from_numpy(C1, C2, Cdict, p)
+
tol = 10**(-5)
# Tests without regularization
reg = 0.
@@ -764,8 +715,8 @@ def test_gromov_wasserstein_linear_unmixing(nx):
np.testing.assert_allclose(unmixing2, [0., 1.], atol=1e-01)
np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-06)
np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-06)
- np.testing.assert_allclose(reconstruction1, reconstruction1b, atol=1e-06)
- np.testing.assert_allclose(reconstruction2, reconstruction2b, atol=1e-06)
+ np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
+ np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
np.testing.assert_allclose(C1b_emb.shape, (n, n))
np.testing.assert_allclose(C2b_emb.shape, (n, n))
@@ -798,8 +749,8 @@ def test_gromov_wasserstein_linear_unmixing(nx):
np.testing.assert_allclose(unmixing2, [0., 1.], atol=1e-01)
np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-06)
np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-06)
- np.testing.assert_allclose(reconstruction1, reconstruction1b, atol=1e-06)
- np.testing.assert_allclose(reconstruction2, reconstruction2b, atol=1e-06)
+ np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
+ np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
np.testing.assert_allclose(C1b_emb.shape, (n, n))
np.testing.assert_allclose(C2b_emb.shape, (n, n))
@@ -824,13 +775,14 @@ def test_gromov_wasserstein_dictionary_learning(nx):
dataset_means = [C.mean() for C in Cs]
np.random.seed(0)
Cdict_init = np.random.normal(loc=np.mean(dataset_means), scale=np.std(dataset_means), size=(n_atoms, shape, shape))
+
if projection == 'nonnegative_symmetric':
Cdict_init = 0.5 * (Cdict_init + Cdict_init.transpose((0, 2, 1)))
Cdict_init[Cdict_init < 0.] = 0.
- Csb = [nx.from_numpy(C) for C in Cs]
- psb = [nx.from_numpy(p) for p in ps]
- qb = nx.from_numpy(q)
- Cdict_initb = nx.from_numpy(Cdict_init)
+
+ Csb = nx.from_numpy(*Cs)
+ psb = nx.from_numpy(*ps)
+ qb, Cdict_initb = nx.from_numpy(q, Cdict_init)
# Test: compare reconstruction error using initial dictionary and dictionary learned using this initialization
# > Compute initial reconstruction of samples on this random dictionary without backend
@@ -882,6 +834,7 @@ def test_gromov_wasserstein_dictionary_learning(nx):
)
total_reconstruction_b += reconstruction
+ total_reconstruction_b = nx.to_numpy(total_reconstruction_b)
np.testing.assert_array_less(total_reconstruction_b, initial_total_reconstruction)
np.testing.assert_allclose(total_reconstruction_b, total_reconstruction, atol=1e-05)
np.testing.assert_allclose(total_reconstruction_b, total_reconstruction, atol=1e-05)
@@ -924,6 +877,7 @@ def test_gromov_wasserstein_dictionary_learning(nx):
)
total_reconstruction_b_bis += reconstruction
+ total_reconstruction_b_bis = nx.to_numpy(total_reconstruction_b_bis)
np.testing.assert_allclose(total_reconstruction_b_bis, total_reconstruction_b, atol=1e-05)
np.testing.assert_allclose(Cdict_bis, nx.to_numpy(Cdictb_bis), atol=1e-03)
@@ -969,6 +923,7 @@ def test_gromov_wasserstein_dictionary_learning(nx):
)
total_reconstruction_b_bis2 += reconstruction
+ total_reconstruction_b_bis2 = nx.to_numpy(total_reconstruction_b_bis2)
np.testing.assert_allclose(total_reconstruction_b_bis2, total_reconstruction_bis2, atol=1e-05)
@@ -985,12 +940,8 @@ def test_fused_gromov_wasserstein_linear_unmixing(nx):
Ydict = np.stack([F, F])
p = ot.unif(n)
- C1b = nx.from_numpy(C1)
- C2b = nx.from_numpy(C2)
- Fb = nx.from_numpy(F)
- Cdictb = nx.from_numpy(Cdict)
- Ydictb = nx.from_numpy(Ydict)
- pb = nx.from_numpy(p)
+ C1b, C2b, Fb, Cdictb, Ydictb, pb = nx.from_numpy(C1, C2, F, Cdict, Ydict, p)
+
# Tests without regularization
reg = 0.
@@ -1022,8 +973,8 @@ def test_fused_gromov_wasserstein_linear_unmixing(nx):
np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-03)
np.testing.assert_allclose(Y1_emb, nx.to_numpy(Y1b_emb), atol=1e-03)
np.testing.assert_allclose(Y2_emb, nx.to_numpy(Y2b_emb), atol=1e-03)
- np.testing.assert_allclose(reconstruction1, reconstruction1b, atol=1e-06)
- np.testing.assert_allclose(reconstruction2, reconstruction2b, atol=1e-06)
+ np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
+ np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
np.testing.assert_allclose(C1b_emb.shape, (n, n))
np.testing.assert_allclose(C2b_emb.shape, (n, n))
@@ -1058,8 +1009,8 @@ def test_fused_gromov_wasserstein_linear_unmixing(nx):
np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-03)
np.testing.assert_allclose(Y1_emb, nx.to_numpy(Y1b_emb), atol=1e-03)
np.testing.assert_allclose(Y2_emb, nx.to_numpy(Y2b_emb), atol=1e-03)
- np.testing.assert_allclose(reconstruction1, reconstruction1b, atol=1e-06)
- np.testing.assert_allclose(reconstruction2, reconstruction2b, atol=1e-06)
+ np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
+ np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
np.testing.assert_allclose(C1b_emb.shape, (n, n))
np.testing.assert_allclose(C2b_emb.shape, (n, n))
@@ -1093,12 +1044,10 @@ def test_fused_gromov_wasserstein_dictionary_learning(nx):
dataset_feature_means = np.stack([Y.mean(axis=0) for Y in Ys])
Ydict_init = np.random.normal(loc=dataset_feature_means.mean(axis=0), scale=dataset_feature_means.std(axis=0), size=(n_atoms, shape, 2))
- Csb = [nx.from_numpy(C) for C in Cs]
- Ysb = [nx.from_numpy(Y) for Y in Ys]
- psb = [nx.from_numpy(p) for p in ps]
- qb = nx.from_numpy(q)
- Cdict_initb = nx.from_numpy(Cdict_init)
- Ydict_initb = nx.from_numpy(Ydict_init)
+ Csb = nx.from_numpy(*Cs)
+ Ysb = nx.from_numpy(*Ys)
+ psb = nx.from_numpy(*ps)
+ qb, Cdict_initb, Ydict_initb = nx.from_numpy(q, Cdict_init, Ydict_init)
# Test: Compute initial reconstruction of samples on this random dictionary
alpha = 0.5
@@ -1151,6 +1100,7 @@ def test_fused_gromov_wasserstein_dictionary_learning(nx):
)
total_reconstruction_b += reconstruction
+ total_reconstruction_b = nx.to_numpy(total_reconstruction_b)
np.testing.assert_array_less(total_reconstruction_b, initial_total_reconstruction)
np.testing.assert_allclose(total_reconstruction_b, total_reconstruction, atol=1e-05)
np.testing.assert_allclose(Cdict, nx.to_numpy(Cdictb), atol=1e-03)
@@ -1192,6 +1142,8 @@ def test_fused_gromov_wasserstein_dictionary_learning(nx):
tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
)
total_reconstruction_b_bis += reconstruction
+
+ total_reconstruction_b_bis = nx.to_numpy(total_reconstruction_b_bis)
np.testing.assert_allclose(total_reconstruction_b_bis, total_reconstruction_b, atol=1e-05)
# Test: without using adam optimizer, with log and verbose set to True
@@ -1237,4 +1189,5 @@ def test_fused_gromov_wasserstein_dictionary_learning(nx):
total_reconstruction_b_bis2 += reconstruction
# > Compare results with/without backend
+ total_reconstruction_b_bis2 = nx.to_numpy(total_reconstruction_b_bis2)
np.testing.assert_allclose(total_reconstruction_bis2, total_reconstruction_b_bis2, atol=1e-05)
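
For illustration, a minimal sketch of Gromov-Wasserstein on backend arrays, using the NumPy backend (the tests above run the same calls on every backend and device):

    import numpy as np
    import ot
    from ot.backend import NumpyBackend

    nx = NumpyBackend()
    rng = np.random.RandomState(0)
    xs = rng.randn(20, 2)
    xt = rng.randn(20, 2)
    C1 = ot.dist(xs, xs)
    C1 /= C1.max()
    C2 = ot.dist(xt, xt)
    C2 /= C2.max()
    p, q = ot.unif(20), ot.unif(20)

    C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)
    Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss')
    gw_val = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss')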
diff --git a/test/test_optim.py b/test/test_optim.py
index 41f9cbe..67e9d13 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -32,9 +32,7 @@ def test_conditional_gradient(nx):
def fb(G):
return 0.5 * nx.sum(G ** 2)
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- Mb = nx.from_numpy(M, type_as=ab)
+ ab, bb, Mb = nx.from_numpy(a, b, M)
reg = 1e-1
@@ -74,9 +72,7 @@ def test_conditional_gradient_itermax(nx):
def fb(G):
return 0.5 * nx.sum(G ** 2)
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- Mb = nx.from_numpy(M, type_as=ab)
+ ab, bb, Mb = nx.from_numpy(a, b, M)
reg = 1e-1
@@ -118,9 +114,7 @@ def test_generalized_conditional_gradient(nx):
reg1 = 1e-3
reg2 = 1e-1
- ab = nx.from_numpy(a)
- bb = nx.from_numpy(b)
- Mb = nx.from_numpy(M, type_as=ab)
+ ab, bb, Mb = nx.from_numpy(a, b, M)
G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True)
Gb, log = ot.optim.gcg(ab, bb, Mb, reg1, reg2, fb, df, verbose=True, log=True)
@@ -142,9 +136,12 @@ def test_line_search_armijo(nx):
pk = np.array([[-0.25, 0.25], [0.25, -0.25]])
gfk = np.array([[23.04273441, 23.0449082], [23.04273441, 23.0449082]])
old_fval = -123
+
+ xkb, pkb, gfkb = nx.from_numpy(xk, pk, gfk)
+
# Should not throw an exception and return 0. for alpha
alpha, a, b = ot.optim.line_search_armijo(
- lambda x: 1, nx.from_numpy(xk), nx.from_numpy(pk), nx.from_numpy(gfk), old_fval
+ lambda x: 1, xkb, pkb, gfkb, old_fval
)
alpha_np, anp, bnp = ot.optim.line_search_armijo(
lambda x: 1, xk, pk, gfk, old_fval
diff --git a/test/test_ot.py b/test/test_ot.py
index 3e2d845..bb258e2 100644
--- a/test/test_ot.py
+++ b/test/test_ot.py
@@ -47,8 +47,7 @@ def test_emd_backends(nx):
G = ot.emd(a, a, M)
- ab = nx.from_numpy(a)
- Mb = nx.from_numpy(M)
+ ab, Mb = nx.from_numpy(a, M)
Gb = ot.emd(ab, ab, Mb)
@@ -68,8 +67,7 @@ def test_emd2_backends(nx):
val = ot.emd2(a, a, M)
- ab = nx.from_numpy(a)
- Mb = nx.from_numpy(M)
+ ab, Mb = nx.from_numpy(a, M)
valb = ot.emd2(ab, ab, Mb)
@@ -90,8 +88,7 @@ def test_emd_emd2_types_devices(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- ab = nx.from_numpy(a, type_as=tp)
- Mb = nx.from_numpy(M, type_as=tp)
+ ab, Mb = nx.from_numpy(a, M, type_as=tp)
Gb = ot.emd(ab, ab, Mb)
@@ -117,8 +114,7 @@ def test_emd_emd2_devices_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- ab = nx.from_numpy(a)
- Mb = nx.from_numpy(M)
+ ab, Mb = nx.from_numpy(a, M)
Gb = ot.emd(ab, ab, Mb)
w = ot.emd2(ab, ab, Mb)
nx.assert_same_dtype_device(Mb, Gb)
@@ -126,8 +122,7 @@ def test_emd_emd2_devices_tf():
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- ab = nx.from_numpy(a)
- Mb = nx.from_numpy(M)
+ ab, Mb = nx.from_numpy(a, M)
Gb = ot.emd(ab, ab, Mb)
w = ot.emd2(ab, ab, Mb)
nx.assert_same_dtype_device(Mb, Gb)
@@ -310,8 +305,8 @@ def test_free_support_barycenter_backends(nx):
X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init)
- measures_locations2 = [nx.from_numpy(x) for x in measures_locations]
- measures_weights2 = [nx.from_numpy(x) for x in measures_weights]
+ measures_locations2 = nx.from_numpy(*measures_locations)
+ measures_weights2 = nx.from_numpy(*measures_weights)
X_init2 = nx.from_numpy(X_init)
X2 = ot.lp.free_support_barycenter(measures_locations2, measures_weights2, X_init2)
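Note: for the free-support barycenter test, the list comprehensions become star-unpacked calls, so each list of measures is converted in a single pass; the sequence returned by nx.from_numpy stands in for the original lists, as the updated test relies on. A compact illustration, assuming the NumPy backend and arbitrary toy measures:

    import numpy as np
    import ot

    nx = ot.backend.NumpyBackend()
    rng = np.random.RandomState(0)

    measures_locations = [rng.randn(5, 2), rng.randn(7, 2)]
    measures_weights = [ot.unif(5), ot.unif(7)]

    locations2 = nx.from_numpy(*measures_locations)  # one tensor per measure
    weights2 = nx.from_numpy(*measures_weights)
    assert len(locations2) == len(measures_locations)
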
diff --git a/test/test_sliced.py b/test/test_sliced.py
index 91e0961..08ab4fb 100644
--- a/test/test_sliced.py
+++ b/test/test_sliced.py
@@ -123,9 +123,7 @@ def test_sliced_backend(nx):
n_projections = 20
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
val0 = ot.sliced_wasserstein_distance(x, y, projections=P)
@@ -153,9 +151,7 @@ def test_sliced_backend_type_devices(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- xb = nx.from_numpy(x, type_as=tp)
- yb = nx.from_numpy(y, type_as=tp)
- Pb = nx.from_numpy(P, type_as=tp)
+ xb, yb, Pb = nx.from_numpy(x, y, P, type_as=tp)
valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb)
@@ -174,17 +170,13 @@ def test_sliced_backend_device_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb)
nx.assert_same_dtype_device(xb, valb)
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb)
nx.assert_same_dtype_device(xb, valb)
assert nx.dtype_device(valb)[1].startswith("GPU")
@@ -203,9 +195,7 @@ def test_max_sliced_backend(nx):
n_projections = 20
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
val0 = ot.max_sliced_wasserstein_distance(x, y, projections=P)
@@ -233,9 +223,7 @@ def test_max_sliced_backend_type_devices(nx):
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
- xb = nx.from_numpy(x, type_as=tp)
- yb = nx.from_numpy(y, type_as=tp)
- Pb = nx.from_numpy(P, type_as=tp)
+ xb, yb, Pb = nx.from_numpy(x, y, P, type_as=tp)
valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
@@ -254,17 +242,13 @@ def test_max_sliced_backend_device_tf():
# Check that everything stays on the CPU
with tf.device("/CPU:0"):
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
nx.assert_same_dtype_device(xb, valb)
if len(tf.config.list_physical_devices('GPU')) > 0:
# Check that everything happens on the GPU
- xb = nx.from_numpy(x)
- yb = nx.from_numpy(y)
- Pb = nx.from_numpy(P)
+ xb, yb, Pb = nx.from_numpy(x, y, P)
valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)
nx.assert_same_dtype_device(xb, valb)
assert nx.dtype_device(valb)[1].startswith("GPU")
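Note: both TensorFlow-specific tests in this file share one skeleton: create the tensors inside a tf.device scope, run the solver there, then assert that the output kept the inputs' dtype and device. A condensed sketch of that skeleton, guarded so it only runs when TensorFlow is available (check_on_cpu and fn are illustrative names, not part of the test suite):

    import pytest

    tf = pytest.importorskip("tensorflow")
    from ot.backend import TensorflowBackend

    def check_on_cpu(fn, *numpy_inputs):
        nx = TensorflowBackend()
        with tf.device("/CPU:0"):
            tensors = [nx.from_numpy(arr) for arr in numpy_inputs]
            result = fn(*tensors)
        # result must share the inputs' dtype and still live on the CPU
        nx.assert_same_dtype_device(tensors[0], result)
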
diff --git a/test/test_unbalanced.py b/test/test_unbalanced.py
index e8349d1..db59504 100644
--- a/test/test_unbalanced.py
+++ b/test/test_unbalanced.py
@@ -9,11 +9,9 @@ import ot
import pytest
from ot.unbalanced import barycenter_unbalanced
-from scipy.special import logsumexp
-
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"])
-def test_unbalanced_convergence(method):
+def test_unbalanced_convergence(nx, method):
# test generalized sinkhorn for unbalanced OT
n = 100
rng = np.random.RandomState(42)
@@ -28,36 +26,51 @@ def test_unbalanced_convergence(method):
epsilon = 1.
reg_m = 1.
+ a, b, M = nx.from_numpy(a, b, M)
+
G, log = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon,
reg_m=reg_m,
method=method,
log=True,
verbose=True)
- loss = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
- method=method,
- verbose=True)
+ loss = nx.to_numpy(ot.unbalanced.sinkhorn_unbalanced2(
+ a, b, M, epsilon, reg_m, method=method, verbose=True
+ ))
# check fixed point equations
# in log-domain
fi = reg_m / (reg_m + epsilon)
- logb = np.log(b + 1e-16)
- loga = np.log(a + 1e-16)
- logKtu = logsumexp(log["logu"][None, :] - M.T / epsilon, axis=1)
- logKv = logsumexp(log["logv"][None, :] - M / epsilon, axis=1)
+ logb = nx.log(b + 1e-16)
+ loga = nx.log(a + 1e-16)
+ logKtu = nx.logsumexp(log["logu"][None, :] - M.T / epsilon, axis=1)
+ logKv = nx.logsumexp(log["logv"][None, :] - M / epsilon, axis=1)
v_final = fi * (logb - logKtu)
u_final = fi * (loga - logKv)
np.testing.assert_allclose(
- u_final, log["logu"], atol=1e-05)
+ nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05)
np.testing.assert_allclose(
- v_final, log["logv"], atol=1e-05)
+ nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05)
# check if sinkhorn_unbalanced2 returns the correct loss
- np.testing.assert_allclose((G * M).sum(), loss, atol=1e-5)
+ np.testing.assert_allclose(nx.to_numpy(nx.sum(G * M)), loss, atol=1e-5)
+
+ # check in case no histogram is provided
+ M_np = nx.to_numpy(M)
+ a_np, b_np = np.array([]), np.array([])
+ a, b = nx.from_numpy(a_np, b_np)
+
+ G = ot.unbalanced.sinkhorn_unbalanced(
+ a, b, M, reg=epsilon, reg_m=reg_m, method=method, verbose=True
+ )
+ G_np = ot.unbalanced.sinkhorn_unbalanced(
+ a_np, b_np, M_np, reg=epsilon, reg_m=reg_m, method=method, verbose=True
+ )
+ np.testing.assert_allclose(G_np, nx.to_numpy(G))
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"])
-def test_unbalanced_multiple_inputs(method):
+def test_unbalanced_multiple_inputs(nx, method):
# test generalized sinkhorn for unbalanced OT
n = 100
rng = np.random.RandomState(42)
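Note: the rewritten convergence test swaps scipy.special.logsumexp for the backend's nx.logsumexp and converts through nx.to_numpy only at the assertions. The identity being checked is unchanged: at a fixed point of unbalanced Sinkhorn, the log-scalings satisfy log u = fi * (log a - log(K v)) and log v = fi * (log b - log(K^T u)), with fi = reg_m / (reg_m + epsilon) and K = exp(-M / epsilon). A NumPy-only restatement of the u-side check, following the lines removed above:

    import numpy as np
    import ot
    from scipy.special import logsumexp

    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    a, b = ot.unif(n), ot.unif(n)
    M = ot.dist(x, x)
    epsilon, reg_m = 1., 1.

    G, log = ot.unbalanced.sinkhorn_unbalanced(
        a, b, M, reg=epsilon, reg_m=reg_m, method="sinkhorn", log=True)

    fi = reg_m / (reg_m + epsilon)
    logKv = logsumexp(log["logv"][None, :] - M / epsilon, axis=1)
    np.testing.assert_allclose(fi * (np.log(a + 1e-16) - logKv),
                               log["logu"], atol=1e-05)
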
@@ -72,6 +85,8 @@ def test_unbalanced_multiple_inputs(method):
epsilon = 1.
reg_m = 1.
+ a, b, M = nx.from_numpy(a, b, M)
+
loss, log = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon,
reg_m=reg_m,
method=method,
@@ -80,23 +95,24 @@ def test_unbalanced_multiple_inputs(method):
# check fixed point equations
# in log-domain
fi = reg_m / (reg_m + epsilon)
- logb = np.log(b + 1e-16)
- loga = np.log(a + 1e-16)[:, None]
- logKtu = logsumexp(log["logu"][:, None, :] - M[:, :, None] / epsilon,
- axis=0)
- logKv = logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1)
+ logb = nx.log(b + 1e-16)
+ loga = nx.log(a + 1e-16)[:, None]
+ logKtu = nx.logsumexp(
+ log["logu"][:, None, :] - M[:, :, None] / epsilon, axis=0
+ )
+ logKv = nx.logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1)
v_final = fi * (logb - logKtu)
u_final = fi * (loga - logKv)
np.testing.assert_allclose(
- u_final, log["logu"], atol=1e-05)
+ nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05)
np.testing.assert_allclose(
- v_final, log["logv"], atol=1e-05)
+ nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05)
assert len(loss) == b.shape[1]
-def test_stabilized_vs_sinkhorn():
+def test_stabilized_vs_sinkhorn(nx):
# test if stable version matches sinkhorn
n = 100
@@ -112,19 +128,27 @@ def test_stabilized_vs_sinkhorn():
M /= np.median(M)
epsilon = 0.1
reg_m = 1.
- G, log = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, reg=epsilon,
- method="sinkhorn_stabilized",
- reg_m=reg_m,
- log=True,
- verbose=True)
- G2, log2 = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
- method="sinkhorn", log=True)
+
+ ab, bb, Mb = nx.from_numpy(a, b, M)
+
+ G, _ = ot.unbalanced.sinkhorn_unbalanced2(
+ ab, bb, Mb, epsilon, reg_m, method="sinkhorn_stabilized", log=True
+ )
+ G2, _ = ot.unbalanced.sinkhorn_unbalanced2(
+ ab, bb, Mb, epsilon, reg_m, method="sinkhorn", log=True
+ )
+ G2_np, _ = ot.unbalanced.sinkhorn_unbalanced2(
+ a, b, M, epsilon, reg_m, method="sinkhorn", log=True
+ )
+ G = nx.to_numpy(G)
+ G2 = nx.to_numpy(G2)
np.testing.assert_allclose(G, G2, atol=1e-5)
+ np.testing.assert_allclose(G2, G2_np, atol=1e-5)
@pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"])
-def test_unbalanced_barycenter(method):
+def test_unbalanced_barycenter(nx, method):
# test generalized sinkhorn for unbalanced OT barycenter
n = 100
rng = np.random.RandomState(42)
@@ -138,25 +162,29 @@ def test_unbalanced_barycenter(method):
epsilon = 1.
reg_m = 1.
- q, log = barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m,
- method=method, log=True, verbose=True)
+ A, M = nx.from_numpy(A, M)
+
+ q, log = barycenter_unbalanced(
+ A, M, reg=epsilon, reg_m=reg_m, method=method, log=True, verbose=True
+ )
# check fixed point equations
fi = reg_m / (reg_m + epsilon)
- logA = np.log(A + 1e-16)
- logq = np.log(q + 1e-16)[:, None]
- logKtu = logsumexp(log["logu"][:, None, :] - M[:, :, None] / epsilon,
- axis=0)
- logKv = logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1)
+ logA = nx.log(A + 1e-16)
+ logq = nx.log(q + 1e-16)[:, None]
+ logKtu = nx.logsumexp(
+ log["logu"][:, None, :] - M[:, :, None] / epsilon, axis=0
+ )
+ logKv = nx.logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1)
v_final = fi * (logq - logKtu)
u_final = fi * (logA - logKv)
np.testing.assert_allclose(
- u_final, log["logu"], atol=1e-05)
+ nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05)
np.testing.assert_allclose(
- v_final, log["logv"], atol=1e-05)
+ nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05)
-def test_barycenter_stabilized_vs_sinkhorn():
+def test_barycenter_stabilized_vs_sinkhorn(nx):
# test generalized sinkhorn for unbalanced OT barycenter
n = 100
rng = np.random.RandomState(42)
@@ -170,21 +198,24 @@ def test_barycenter_stabilized_vs_sinkhorn():
epsilon = 0.5
reg_m = 10
- qstable, log = barycenter_unbalanced(A, M, reg=epsilon,
- reg_m=reg_m, log=True,
- tau=100,
- method="sinkhorn_stabilized",
- verbose=True
- )
- q, log = barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m,
- method="sinkhorn",
- log=True)
+ Ab, Mb = nx.from_numpy(A, M)
- np.testing.assert_allclose(
- q, qstable, atol=1e-05)
+ qstable, _ = barycenter_unbalanced(
+ Ab, Mb, reg=epsilon, reg_m=reg_m, log=True, tau=100,
+ method="sinkhorn_stabilized", verbose=True
+ )
+ q, _ = barycenter_unbalanced(
+ Ab, Mb, reg=epsilon, reg_m=reg_m, method="sinkhorn", log=True
+ )
+ q_np, _ = barycenter_unbalanced(
+ A, M, reg=epsilon, reg_m=reg_m, method="sinkhorn", log=True
+ )
+ q, qstable = nx.to_numpy(q, qstable)
+ np.testing.assert_allclose(q, qstable, atol=1e-05)
+ np.testing.assert_allclose(q, q_np, atol=1e-05)
-def test_wrong_method():
+def test_wrong_method(nx):
n = 10
rng = np.random.RandomState(42)
@@ -199,19 +230,20 @@ def test_wrong_method():
epsilon = 1.
reg_m = 1.
+ a, b, M = nx.from_numpy(a, b, M)
+
with pytest.raises(ValueError):
- ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon,
- reg_m=reg_m,
- method='badmethod',
- log=True,
- verbose=True)
+ ot.unbalanced.sinkhorn_unbalanced(
+ a, b, M, reg=epsilon, reg_m=reg_m, method='badmethod',
+ log=True, verbose=True
+ )
with pytest.raises(ValueError):
- ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
- method='badmethod',
- verbose=True)
+ ot.unbalanced.sinkhorn_unbalanced2(
+ a, b, M, epsilon, reg_m, method='badmethod', verbose=True
+ )
-def test_implemented_methods():
+def test_implemented_methods(nx):
IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized']
TO_BE_IMPLEMENTED_METHODS = ['sinkhorn_reg_scaling']
NOT_VALID_TOKENS = ['foo']
@@ -228,6 +260,9 @@ def test_implemented_methods():
M = ot.dist(x, x)
epsilon = 1.
reg_m = 1.
+
+ a, b, M, A = nx.from_numpy(a, b, M, A)
+
for method in IMPLEMENTED_METHODS:
ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, reg_m,
method=method)
diff --git a/test/test_weak.py b/test/test_weak.py
index c4c3278..945efb1 100644
--- a/test/test_weak.py
+++ b/test/test_weak.py
@@ -45,9 +45,7 @@ def test_weak_ot_bakends(nx):
G = ot.weak_optimal_transport(xs, xt, u, u)
- xs2 = nx.from_numpy(xs)
- xt2 = nx.from_numpy(xt)
- u2 = nx.from_numpy(u)
+ xs2, xt2, u2 = nx.from_numpy(xs, xt, u)
G2 = ot.weak_optimal_transport(xs2, xt2, u2, u2)
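
Note: every test touched in this patch now accepts the nx fixture, so each module runs once per available backend. In POT that fixture lives in the test suite's conftest, parametrized over all backends detected in the environment; a minimal stand-in, assuming only the NumPy backend, would be:

    import pytest
    from ot.backend import NumpyBackend

    # simplified stand-in for POT's conftest.py fixture, which is
    # parametrized over every backend available in the environment
    @pytest.fixture(params=[NumpyBackend()], ids=["numpy"])
    def nx(request):
        return request.param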