author     tvayer <titouan.vayer@gmail.com>    2019-05-29 14:24:05 +0200
committer  tvayer <titouan.vayer@gmail.com>    2019-05-29 14:24:05 +0200
commit     63bbeb34e48f02c97a762dab5232158d90a5cffc (patch)
tree       853026b5854b6e4b01fdf750db139985b3dd596f /test
parent     f70aabfcc11f92181e0dc987b341bad8ec030d75 (diff)
parent     f66ab58c7c895011fd37bafd3e848828399c56c4 (diff)
Merge remote-tracking branch 'rflamary/master'
merge pot
Diffstat (limited to 'test')
-rw-r--r--  test/test_bregman.py     119
-rw-r--r--  test/test_da.py           63
-rw-r--r--  test/test_gpu.py         101
-rw-r--r--  test/test_gromov.py       10
-rw-r--r--  test/test_ot.py           27
-rw-r--r--  test/test_plot.py         13
-rw-r--r--  test/test_stochastic.py  215
-rw-r--r--  test/test_utils.py         2
8 files changed, 432 insertions, 118 deletions
diff --git a/test/test_bregman.py b/test/test_bregman.py
index c8e9179..7f4972c 100644
--- a/test/test_bregman.py
+++ b/test/test_bregman.py
@@ -1,6 +1,7 @@
"""Tests for module bregman on OT with bregman projections """
# Author: Remi Flamary <remi.flamary@unice.fr>
+# Kilian Fatras <kilian.fatras@irisa.fr>
#
# License: MIT License
@@ -71,11 +72,39 @@ def test_sinkhorn_variants():
Ges = ot.sinkhorn(
u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10)
Gerr = ot.sinkhorn(u, u, M, 1, method='do_not_exists', stopThr=1e-10)
+ G_green = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10)
# check values
np.testing.assert_allclose(G0, Gs, atol=1e-05)
np.testing.assert_allclose(G0, Ges, atol=1e-05)
np.testing.assert_allclose(G0, Gerr)
+ np.testing.assert_allclose(G0, G_green, atol=1e-5)
+ print(G0, G_green)
+
+
+def test_sinkhorn_variants_log():
+ # test sinkhorn
+ n = 100
+ rng = np.random.RandomState(0)
+
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+
+ M = ot.dist(x, x)
+
+ G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True)
+ Gs, logs = ot.sinkhorn(u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True)
+ Ges, loges = ot.sinkhorn(
+ u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True)
+ Gerr, logerr = ot.sinkhorn(u, u, M, 1, method='do_not_exists', stopThr=1e-10, log=True)
+ G_green, loggreen = ot.sinkhorn(u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True)
+
+ # check values
+ np.testing.assert_allclose(G0, Gs, atol=1e-05)
+ np.testing.assert_allclose(G0, Ges, atol=1e-05)
+ np.testing.assert_allclose(G0, Gerr)
+ np.testing.assert_allclose(G0, G_green, atol=1e-5)
+ print(G0, G_green)
def test_bary():
@@ -105,6 +134,30 @@ def test_bary():
ot.bregman.barycenter(A, M, reg, log=True, verbose=True)
+def test_wasserstein_bary_2d():
+
+ size = 100 # size of a square image
+    a1 = np.random.randn(size, size)
+    a1 -= a1.min()  # shift so all mass is nonnegative
+    a1 = a1 / np.sum(a1)
+    a2 = np.random.randn(size, size)
+    a2 -= a2.min()  # shift so all mass is nonnegative
+    a2 = a2 / np.sum(a2)
+ # creating matrix A containing all distributions
+ A = np.zeros((2, size, size))
+ A[0, :, :] = a1
+ A[1, :, :] = a2
+
+ # wasserstein
+ reg = 1e-2
+ bary_wass = ot.bregman.convolutional_barycenter2d(A, reg)
+
+ np.testing.assert_allclose(1, np.sum(bary_wass))
+
+    # check that log and verbose do not break the function
+ ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True)
+
+
def test_unmix():
n_bins = 50 # nb bins
@@ -135,3 +188,69 @@ def test_unmix():
ot.bregman.unmix(a, D, M, M0, h0, reg,
1, alpha=0.01, log=True, verbose=True)
+
+
+def test_empirical_sinkhorn():
+ # test sinkhorn
+ n = 100
+ a = ot.unif(n)
+ b = ot.unif(n)
+
+ X_s = np.reshape(np.arange(n), (n, 1))
+ X_t = np.reshape(np.arange(0, n), (n, 1))
+ M = ot.dist(X_s, X_t)
+ M_m = ot.dist(X_s, X_t, metric='minkowski')
+
+ G_sqe = ot.bregman.empirical_sinkhorn(X_s, X_t, 1)
+ sinkhorn_sqe = ot.sinkhorn(a, b, M, 1)
+
+ G_log, log_es = ot.bregman.empirical_sinkhorn(X_s, X_t, 0.1, log=True)
+ sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True)
+
+ G_m = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, metric='minkowski')
+ sinkhorn_m = ot.sinkhorn(a, b, M_m, 1)
+
+ loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2(X_s, X_t, 1)
+ loss_sinkhorn = ot.sinkhorn2(a, b, M, 1)
+
+    # check constraints
+    np.testing.assert_allclose(
+        sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05)  # metric sqeuclidean
+    np.testing.assert_allclose(
+        sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05)  # metric sqeuclidean
+    np.testing.assert_allclose(
+        sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05)  # log
+    np.testing.assert_allclose(
+        sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05)  # log
+    np.testing.assert_allclose(
+        sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05)  # metric minkowski
+    np.testing.assert_allclose(
+        sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05)  # metric minkowski
+ np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05)
+
+
+def test_empirical_sinkhorn_divergence():
+    # Test Sinkhorn divergence
+ n = 10
+ a = ot.unif(n)
+ b = ot.unif(n)
+ X_s = np.reshape(np.arange(n), (n, 1))
+ X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1))
+ M = ot.dist(X_s, X_t)
+ M_s = ot.dist(X_s, X_s)
+ M_t = ot.dist(X_t, X_t)
+
+ emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1)
+ sinkhorn_div = (ot.sinkhorn2(a, b, M, 1) - 1 / 2 * ot.sinkhorn2(a, a, M_s, 1) - 1 / 2 * ot.sinkhorn2(b, b, M_t, 1))
+
+ emp_sinkhorn_div_log, log_es = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, log=True)
+ sink_div_log_ab, log_s_ab = ot.sinkhorn2(a, b, M, 1, log=True)
+ sink_div_log_a, log_s_a = ot.sinkhorn2(a, a, M_s, 1, log=True)
+ sink_div_log_b, log_s_b = ot.sinkhorn2(b, b, M_t, 1, log=True)
+ sink_div_log = sink_div_log_ab - 1 / 2 * (sink_div_log_a + sink_div_log_b)
+
+    # check constraints
+ np.testing.assert_allclose(
+ emp_sinkhorn_div, sinkhorn_div, atol=1e-05) # cf conv emp sinkhorn
+ np.testing.assert_allclose(
+ emp_sinkhorn_div_log, sink_div_log, atol=1e-05) # cf conv emp sinkhorn
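
The divergence checked by test_empirical_sinkhorn_divergence above follows the standard Sinkhorn divergence construction. Written out, with W_eps denoting the entropic OT cost returned by ot.sinkhorn2 at regularization eps (notation chosen here for readability, not taken from the test file):

    S_\varepsilon(a, b) = W_\varepsilon(a, b)
                          - \tfrac{1}{2} W_\varepsilon(a, a)
                          - \tfrac{1}{2} W_\varepsilon(b, b)

so empirical_sinkhorn_divergence is expected to agree with the three-call combination of ot.sinkhorn2 up to solver tolerance, which is exactly what the two assert_allclose checks verify.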
diff --git a/test/test_da.py b/test/test_da.py
index 97e23da..f7f3a9d 100644
--- a/test/test_da.py
+++ b/test/test_da.py
@@ -484,66 +484,3 @@ def test_linear_mapping_class():
Cst = np.cov(Xst.T)
np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)
-
-
-def test_otda():
-
- n_samples = 150 # nb samples
- np.random.seed(0)
-
- xs, ys = ot.datasets.make_data_classif('3gauss', n_samples)
- xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples)
-
- a, b = ot.unif(n_samples), ot.unif(n_samples)
-
- # LP problem
- da_emd = ot.da.OTDA() # init class
- da_emd.fit(xs, xt) # fit distributions
- da_emd.interp() # interpolation of source samples
- da_emd.predict(xs) # interpolation of source samples
-
- np.testing.assert_allclose(a, np.sum(da_emd.G, 1))
- np.testing.assert_allclose(b, np.sum(da_emd.G, 0))
-
- # sinkhorn regularization
- lambd = 1e-1
- da_entrop = ot.da.OTDA_sinkhorn()
- da_entrop.fit(xs, xt, reg=lambd)
- da_entrop.interp()
- da_entrop.predict(xs)
-
- np.testing.assert_allclose(
- a, np.sum(da_entrop.G, 1), rtol=1e-3, atol=1e-3)
- np.testing.assert_allclose(b, np.sum(da_entrop.G, 0), rtol=1e-3, atol=1e-3)
-
- # non-convex Group lasso regularization
- reg = 1e-1
- eta = 1e0
- da_lpl1 = ot.da.OTDA_lpl1()
- da_lpl1.fit(xs, ys, xt, reg=reg, eta=eta)
- da_lpl1.interp()
- da_lpl1.predict(xs)
-
- np.testing.assert_allclose(a, np.sum(da_lpl1.G, 1), rtol=1e-3, atol=1e-3)
- np.testing.assert_allclose(b, np.sum(da_lpl1.G, 0), rtol=1e-3, atol=1e-3)
-
- # True Group lasso regularization
- reg = 1e-1
- eta = 2e0
- da_l1l2 = ot.da.OTDA_l1l2()
- da_l1l2.fit(xs, ys, xt, reg=reg, eta=eta, numItermax=20, verbose=True)
- da_l1l2.interp()
- da_l1l2.predict(xs)
-
- np.testing.assert_allclose(a, np.sum(da_l1l2.G, 1), rtol=1e-3, atol=1e-3)
- np.testing.assert_allclose(b, np.sum(da_l1l2.G, 0), rtol=1e-3, atol=1e-3)
-
- # linear mapping
- da_emd = ot.da.OTDA_mapping_linear() # init class
- da_emd.fit(xs, xt, numItermax=10) # fit distributions
- da_emd.predict(xs) # interpolation of source samples
-
- # nonlinear mapping
- da_emd = ot.da.OTDA_mapping_kernel() # init class
- da_emd.fit(xs, xt, numItermax=10) # fit distributions
- da_emd.predict(xs) # interpolation of source samples
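
The deleted test_otda above covered the deprecated OTDA, OTDA_sinkhorn, OTDA_lpl1, OTDA_l1l2 and OTDA_mapping_* classes, removed in favor of the scikit-learn style transport classes in ot.da. A minimal sketch of the replacement usage (a sketch only, assuming the class-based ot.da interface with its reg_e parameter is available in this version; not part of this diff):

    import ot

    # same toy data as the removed test
    xs, ys = ot.datasets.make_data_classif('3gauss', 150)
    xt, yt = ot.datasets.make_data_classif('3gauss2', 150)

    # entropic OT, class-based replacement for OTDA_sinkhorn
    otda = ot.da.SinkhornTransport(reg_e=1e-1)
    otda.fit(Xs=xs, Xt=xt)             # estimate the coupling
    xs_mapped = otda.transform(Xs=xs)  # barycentric mapping of source samples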
diff --git a/test/test_gpu.py b/test/test_gpu.py
index 1e97c45..6b7fdd4 100644
--- a/test/test_gpu.py
+++ b/test/test_gpu.py
@@ -6,7 +6,6 @@
import numpy as np
import ot
-import time
import pytest
try: # test if cudamat installed
@@ -17,63 +16,81 @@ except ImportError:
@pytest.mark.skipif(nogpu, reason="No GPU available")
-def test_gpu_sinkhorn():
+def test_gpu_dist():
rng = np.random.RandomState(0)
- def describe_res(r):
- print("min:{:.3E}, max::{:.3E}, mean::{:.3E}, std::{:.3E}".format(
- np.min(r), np.max(r), np.mean(r), np.std(r)))
-
for n_samples in [50, 100, 500, 1000]:
print(n_samples)
a = rng.rand(n_samples // 4, 100)
b = rng.rand(n_samples, 100)
- time1 = time.time()
- transport = ot.da.OTDA_sinkhorn()
- transport.fit(a, b)
- G1 = transport.G
- time2 = time.time()
- transport = ot.gpu.da.OTDA_sinkhorn()
- transport.fit(a, b)
- G2 = transport.G
- time3 = time.time()
- print("Normal sinkhorn, time: {:6.2f} sec ".format(time2 - time1))
- describe_res(G1)
- print(" GPU sinkhorn, time: {:6.2f} sec ".format(time3 - time2))
- describe_res(G2)
-
- np.testing.assert_allclose(G1, G2, rtol=1e-5, atol=1e-5)
+
+ M = ot.dist(a.copy(), b.copy())
+ M2 = ot.gpu.dist(a.copy(), b.copy())
+
+ np.testing.assert_allclose(M, M2, rtol=1e-10)
+
+ M2 = ot.gpu.dist(a.copy(), b.copy(), metric='euclidean', to_numpy=False)
+
+    # check that a NotImplementedError is raised for an unsupported metric
+ with pytest.raises(NotImplementedError):
+ M2 = ot.gpu.dist(a.copy(), b.copy(), metric='cityblock', to_numpy=False)
@pytest.mark.skipif(nogpu, reason="No GPU available")
-def test_gpu_sinkhorn_lpl1():
+def test_gpu_sinkhorn():
rng = np.random.RandomState(0)
- def describe_res(r):
- print("min:{:.3E}, max:{:.3E}, mean:{:.3E}, std:{:.3E}"
- .format(np.min(r), np.max(r), np.mean(r), np.std(r)))
+ for n_samples in [50, 100, 500, 1000]:
+ a = rng.rand(n_samples // 4, 100)
+ b = rng.rand(n_samples, 100)
+
+ wa = ot.unif(n_samples // 4)
+ wb = ot.unif(n_samples)
+
+    wb2 = rng.rand(n_samples, 20)
+ wb2 /= wb2.sum(0, keepdims=True)
+
+ M = ot.dist(a.copy(), b.copy())
+ M2 = ot.gpu.dist(a.copy(), b.copy(), to_numpy=False)
+
+ reg = 1
+
+ G = ot.sinkhorn(wa, wb, M, reg)
+ G1 = ot.gpu.sinkhorn(wa, wb, M, reg)
+
+ np.testing.assert_allclose(G1, G, rtol=1e-10)
+
+ # run all on gpu
+ ot.gpu.sinkhorn(wa, wb, M2, reg, to_numpy=False, log=True)
+
+ # run sinkhorn for multiple targets
+ ot.gpu.sinkhorn(wa, wb2, M2, reg, to_numpy=False, log=True)
+
+
+@pytest.mark.skipif(nogpu, reason="No GPU available")
+def test_gpu_sinkhorn_lpl1():
+
+ rng = np.random.RandomState(0)
for n_samples in [50, 100, 500]:
print(n_samples)
a = rng.rand(n_samples // 4, 100)
labels_a = np.random.randint(10, size=(n_samples // 4))
b = rng.rand(n_samples, 100)
- time1 = time.time()
- transport = ot.da.OTDA_lpl1()
- transport.fit(a, labels_a, b)
- G1 = transport.G
- time2 = time.time()
- transport = ot.gpu.da.OTDA_lpl1()
- transport.fit(a, labels_a, b)
- G2 = transport.G
- time3 = time.time()
- print("Normal sinkhorn lpl1, time: {:6.2f} sec ".format(
- time2 - time1))
- describe_res(G1)
- print(" GPU sinkhorn lpl1, time: {:6.2f} sec ".format(
- time3 - time2))
- describe_res(G2)
-
- np.testing.assert_allclose(G1, G2, rtol=1e-3, atol=1e-3)
+
+ wa = ot.unif(n_samples // 4)
+ wb = ot.unif(n_samples)
+
+ M = ot.dist(a.copy(), b.copy())
+ M2 = ot.gpu.dist(a.copy(), b.copy(), to_numpy=False)
+
+ reg = 1
+
+ G = ot.da.sinkhorn_lpl1_mm(wa, labels_a, wb, M, reg)
+ G1 = ot.gpu.da.sinkhorn_lpl1_mm(wa, labels_a, wb, M, reg)
+
+ np.testing.assert_allclose(G1, G, rtol=1e-10)
+
+ ot.gpu.da.sinkhorn_lpl1_mm(wa, labels_a, wb, M2, reg, to_numpy=False, log=True)
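
The rewritten GPU tests above replace the removed OTDA GPU classes with the functional ot.gpu API, which mirrors the CPU functions and adds a to_numpy flag. A condensed sketch of the pattern the tests rely on (assuming a CUDA device and the cupy dependency required by ot.gpu):

    import numpy as np
    import ot
    import ot.gpu

    rng = np.random.RandomState(0)
    a, b = rng.rand(50, 100), rng.rand(200, 100)
    wa, wb = ot.unif(50), ot.unif(200)

    # by default GPU results come back as numpy arrays ...
    M = ot.gpu.dist(a, b)
    np.testing.assert_allclose(ot.dist(a, b), M, rtol=1e-10)

    # ... or stay on the device with to_numpy=False to chain GPU calls
    M_dev = ot.gpu.dist(a, b, to_numpy=False)
    G_dev = ot.gpu.sinkhorn(wa, wb, M_dev, 1, to_numpy=False)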
diff --git a/test/test_gromov.py b/test/test_gromov.py
index 07cd874..43b63e1 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -28,7 +28,7 @@ def test_gromov():
C1 /= C1.max()
C2 /= C2.max()
- G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss')
+ G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss', verbose=True)
# check constraints
np.testing.assert_allclose(
@@ -69,7 +69,7 @@ def test_entropic_gromov():
C2 /= C2.max()
G = ot.gromov.entropic_gromov_wasserstein(
- C1, C2, p, q, 'square_loss', epsilon=5e-4)
+ C1, C2, p, q, 'square_loss', epsilon=5e-4, verbose=True)
# check constraints
np.testing.assert_allclose(
@@ -107,7 +107,8 @@ def test_gromov_barycenter():
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'square_loss', # 5e-4,
- max_iter=100, tol=1e-3)
+ max_iter=100, tol=1e-3,
+ verbose=True)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.gromov_barycenters(n_samples, [C1, C2],
@@ -134,7 +135,8 @@ def test_gromov_entropic_barycenter():
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
'square_loss', 2e-3,
- max_iter=100, tol=1e-3)
+ max_iter=100, tol=1e-3,
+ verbose=True)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
diff --git a/test/test_ot.py b/test/test_ot.py
index 399e549..7652394 100644
--- a/test/test_ot.py
+++ b/test/test_ot.py
@@ -70,7 +70,7 @@ def test_emd_empty():
def test_emd2_multi():
- n = 1000 # nb bins
+ n = 500 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
@@ -78,7 +78,7 @@ def test_emd2_multi():
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
- ls = np.arange(20, 1000, 20)
+ ls = np.arange(20, 500, 20)
nb = len(ls)
b = np.zeros((n, nb))
for i in range(nb):
@@ -135,6 +135,21 @@ def test_lp_barycenter():
np.testing.assert_allclose(bary.sum(), 1)
+def test_free_support_barycenter():
+
+ measures_locations = [np.array([-1.]).reshape((1, 1)), np.array([1.]).reshape((1, 1))]
+ measures_weights = [np.array([1.]), np.array([1.])]
+
+ X_init = np.array([-12.]).reshape((1, 1))
+
+ # obvious barycenter location between two diracs
+ bar_locations = np.array([0.]).reshape((1, 1))
+
+ X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init)
+
+ np.testing.assert_allclose(X, bar_locations, rtol=1e-5, atol=1e-7)
+
+
@pytest.mark.skipif(not ot.lp.cvx.cvxopt, reason="No cvxopt available")
def test_lp_barycenter_cvxopt():
@@ -192,11 +207,11 @@ def test_warnings():
def test_dual_variables():
- n = 5000 # nb bins
- m = 6000 # nb bins
+ n = 500 # nb bins
+ m = 600 # nb bins
- mean1 = 1000
- mean2 = 1100
+ mean1 = 300
+ mean2 = 400
# bin positions
x = np.arange(n, dtype=np.float64)
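
On the expected value in test_free_support_barycenter above: with squared Euclidean cost, the free-support 2-Wasserstein barycenter of two Dirac masses at -1 and +1 with equal weights minimizes the weighted sum of squared distances,

    \arg\min_{x} \; \tfrac{1}{2}(x + 1)^2 + \tfrac{1}{2}(x - 1)^2 = 0,

so the solver has to move the deliberately distant initialization X_init = -12 to the midpoint of the two support points.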
diff --git a/test/test_plot.py b/test/test_plot.py
index f77d879..caf84de 100644
--- a/test/test_plot.py
+++ b/test/test_plot.py
@@ -5,10 +5,18 @@
# License: MIT License
import numpy as np
-import matplotlib
-matplotlib.use('Agg')
+import pytest
+try: # test if matplotlib is installed
+ import matplotlib
+ matplotlib.use('Agg')
+ nogo = False
+except ImportError:
+ nogo = True
+
+
+@pytest.mark.skipif(nogo, reason="Matplotlib not installed")
def test_plot1D_mat():
import ot
@@ -30,6 +38,7 @@ def test_plot1D_mat():
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
+@pytest.mark.skipif(nogo, reason="Matplotlib not installed")
def test_plot2D_samples_mat():
import ot
diff --git a/test/test_stochastic.py b/test/test_stochastic.py
new file mode 100644
index 0000000..f0f3fc8
--- /dev/null
+++ b/test/test_stochastic.py
@@ -0,0 +1,215 @@
+"""
+==========================
+Stochastic test
+==========================
+
+This example is designed to test the stochastic optimization algorithms module
+for discrete and semi-continuous measures from the POT library.
+
+"""
+
+# Author: Kilian Fatras <kilian.fatras@gmail.com>
+#
+# License: MIT License
+
+import numpy as np
+import ot
+
+
+#############################################################################
+# TESTS FOR THE SEMI-DUAL PROBLEM
+#############################################################################
+
+#############################################################################
+#
+# TEST SAG algorithm
+# ---------------------------------------------
+# 2 identical discrete measures u defined on the same space with a
+# regularization term, a learning rate and a number of iterations
+
+
+def test_stochastic_sag():
+ # test sag
+ n = 15
+ reg = 1
+ numItermax = 30000
+ rng = np.random.RandomState(0)
+
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+
+ M = ot.dist(x, x)
+
+ G = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "sag",
+ numItermax=numItermax)
+
+    # check constraints
+ np.testing.assert_allclose(
+ u, G.sum(1), atol=1e-04) # cf convergence sag
+ np.testing.assert_allclose(
+ u, G.sum(0), atol=1e-04) # cf convergence sag
+
+
+#############################################################################
+#
+# TEST ASGD algorithm
+# ---------------------------------------------
+# 2 identical discrete measures u defined on the same space with a
+# regularization term, a learning rate and a number of iterations
+
+
+def test_stochastic_asgd():
+ # test asgd
+ n = 15
+ reg = 1
+ numItermax = 100000
+ rng = np.random.RandomState(0)
+
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+
+ M = ot.dist(x, x)
+
+ G = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd",
+ numItermax=numItermax)
+
+    # check constraints
+ np.testing.assert_allclose(
+ u, G.sum(1), atol=1e-03) # cf convergence asgd
+ np.testing.assert_allclose(
+ u, G.sum(0), atol=1e-03) # cf convergence asgd
+
+
+#############################################################################
+#
+# TEST Convergence SAG and ASGD toward Sinkhorn's solution
+# --------------------------------------------------------
+# 2 identical discrete measures u defined on the same space with a
+# regularization term, a learning rate and a number of iterations
+
+
+def test_sag_asgd_sinkhorn():
+ # test all algorithms
+ n = 15
+ reg = 1
+ nb_iter = 100000
+ rng = np.random.RandomState(0)
+
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+ M = ot.dist(x, x)
+
+ G_asgd = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd",
+ numItermax=nb_iter)
+ G_sag = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "sag",
+ numItermax=nb_iter)
+ G_sinkhorn = ot.sinkhorn(u, u, M, reg)
+
+    # check constraints
+ np.testing.assert_allclose(
+ G_sag.sum(1), G_sinkhorn.sum(1), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sag.sum(0), G_sinkhorn.sum(0), atol=1e-03)
+ np.testing.assert_allclose(
+ G_asgd.sum(1), G_sinkhorn.sum(1), atol=1e-03)
+ np.testing.assert_allclose(
+ G_asgd.sum(0), G_sinkhorn.sum(0), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sag, G_sinkhorn, atol=1e-03) # cf convergence sag
+ np.testing.assert_allclose(
+ G_asgd, G_sinkhorn, atol=1e-03) # cf convergence asgd
+
+
+#############################################################################
+# TESTS FOR THE DUAL PROBLEM
+#############################################################################
+
+#############################################################################
+#
+# TEST SGD algorithm
+# ---------------------------------------------
+# 2 identical discrete measures u defined on the same space with a
+# regularization term, a batch size and a number of iterations
+
+
+def test_stochastic_dual_sgd():
+ # test sgd
+ n = 10
+ reg = 1
+ numItermax = 15000
+ batch_size = 10
+ rng = np.random.RandomState(0)
+
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+
+ M = ot.dist(x, x)
+
+ G = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size,
+ numItermax=numItermax)
+
+    # check constraints
+ np.testing.assert_allclose(
+ u, G.sum(1), atol=1e-03) # cf convergence sgd
+ np.testing.assert_allclose(
+ u, G.sum(0), atol=1e-03) # cf convergence sgd
+
+
+#############################################################################
+#
+# TEST Convergence SGD toward Sinkhorn's solution
+# --------------------------------------------------------
+# 2 identical discrete measures u defined on the same space with a
+# regularization term, a batch size and a number of iterations
+
+
+def test_dual_sgd_sinkhorn():
+ # test all dual algorithms
+ n = 10
+ reg = 1
+ nb_iter = 15000
+ batch_size = 10
+ rng = np.random.RandomState(0)
+
+    # Test uniform
+ x = rng.randn(n, 2)
+ u = ot.utils.unif(n)
+ M = ot.dist(x, x)
+
+ G_sgd = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size,
+ numItermax=nb_iter)
+
+ G_sinkhorn = ot.sinkhorn(u, u, M, reg)
+
+    # check constraints
+ np.testing.assert_allclose(
+ G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sgd, G_sinkhorn, atol=1e-03) # cf convergence sgd
+
+    # Test Gaussian
+ n = 30
+ reg = 1
+ batch_size = 30
+
+ a = ot.datasets.make_1D_gauss(n, 15, 5) # m= mean, s= std
+ b = ot.datasets.make_1D_gauss(n, 15, 5)
+ X_source = np.arange(n, dtype=np.float64)
+ Y_target = np.arange(n, dtype=np.float64)
+ M = ot.dist(X_source.reshape((n, 1)), Y_target.reshape((n, 1)))
+ M /= M.max()
+
+ G_sgd = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size,
+ numItermax=nb_iter)
+
+ G_sinkhorn = ot.sinkhorn(a, b, M, reg)
+
+    # check constraints
+ np.testing.assert_allclose(
+ G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-03)
+ np.testing.assert_allclose(
+ G_sgd, G_sinkhorn, atol=1e-03) # cf convergence sgd
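
For context on the new test file as a whole: solve_semi_dual_entropic optimizes the semi-dual of entropy-regularized OT, which SAG and ASGD can handle incrementally. With source weights a, target weights b, cost M and regularization eps (a sketch of the usual formulation; the notation is not taken from the file), the semi-dual problem reads

    \max_{v} \; \sum_j b_j v_j
               - \varepsilon \sum_i a_i \log\Big( \sum_j b_j \exp\big( (v_j - M_{ij}) / \varepsilon \big) \Big).

Each summand over i depends on a single source sample, which is what makes the incremental SAG/ASGD updates applicable; the plan G asserted in the tests is recovered from the optimal dual variables. solve_dual_entropic instead optimizes both potentials jointly by SGD over minibatches of size batch_size, hence the separate convergence tolerances.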
diff --git a/test/test_utils.py b/test/test_utils.py
index b524ef6..640598d 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -12,7 +12,7 @@ import sys
def test_parmap():
- n = 100
+ n = 10
    def f(i):
        return 1.0 * i * i