diff options
author | Rémi Flamary <remi.flamary@gmail.com> | 2021-06-17 11:46:37 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2021-06-17 11:46:37 +0200 |
commit | 8ef3341a472909f223ec0f678f11f136f55c1406 (patch) | |
tree | 162cbd7f7a0e3bd87c2e3b5fe61f70f25ec951c5 /test | |
parent | 2dbeeda9308029a8e8db56bed07d48f4d5718efb (diff) |
[MRG] Speedup tests (#262)
* speedup tests
* add color to tests and timings
* add test unbalanced
* stupid missing -
Diffstat (limited to 'test')
-rw-r--r-- | test/test_bregman.py | 7 | ||||
-rw-r--r-- | test/test_da.py | 8 | ||||
-rw-r--r-- | test/test_gromov.py | 15 | ||||
-rw-r--r-- | test/test_optim.py | 6 | ||||
-rw-r--r-- | test/test_stochastic.py | 40 | ||||
-rw-r--r-- | test/test_unbalanced.py | 33 |
6 files changed, 71 insertions, 38 deletions
diff --git a/test/test_bregman.py b/test/test_bregman.py index 9665229..88166a5 100644 --- a/test/test_bregman.py +++ b/test/test_bregman.py @@ -293,7 +293,7 @@ def test_unmix(): def test_empirical_sinkhorn(): # test sinkhorn - n = 100 + n = 10 a = ot.unif(n) b = ot.unif(n) @@ -332,7 +332,7 @@ def test_empirical_sinkhorn(): def test_lazy_empirical_sinkhorn(): # test sinkhorn - n = 100 + n = 10 a = ot.unif(n) b = ot.unif(n) numIterMax = 1000 @@ -342,7 +342,7 @@ def test_lazy_empirical_sinkhorn(): M = ot.dist(X_s, X_t) M_m = ot.dist(X_s, X_t, metric='minkowski') - f, g = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, numIterMax=numIterMax, isLazy=True, batchSize=(1, 1), verbose=True) + f, g = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, numIterMax=numIterMax, isLazy=True, batchSize=(1, 3), verbose=True) G_sqe = np.exp(f[:, None] + g[None, :] - M / 1) sinkhorn_sqe = ot.sinkhorn(a, b, M, 1) @@ -458,6 +458,7 @@ def test_implemented_methods(): ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) +@pytest.mark.filterwarnings("ignore:Bottleneck") def test_screenkhorn(): # test screenkhorn rng = np.random.RandomState(0) diff --git a/test/test_da.py b/test/test_da.py index 52c6a48..44bb2e9 100644 --- a/test/test_da.py +++ b/test/test_da.py @@ -106,8 +106,8 @@ def test_sinkhorn_l1l2_transport_class(): """test_sinkhorn_transport """ - ns = 150 - nt = 200 + ns = 50 + nt = 100 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) @@ -448,8 +448,8 @@ def test_mapping_transport_class(): """test_mapping_transport """ - ns = 60 - nt = 120 + ns = 20 + nt = 30 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) diff --git a/test/test_gromov.py b/test/test_gromov.py index 81138ca..56414a8 100644 --- a/test/test_gromov.py +++ b/test/test_gromov.py @@ -9,6 +9,8 @@ import numpy as np
import ot
+import pytest
+
def test_gromov():
n_samples = 50 # nb samples
@@ -128,9 +130,10 @@ def test_gromov_barycenter(): np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
+@pytest.mark.filterwarnings("ignore:divide")
def test_gromov_entropic_barycenter():
- ns = 50
- nt = 60
+ ns = 20
+ nt = 30
Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
@@ -138,19 +141,19 @@ def test_gromov_entropic_barycenter(): C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
- n_samples = 3
+ n_samples = 2
Cb = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
- 'square_loss', 2e-3,
- max_iter=100, tol=1e-3,
+ 'square_loss', 1e-3,
+ max_iter=50, tol=1e-5,
verbose=True)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
- 'kl_loss', 2e-3,
+ 'kl_loss', 1e-3,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
diff --git a/test/test_optim.py b/test/test_optim.py index 48de38a..fd194c2 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -37,8 +37,8 @@ def test_conditional_gradient(): np.testing.assert_allclose(b, G.sum(0)) -def test_conditional_gradient2(): - n = 1000 # nb samples +def test_conditional_gradient_itermax(): + n = 100 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) @@ -63,7 +63,7 @@ def test_conditional_gradient2(): reg = 1e-1 - G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=200000, + G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=10000, verbose=True, log=True) np.testing.assert_allclose(a, G.sum(1)) diff --git a/test/test_stochastic.py b/test/test_stochastic.py index 155622c..98e93ec 100644 --- a/test/test_stochastic.py +++ b/test/test_stochastic.py @@ -30,7 +30,7 @@ import ot def test_stochastic_sag(): # test sag - n = 15 + n = 10 reg = 1 numItermax = 30000 rng = np.random.RandomState(0) @@ -45,9 +45,9 @@ def test_stochastic_sag(): # check constratints np.testing.assert_allclose( - u, G.sum(1), atol=1e-04) # cf convergence sag + u, G.sum(1), atol=1e-03) # cf convergence sag np.testing.assert_allclose( - u, G.sum(0), atol=1e-04) # cf convergence sag + u, G.sum(0), atol=1e-03) # cf convergence sag ############################################################################# @@ -60,9 +60,9 @@ def test_stochastic_sag(): def test_stochastic_asgd(): # test asgd - n = 15 + n = 10 reg = 1 - numItermax = 100000 + numItermax = 10000 rng = np.random.RandomState(0) x = rng.randn(n, 2) @@ -75,9 +75,9 @@ def test_stochastic_asgd(): # check constratints np.testing.assert_allclose( - u, G.sum(1), atol=1e-03) # cf convergence asgd + u, G.sum(1), atol=1e-02) # cf convergence asgd np.testing.assert_allclose( - u, G.sum(0), atol=1e-03) # cf convergence asgd + u, G.sum(0), atol=1e-02) # cf convergence asgd ############################################################################# @@ -90,9 +90,9 @@ def test_stochastic_asgd(): 
def test_sag_asgd_sinkhorn(): # test all algorithms - n = 15 + n = 10 reg = 1 - nb_iter = 100000 + nb_iter = 10000 rng = np.random.RandomState(0) x = rng.randn(n, 2) @@ -107,17 +107,17 @@ def test_sag_asgd_sinkhorn(): # check constratints np.testing.assert_allclose( - G_sag.sum(1), G_sinkhorn.sum(1), atol=1e-03) + G_sag.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( - G_sag.sum(0), G_sinkhorn.sum(0), atol=1e-03) + G_sag.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( - G_asgd.sum(1), G_sinkhorn.sum(1), atol=1e-03) + G_asgd.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( - G_asgd.sum(0), G_sinkhorn.sum(0), atol=1e-03) + G_asgd.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( - G_sag, G_sinkhorn, atol=1e-03) # cf convergence sag + G_sag, G_sinkhorn, atol=1e-02) # cf convergence sag np.testing.assert_allclose( - G_asgd, G_sinkhorn, atol=1e-03) # cf convergence asgd + G_asgd, G_sinkhorn, atol=1e-02) # cf convergence asgd ############################################################################# @@ -136,7 +136,7 @@ def test_stochastic_dual_sgd(): # test sgd n = 10 reg = 1 - numItermax = 15000 + numItermax = 5000 batch_size = 10 rng = np.random.RandomState(0) @@ -167,7 +167,7 @@ def test_dual_sgd_sinkhorn(): # test all dual algorithms n = 10 reg = 1 - nb_iter = 15000 + nb_iter = 5000 batch_size = 10 rng = np.random.RandomState(0) @@ -183,11 +183,11 @@ def test_dual_sgd_sinkhorn(): # check constratints np.testing.assert_allclose( - G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-03) + G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( - G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-03) + G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( - G_sgd, G_sinkhorn, atol=1e-03) # cf convergence sgd + G_sgd, G_sinkhorn, atol=1e-02) # cf convergence sgd # Test gaussian n = 30 diff --git a/test/test_unbalanced.py b/test/test_unbalanced.py index dfeaad9..e8349d1 100644 --- 
a/test/test_unbalanced.py +++ b/test/test_unbalanced.py @@ -115,7 +115,8 @@ def test_stabilized_vs_sinkhorn(): G, log = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, reg=epsilon, method="sinkhorn_stabilized", reg_m=reg_m, - log=True) + log=True, + verbose=True) G2, log2 = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m, method="sinkhorn", log=True) @@ -138,7 +139,7 @@ def test_unbalanced_barycenter(method): reg_m = 1. q, log = barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m, - method=method, log=True) + method=method, log=True, verbose=True) # check fixed point equations fi = reg_m / (reg_m + epsilon) logA = np.log(A + 1e-16) @@ -173,6 +174,7 @@ def test_barycenter_stabilized_vs_sinkhorn(): reg_m=reg_m, log=True, tau=100, method="sinkhorn_stabilized", + verbose=True ) q, log = barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m, method="sinkhorn", @@ -182,6 +184,33 @@ def test_barycenter_stabilized_vs_sinkhorn(): q, qstable, atol=1e-05) +def test_wrong_method(): + + n = 10 + rng = np.random.RandomState(42) + + x = rng.randn(n, 2) + a = ot.utils.unif(n) + + # make dists unbalanced + b = ot.utils.unif(n) * 1.5 + + M = ot.dist(x, x) + epsilon = 1. + reg_m = 1. + + with pytest.raises(ValueError): + ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon, + reg_m=reg_m, + method='badmethod', + log=True, + verbose=True) + with pytest.raises(ValueError): + ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m, + method='badmethod', + verbose=True) + + def test_implemented_methods(): IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized'] TO_BE_IMPLEMENTED_METHODS = ['sinkhorn_reg_scaling'] |