From 2a32e2ea64d0d5096953a9b8259b0507fa58dca5 Mon Sep 17 00:00:00 2001
From: Kilian
Date: Wed, 13 Nov 2019 13:55:24 +0100
Subject: fix log bug in gromov_wasserstein2

---
 test/test_gromov.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'test')

diff --git a/test/test_gromov.py b/test/test_gromov.py
index 70fa83f..43da9fc 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -44,10 +44,14 @@ def test_gromov():
 
     gw, log = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=True)
 
+    gw_val = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=False)
+
     G = log['T']
 
     np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)
 
+    np.testing.assert_allclose(gw, gw_val, atol=1e-1, rtol=1e-1)  # cf log=False
+
     # check constraints
     np.testing.assert_allclose(
         p, G.sum(1), atol=1e-04)  # cf convergence gromov
-- 
cgit v1.2.3


From 0280a3441b09c781035cda3b74213ec92026ff9e Mon Sep 17 00:00:00 2001
From: Kilian
Date: Fri, 15 Nov 2019 16:10:37 +0100
Subject: fix bug numItermax emd in cg

---
 ot/optim.py        |  6 ++++--
 test/test_optim.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 2 deletions(-)

(limited to 'test')

diff --git a/ot/optim.py b/ot/optim.py
index 0abd9e9..4012e0d 100644
--- a/ot/optim.py
+++ b/ot/optim.py
@@ -134,7 +134,7 @@ def solve_linesearch(cost, G, deltaG, Mi, f_val,
     return alpha, fc, f_val
 
 
-def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
+def cg(a, b, M, reg, f, df, G0=None, numItermax=200, numItermaxEmd=100000,
        stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs):
     """
     Solve the general regularized OT problem with conditional gradient
@@ -172,6 +172,8 @@ def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
         initial guess (default is indep joint density)
     numItermax : int, optional
         Max number of iterations
+    numItermaxEmd : int, optional
+        Max number of iterations for emd
     stopThr : float, optional
         Stop threshold on the relative variation (>0)
     stopThr2 : float, optional
@@ -238,7 +240,7 @@ def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
         Mi += Mi.min()
 
         # solve linear program
-        Gc = emd(a, b, Mi)
+        Gc = emd(a, b, Mi, numItermax=numItermaxEmd)
 
         deltaG = Gc - G
 
diff --git a/test/test_optim.py b/test/test_optim.py
index ae31e1f..aade36e 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -37,6 +37,39 @@ def test_conditional_gradient():
     np.testing.assert_allclose(b, G.sum(0))
 
 
+def test_conditional_gradient2():
+    n = 4000  # nb samples
+
+    mu_s = np.array([0, 0])
+    cov_s = np.array([[1, 0], [0, 1]])
+
+    mu_t = np.array([4, 4])
+    cov_t = np.array([[1, -.8], [-.8, 1]])
+
+    xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
+    xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
+
+    a, b = np.ones((n,)) / n, np.ones((n,)) / n
+
+    # loss matrix
+    M = ot.dist(xs, xt)
+    M /= M.max()
+
+    def f(G):
+        return 0.5 * np.sum(G**2)
+
+    def df(G):
+        return G
+
+    reg = 1e-1
+
+    G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=200000,
+                         verbose=True, log=True)
+
+    np.testing.assert_allclose(a, G.sum(1))
+    np.testing.assert_allclose(b, G.sum(0))
+
+
 def test_generalized_conditional_gradient():
     n_bins = 100  # nb bins
-- 
cgit v1.2.3
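
The second patch threads a new numItermaxEmd keyword through ot.optim.cg so that the iteration cap of the inner emd() linear-program solve can be raised from calling code; previously cg() called emd(a, b, Mi) with no way to override emd's own default cap. Below is a minimal sketch of how a caller might use it, modeled on the test added above; the sample size, covariances, and regularization value here are illustrative choices, not prescribed by the patch:

import numpy as np
import ot

# two empirical 2D Gaussian samples with uniform weights
n = 1000
xs = ot.datasets.make_2D_samples_gauss(n, np.array([0, 0]), np.eye(2))
xt = ot.datasets.make_2D_samples_gauss(n, np.array([4, 4]), np.eye(2))
a, b = np.ones((n,)) / n, np.ones((n,)) / n

# normalized squared-Euclidean cost matrix
M = ot.dist(xs, xt)
M /= M.max()

# quadratic regularizer f(G) = 0.5 * ||G||_F^2 and its gradient
def f(G):
    return 0.5 * np.sum(G**2)

def df(G):
    return G

# numItermaxEmd raises the iteration budget of the exact emd() solve
# performed at each conditional-gradient step; large dense problems can
# exhaust the default cap of 100000 before the subproblem converges.
G = ot.optim.cg(a, b, M, reg=1e-1, f=f, df=df, numItermaxEmd=200000)

Raising the cap matters mostly for large problems such as the n = 4000 case in the new test, where the exact emd subproblem at each conditional-gradient step may otherwise stop before reaching an optimal plan.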