author    Kilian <kilian.fatras@gmail.com>  2019-11-15 16:10:37 +0100
committer Kilian <kilian.fatras@gmail.com>  2019-11-15 16:10:37 +0100
commit    0280a3441b09c781035cda3b74213ec92026ff9e (patch)
tree      23d068d2485b919a67696088d603e22e32d76ff0
parent    2a32e2ea64d0d5096953a9b8259b0507fa58dca5 (diff)
fix bug numItermax emd in cg
-rw-r--r--  ot/optim.py         6
-rw-r--r--  test/test_optim.py  33
2 files changed, 37 insertions, 2 deletions
diff --git a/ot/optim.py b/ot/optim.py
index 0abd9e9..4012e0d 100644
--- a/ot/optim.py
+++ b/ot/optim.py
@@ -134,7 +134,7 @@ def solve_linesearch(cost, G, deltaG, Mi, f_val,
     return alpha, fc, f_val
 
 
-def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
+def cg(a, b, M, reg, f, df, G0=None, numItermax=200, numItermaxEmd=100000,
        stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs):
     """
     Solve the general regularized OT problem with conditional gradient
@@ -172,6 +172,8 @@ def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
         initial guess (default is indep joint density)
     numItermax : int, optional
         Max number of iterations
+    numItermaxEmd : int, optional
+        Max number of iterations for emd
     stopThr : float, optional
         Stop threshold on the relative variation (>0)
     stopThr2 : float, optional
@@ -238,7 +240,7 @@ def cg(a, b, M, reg, f, df, G0=None, numItermax=200,
         Mi += Mi.min()
 
         # solve linear program
-        Gc = emd(a, b, Mi)
+        Gc = emd(a, b, Mi, numItermax=numItermaxEmd)
 
         deltaG = Gc - G
 
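The net effect of the change above is that the iteration cap for the inner exact-OT solve is now configurable from cg(). A minimal usage sketch (hypothetical data, mirroring the new test below), assuming a POT build that includes this commit:

    import numpy as np
    import ot

    rng = np.random.RandomState(42)
    n = 4000  # enough samples that emd's default cap of 100000 can bind
    xs = rng.randn(n, 2)        # source samples
    xt = rng.randn(n, 2) + 4    # target samples, shifted

    a = np.ones(n) / n          # uniform source weights
    b = np.ones(n) / n          # uniform target weights
    M = ot.dist(xs, xt)         # squared Euclidean cost matrix
    M /= M.max()

    def f(G):                   # quadratic regularizer
        return 0.5 * np.sum(G ** 2)

    def df(G):                  # its gradient
        return G

    # numItermaxEmd is forwarded to every inner emd() linear-program solve
    G = ot.optim.cg(a, b, M, 1e-1, f, df, numItermaxEmd=200000)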
diff --git a/test/test_optim.py b/test/test_optim.py
index ae31e1f..aade36e 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -37,6 +37,39 @@ def test_conditional_gradient():
     np.testing.assert_allclose(b, G.sum(0))
 
 
+def test_conditional_gradient2():
+    n = 4000  # nb samples
+
+    mu_s = np.array([0, 0])
+    cov_s = np.array([[1, 0], [0, 1]])
+
+    mu_t = np.array([4, 4])
+    cov_t = np.array([[1, -.8], [-.8, 1]])
+
+    xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
+    xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
+
+    a, b = np.ones((n,)) / n, np.ones((n,)) / n
+
+    # loss matrix
+    M = ot.dist(xs, xt)
+    M /= M.max()
+
+    def f(G):
+        return 0.5 * np.sum(G**2)
+
+    def df(G):
+        return G
+
+    reg = 1e-1
+
+    G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=200000,
+                         verbose=True, log=True)
+
+    np.testing.assert_allclose(a, G.sum(1))
+    np.testing.assert_allclose(b, G.sum(0))
+
+
 def test_generalized_conditional_gradient():
     n_bins = 100  # nb bins
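For reference, the forwarded cap is simply the numItermax argument of ot.emd itself, which defaults to 100000 (the same default now mirrored in cg's signature). A tiny direct call, for illustration only:

    import numpy as np
    import ot

    # 2x2 exact-OT problem, solved by the same network-simplex solver
    # that cg() calls internally on each conditional-gradient iteration
    a = np.array([0.5, 0.5])
    b = np.array([0.5, 0.5])
    M = np.array([[0.0, 1.0],
                  [1.0, 0.0]])

    G = ot.emd(a, b, M, numItermax=100000)  # default cap, written out
    # optimal plan keeps mass on the diagonal: [[0.5, 0.], [0., 0.5]]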