diff options
author | Gard Spreemann <gspr@nonempty.org> | 2020-01-20 14:07:53 +0100 |
---|---|---|
committer | Gard Spreemann <gspr@nonempty.org> | 2020-01-20 14:07:53 +0100 |
commit | bdfb24ff37ea777d6e266b145047cd4e281ebac3 (patch) | |
tree | 00cbac5f3dc25a4ee76164828abd72c1cbab37cc /test/test_optim.py | |
parent | abc441b00f0fe2fa4ef0efc4e1aa67b27cca9a13 (diff) | |
parent | 5e70a77fbb2feec513f21c9ef65dcc535329ace6 (diff) |
Merge tag '0.6.0' into debian/sid
Diffstat (limited to 'test/test_optim.py')
-rw-r--r-- | test/test_optim.py | 73 |
1 file changed, 73 insertions, 0 deletions
diff --git a/test/test_optim.py b/test/test_optim.py new file mode 100644 index 0000000..ae31e1f --- /dev/null +++ b/test/test_optim.py @@ -0,0 +1,73 @@ +"""Tests for module optim fro OT optimization """ + +# Author: Remi Flamary <remi.flamary@unice.fr> +# +# License: MIT License + +import numpy as np +import ot + + +def test_conditional_gradient(): + + n_bins = 100 # nb bins + np.random.seed(0) + # bin positions + x = np.arange(n_bins, dtype=np.float64) + + # Gaussian distributions + a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std + b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10) + + # loss matrix + M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1))) + M /= M.max() + + def f(G): + return 0.5 * np.sum(G**2) + + def df(G): + return G + + reg = 1e-1 + + G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True) + + np.testing.assert_allclose(a, G.sum(1)) + np.testing.assert_allclose(b, G.sum(0)) + + +def test_generalized_conditional_gradient(): + + n_bins = 100 # nb bins + np.random.seed(0) + # bin positions + x = np.arange(n_bins, dtype=np.float64) + + # Gaussian distributions + a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std + b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10) + + # loss matrix + M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1))) + M /= M.max() + + def f(G): + return 0.5 * np.sum(G**2) + + def df(G): + return G + + reg1 = 1e-3 + reg2 = 1e-1 + + G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True) + + np.testing.assert_allclose(a, G.sum(1), atol=1e-05) + np.testing.assert_allclose(b, G.sum(0), atol=1e-05) + + +def test_solve_1d_linesearch_quad_funct(): + np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(1, -1, 0), 0.5) + np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 5, 0), 0) + np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 0.5, 0), 1) |