"""Tests for module optim fro OT optimization """
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import ot


def test_conditional_gradient():

    n_bins = 100  # nb bins
    np.random.seed(0)

    # bin positions
    x = np.arange(n_bins, dtype=np.float64)

    # Gaussian distributions
    a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5)  # m = mean, s = std
    b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)

    # loss matrix
    M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
    M /= M.max()

    # quadratic regularization term and its gradient
    def f(G):
        return 0.5 * np.sum(G**2)

    def df(G):
        return G

    reg = 1e-1

    G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True)

    # the returned plan must have the prescribed marginals
    np.testing.assert_allclose(a, G.sum(1))
    np.testing.assert_allclose(b, G.sum(0))
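

# A minimal illustrative sketch, not part of the original tests: ot.optim.cg
# minimizes the objective <G, M> + reg * f(G) over the transport polytope.
# The helper name below is hypothetical and only evaluates that objective for
# a given plan G, using the same f as defined inside the test above.
def _cg_objective(G, M, reg, f):
    # linear transport cost plus the weighted regularization term
    return np.sum(G * M) + reg * f(G)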


def test_generalized_conditional_gradient():

    n_bins = 100  # nb bins
    np.random.seed(0)

    # bin positions
    x = np.arange(n_bins, dtype=np.float64)

    # Gaussian distributions
    a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5)  # m = mean, s = std
    b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)

    # loss matrix
    M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
    M /= M.max()

    # quadratic regularization term and its gradient
    def f(G):
        return 0.5 * np.sum(G**2)

    def df(G):
        return G

    reg1 = 1e-3
    reg2 = 1e-1

    G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True)

    # the returned plan must have the prescribed marginals (up to tolerance)
    np.testing.assert_allclose(a, G.sum(1), atol=1e-05)
    np.testing.assert_allclose(b, G.sum(0), atol=1e-05)
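

# A minimal illustrative sketch, not part of the original tests: ot.optim.gcg
# targets the doubly regularized objective
# <G, M> + reg1 * sum(G * log(G)) + reg2 * f(G).
# The helper name below is hypothetical; it only evaluates that objective for
# a given plan G and assumes G is entrywise positive so log(G) is defined.
def _gcg_objective(G, M, reg1, reg2, f):
    # linear cost + entropic regularization + generic regularizer f
    return np.sum(G * M) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G)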