path: root/ot/optim.py
# -*- coding: utf-8 -*-
"""
Optimization algorithms for OT
"""

import numpy as np
import scipy as sp
# scalar_search_armijo moved to a private module in recent SciPy releases
try:
    from scipy.optimize._linesearch import scalar_search_armijo
except ImportError:  # older SciPy
    from scipy.optimize.linesearch import scalar_search_armijo
from .lp import emd

# The corresponding scipy function does not work for matrices
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=0.99):
    """
    Armijo linesearch function that works with matrices

    Find an approximate minimizer of f(xk + alpha*pk) that satisfies the
    Armijo conditions.
    
    Parameters
    ----------

    f : function
        loss function
    xk : np.ndarray 
        initial position
    pk : np.ndarray 
        descent direction
    gfk : np.ndarray
        gradient of f at xk        
    old_fval: float
        loss value at xk
    args : tuple, optional
        arguments given to f
    c1 : float, optional
        c1 constant in the Armijo rule (>0)
    alpha0 : float, optional
        initial step (>0)
  
    Returns
    -------
    alpha : float
        step that satisfies the Armijo conditions
    fc : int
        number of function calls
    fa : float
        loss value at step alpha
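
    Examples
    --------
    A minimal sketch on a toy quadratic loss (hypothetical inputs, for
    illustration only):

    >>> import numpy as np
    >>> def f(x): return np.sum(x ** 2)         # toy loss
    >>> xk = np.array([[1., 2.], [3., 4.]])     # current point (a matrix)
    >>> gfk = 2 * xk                            # gradient of f at xk
    >>> pk = -gfk                               # descent direction
    >>> alpha, fc, fa = line_search_armijo(f, xk, pk, gfk, f(xk))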
    
    """
    xk = np.atleast_1d(xk)
    fc = [0]

    def phi(alpha1):
        fc[0] += 1
        return f(xk + alpha1 * pk, *args)

    if old_fval is None:
        phi0 = phi(0.)
    else:
        phi0 = old_fval

    # directional derivative of f at xk along pk, computed with the Frobenius
    # inner product so that matrix iterates are handled (quickfix for matrices)
    derphi0 = np.sum(pk * gfk)
    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0)

    return alpha, fc[0], phi1


def cg(a, b, M, reg, f, df, G0=None, numItermax=200, stopThr=1e-9, verbose=False, log=False):
    r"""
    Solve the general regularized OT problem with conditional gradient
    
    The function solves the following optimization problem:

    .. math::
        \gamma = \arg\min_\gamma \quad \langle \gamma, M \rangle_F + \mathrm{reg} \cdot f(\gamma)

        s.t. \quad \gamma \mathbf{1} = a

             \gamma^T \mathbf{1} = b

             \gamma \geq 0
    where:

    - M is the (ns,nt) metric cost matrix
    - :math:`f` is the regularization term (and df is its gradient)
    - a and b are the source and target weights (each summing to 1)

    The algorithm used to solve the problem is conditional gradient (Frank-Wolfe), as discussed in [1]_
    
             
    Parameters
    ----------
    a : np.ndarray (ns,)
        sample weights in the source domain
    b : np.ndarray (nt,)
        sample weights in the target domain
    M : np.ndarray (ns,nt)
        loss matrix        
    reg : float
        Regularization term >0
    G0 : np.ndarray (ns,nt), optional
        initial guess (default is the independent joint distribution, i.e. np.outer(a, b))
    numItermax : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on the relative variation of the loss (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    
    Returns
    -------
    gamma: (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log: dict
        log dictionary, returned only if log==True in parameters
    
        
    References
    ----------
    
    .. [1] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
        
    See Also
    --------
    ot.lp.emd : Unregularized optimal transport
    ot.bregman.sinkhorn : Entropic regularized optimal transport
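
    Examples
    --------
    A minimal sketch with a quadratic (squared Frobenius norm) regularizer;
    the choice of ``f`` and ``df`` below is purely illustrative:

    >>> import numpy as np
    >>> a = np.array([.5, .5])                   # source weights
    >>> b = np.array([.5, .5])                   # target weights
    >>> M = np.array([[0., 1.], [1., 0.]])       # cost matrix
    >>> def f(G): return 0.5 * np.sum(G ** 2)    # regularizer
    >>> def df(G): return G                      # its gradient
    >>> G = cg(a, b, M, reg=1e-1, f=f, df=df)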
           
    """
    
    loop = 1

    if log:
        log = {'loss': []}

    if G0 is None:
        G = np.outer(a, b)
    else:
        G = G0

    def cost(G):
        return np.sum(M * G) + reg * f(G)

    f_val = cost(G)
    if log:
        log['loss'].append(f_val)

    it = 0

    if verbose:
        print('{:5s}|{:12s}|{:8s}'.format('It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
        print('{:5d}|{:8e}|{:8e}'.format(it, f_val, 0))
    
    while loop:

        it += 1
        old_fval = f_val

        # problem linearization
        Mi = M + reg * df(G)

        # solve linear program
        Gc = emd(a, b, Mi)

        deltaG = Gc - G

        # line search
        alpha, fc, f_val = line_search_armijo(cost, G, deltaG, Mi, f_val)

        G = G + alpha * deltaG

        # test convergence
        if it >= numItermax:
            loop = 0

        delta_fval = (f_val - old_fval) / abs(f_val)
        if abs(delta_fval) < stopThr:
            loop = 0

        if log:
            log['loss'].append(f_val)

        if verbose:
            if it % 20 == 0:
                print('{:5s}|{:12s}|{:8s}'.format('It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
            print('{:5d}|{:8e}|{:8e}'.format(it, f_val, delta_fval))

    if log:
        return G, log
    else:
        return G