path: root/ot/gpu/da.py
author    Rémi Flamary <remi.flamary@gmail.com>    2018-09-28 15:32:53 +0200
committer Rémi Flamary <remi.flamary@gmail.com>    2018-09-28 15:32:53 +0200
commit    c18f73739fdd8e8ca0ef88dddcf2ba039a85dacf (patch)
tree      9f00d6baa9d68457917190cdad807c027fc85b56 /ot/gpu/da.py
parent    c531fba46d9ea703a8a5ae270efe44466d54a593 (diff)
parent    8f6c45534dcdd6e8f80a31205be28481fc79c533 (diff)
merge with new gpu stuff
Diffstat (limited to 'ot/gpu/da.py')
-rw-r--r--    ot/gpu/da.py    136
1 file changed, 55 insertions, 81 deletions
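
This merge ports ot/gpu/da.py from cudamat to cupy: cost matrices are now passed as plain arrays instead of cudamat.CUDAMatrix handles, and a new to_numpy flag controls whether results are downloaded back to host memory. A minimal usage sketch of the new interface follows; the toy data, the reg/eta values, and the availability of cupy with a CUDA device are assumptions, not part of the commit.

    import numpy as np
    from ot.gpu import da  # assumes cupy and a CUDA-capable device

    ns, nt = 100, 120
    a = np.ones(ns) / ns                     # uniform source weights
    b = np.ones(nt) / nt                     # uniform target weights
    labels_a = np.random.randint(0, 3, ns)   # toy source class labels
    M = np.random.rand(ns, nt)               # toy (ns, nt) cost matrix

    # numpy inputs are uploaded to the GPU; the coupling comes back as numpy
    G = da.sinkhorn_lpl1_mm(a, labels_a, b, M, reg=1e-1, eta=1e-1)

    # to_numpy=False keeps the coupling on the GPU as a cupy array,
    # saving a device-to-host transfer when chaining further GPU calls
    G_gpu = da.sinkhorn_lpl1_mm(a, labels_a, b, M, reg=1e-1, eta=1e-1,
                                to_numpy=False)
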
diff --git a/ot/gpu/da.py b/ot/gpu/da.py
index ac3f8d7..f5e7daa 100644
--- a/ot/gpu/da.py
+++ b/ot/gpu/da.py
@@ -11,79 +11,30 @@ Domain adaptation with optimal transport with GPU implementation
# License: MIT License
-import numpy as np
-#from ..utils import unif
+import cupy as np # np used for matrix computation
+import cupy as cp # cp used for cupy specific operations
+import numpy as npp
+from . import utils
+
from .bregman import sinkhorn
-import cudamat
-def pairwiseEuclideanGPU(a, b, returnAsGPU=False, squared=False):
+def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10,
+ numInnerItermax=200, stopInnerThr=1e-9, verbose=False,
+ log=False, to_numpy=True):
"""
- Compute the pairwise euclidean distance between matrices a and b.
-
-
- Parameters
- ----------
- a : np.ndarray (n, f)
- first matrice
- b : np.ndarray (m, f)
- second matrice
- returnAsGPU : boolean, optional (default False)
- if True, returns cudamat matrix still on GPU, else return np.ndarray
- squared : boolean, optional (default False)
- if True, return squared euclidean distance matrice
+ Solve the entropic regularization optimal transport problem with nonconvex
+ group lasso regularization on GPU
+    If the input matrices are in numpy format, they will be uploaded to the
+    GPU first, which can incur significant time overhead.
- Returns
- -------
- c : (n x m) np.ndarray or cudamat.CUDAMatrix
- pairwise euclidean distance distance matrix
- """
- # a is shape (n, f) and b shape (m, f). Return matrix c of shape (n, m).
- # First compute in c_GPU the squared euclidean distance. And return its
- # square root. At each cell [i,j] of c, we want to have
- # sum{k in range(f)} ( (a[i,k] - b[j,k])^2 ). We know that
- # (a-b)^2 = a^2 -2ab +b^2. Thus we want to have in each cell of c:
- # sum{k in range(f)} ( a[i,k]^2 -2a[i,k]b[j,k] +b[j,k]^2).
-
- a_GPU = cudamat.CUDAMatrix(a)
- b_GPU = cudamat.CUDAMatrix(b)
-
- # Multiply a by b transpose to obtain in each cell [i,j] of c the
- # value sum{k in range(f)} ( a[i,k]b[j,k] )
- c_GPU = cudamat.dot(a_GPU, b_GPU.transpose())
- # multiply by -2 to have sum{k in range(f)} ( -2a[i,k]b[j,k] )
- c_GPU.mult(-2)
-
- # Compute the vectors of the sum of squared elements.
- a_GPU = cudamat.pow(a_GPU, 2).sum(axis=1)
- b_GPU = cudamat.pow(b_GPU, 2).sum(axis=1)
-
- # Add the vectors in each columns (respectivly rows) of c.
- # sum{k in range(f)} ( a[i,k]^2 -2a[i,k]b[j,k] )
- c_GPU.add_col_vec(a_GPU)
- # sum{k in range(f)} ( a[i,k]^2 -2a[i,k]b[j,k] +b[j,k]^2)
- c_GPU.add_row_vec(b_GPU.transpose())
-
- if not squared:
- c_GPU = cudamat.sqrt(c_GPU)
-
- if returnAsGPU:
- return c_GPU
- else:
- return c_GPU.asarray()
-
-
-def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
- numInnerItermax=200, stopInnerThr=1e-9,
- verbose=False, log=False):
- """
- Solve the entropic regularization optimal transport problem with nonconvex group lasso regularization
The function solves the following optimization problem:
.. math::
- \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega_e(\gamma)+ \eta \Omega_g(\gamma)
+ \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega_e(\gamma)
+ + \eta \Omega_g(\gamma)
s.t. \gamma 1 = a
@@ -93,11 +44,16 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
where :
- M is the (ns,nt) metric cost matrix
- - :math:`\Omega_e` is the entropic regularization term :math:`\Omega_e(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- - :math:`\Omega_g` is the group lasso regulaization term :math:`\Omega_g(\gamma)=\sum_{i,c} \|\gamma_{i,\mathcal{I}_c}\|^{1/2}_1` where :math:`\mathcal{I}_c` are the index of samples from class c in the source domain.
+ - :math:`\Omega_e` is the entropic regularization term
+ :math:`\Omega_e(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
+    - :math:`\Omega_g` is the group lasso regularization term
+      :math:`\Omega_g(\gamma)=\sum_{i,c} \|\gamma_{i,\mathcal{I}_c}\|^{1/2}_1`
+      where :math:`\mathcal{I}_c` are the indices of samples from class c
+      in the source domain.
- a and b are source and target weights (sum to 1)
- The algorithm used for solving the problem is the generalised conditional gradient as proposed in [5]_ [7]_
+ The algorithm used for solving the problem is the generalised conditional
+ gradient as proposed in [5]_ [7]_
Parameters
@@ -108,7 +64,7 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
labels of samples in the source domain
b : np.ndarray (nt,)
samples weights in the target domain
- M_GPU : cudamat.CUDAMatrix (ns,nt)
+ M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term for entropic regularization >0
@@ -124,6 +80,8 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
Print information along iterations
log : bool, optional
record log if True
+ to_numpy : boolean, optional (default True)
+        If True, convert the GPU array result back to numpy format.
Returns
@@ -137,8 +95,13 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
References
----------
- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1
- .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567.
+ .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy,
+ "Optimal Transport for Domain Adaptation," in IEEE
+       Transactions on Pattern Analysis and Machine Intelligence,
+ vol.PP, no.99, pp.1-1
+ .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015).
+ Generalized conditional gradient: analysis of convergence
+ and applications. arXiv preprint arXiv:1510.06567.
See Also
--------
@@ -147,29 +110,30 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
ot.optim.cg : General regularized OT
"""
+
+ a, labels_a, b, M = utils.to_gpu(a, labels_a, b, M)
+
p = 0.5
epsilon = 1e-3
- Nfin = len(b)
indices_labels = []
- classes = np.unique(labels_a)
+ labels_a2 = cp.asnumpy(labels_a)
+ classes = npp.unique(labels_a2)
for c in classes:
- idxc, = np.where(labels_a == c)
- indices_labels.append(cudamat.CUDAMatrix(idxc.reshape(1, -1)))
+ idxc, = utils.to_gpu(npp.where(labels_a2 == c))
+ indices_labels.append(idxc)
- Mreg_GPU = cudamat.empty(M_GPU.shape)
- W_GPU = cudamat.empty(M_GPU.shape).assign(0)
+ W = np.zeros(M.shape)
for cpt in range(numItermax):
- Mreg_GPU.assign(M_GPU)
- Mreg_GPU.add_mult(W_GPU, eta)
- transp_GPU = sinkhorn(a, b, Mreg_GPU, reg, numItermax=numInnerItermax,
- stopThr=stopInnerThr, returnAsGPU=True)
+ Mreg = M + eta * W
+ transp = sinkhorn(a, b, Mreg, reg, numItermax=numInnerItermax,
+ stopThr=stopInnerThr, to_numpy=False)
# the transport has been computed. Check if classes are really
# separated
- W_GPU.assign(1)
- W_GPU = W_GPU.transpose()
+ W = np.ones(M.shape)
for (i, c) in enumerate(classes):
-            (_, nbRow) = indices_labels[i].shape
-            tmpC_GPU = cudamat.empty((Nfin, nbRow)).assign(0)
-            transp_GPU.transpose().select_columns(indices_labels[i], tmpC_GPU)
+            majs = np.sum(transp[indices_labels[i]], axis=0)
+            majs = p * ((majs + epsilon)**(p - 1))
+            W[indices_labels[i]] = majs
@@ -184,3 +148,13 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M_GPU, reg, eta=0.1, numItermax=10,
-    W_GPU = W_GPU.transpose()
-    return transp_GPU.asarray()
+
+    if to_numpy:
+        return utils.to_np(transp)
+    else:
+        return transp
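
The last two hunks are the core of the lpl1 scheme: each outer iteration re-solves sinkhorn on the reweighted cost Mreg = M + eta * W, then rebuilds W from the per-class column masses of the coupling. A standalone restatement of that W update, factored into a hypothetical helper group_lasso_weights for readability (the diff inlines this logic in the loop):

    import cupy as cp

    def group_lasso_weights(transp, indices_labels, p=0.5, epsilon=1e-3):
        # Gradient of the group lasso term at the current coupling: for each
        # class c, sum the mass of class-c rows column-wise, then apply
        # d/dx x**p = p * x**(p - 1) at (mass + epsilon).
        W = cp.ones(transp.shape)
        for idxc in indices_labels:
            majs = cp.sum(transp[idxc], axis=0)          # shape (nt,)
            W[idxc] = p * ((majs + epsilon) ** (p - 1))  # broadcast to class rows
        return W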
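
Separately, the removed pairwiseEuclideanGPU helper documented the classic expansion ||a_i - b_j||^2 = ||a_i||^2 - 2<a_i, b_j> + ||b_j||^2 at length before disappearing from this file. For reference, a minimal sketch of the same trick in cupy; the function name is illustrative and not part of this commit:

    import cupy as cp

    def pairwise_euclidean(a, b, squared=False):
        # c[i, j] = ||a[i]||^2 - 2 * a[i].b[j] + ||b[j]||^2
        c = -2.0 * cp.dot(a, b.T)             # cross term, shape (n, m)
        c += cp.sum(a ** 2, axis=1)[:, None]  # add squared row norms of a
        c += cp.sum(b ** 2, axis=1)[None, :]  # add squared row norms of b
        if squared:
            return c
        return cp.sqrt(cp.maximum(c, 0))      # clamp tiny negative round-off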