author     ievred <ievgen.redko@univ-st-etienne.fr>  2020-04-08 10:28:57 +0200
committer  ievred <ievgen.redko@univ-st-etienne.fr>  2020-04-08 10:28:57 +0200
commit     d6ef8676cc3f94ba5d80acc9fd9745c9ed91819a (patch)
tree       e9a17a904b12748ac9f7bfb602da55fe3c23d7f4 /ot
parent     2c9f992157844d6253a302905417e86580ac6b12 (diff)
remove jcpot from laplace
Diffstat (limited to 'ot')
-rw-r--r--  ot/bregman.py  160
-rw-r--r--  ot/da.py       181
2 files changed, 2 insertions, 339 deletions
diff --git a/ot/bregman.py b/ot/bregman.py
index 61dfa52..f737e81 100644
--- a/ot/bregman.py
+++ b/ot/bregman.py
@@ -1503,166 +1503,6 @@ def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
return np.sum(K0, axis=1)
-def jcpot_barycenter(Xs, Ys, Xt, reg, metric='sqeuclidean', numItermax=100,
- stopThr=1e-6, verbose=False, log=False, **kwargs):
- r'''Joint OT and proportion estimation for multi-source target shift as proposed in [27]
-
- The function solves the following optimization problem:
-
- .. math::
-
- \mathbf{h} = \arg\min_{\mathbf{h}} \quad \sum_{k=1}^{K} \lambda_k
- W_{reg}((\mathbf{D}_2^{(k)} \mathbf{h})^T, \mathbf{a})
-
- s.t. \ \forall k, \ \mathbf{D}_1^{(k)} \gamma_k \mathbf{1}_n = \mathbf{h}
-
- where :
-
- - :math:`\lambda_k` is the weight of k-th source domain
- - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see ot.bregman.sinkhorn)
- - :math:`\mathbf{D}_2^{(k)}` is a matrix of weights related to k-th source domain defined as in [p. 5, 27], its expected shape is `(n_k, C)` where `n_k` is the number of elements in the k-th source domain and `C` is the number of classes
- - :math:`\mathbf{h}` is a vector of estimated proportions in the target domain of size C
- - :math:`\mathbf{a}` is a uniform vector of weights in the target domain of size `n`
- - :math:`\mathbf{D}_1^{(k)}` is a matrix of class assignments defined as in [p. 5, 27], its expected shape is `(n_k, C)`
-
- The problem consists in solving a Wasserstein barycenter problem to estimate the proportions :math:`\mathbf{h}` in the target domain.
-
- The algorithm used for solving the problem is the Iterative Bregman projections algorithm
- with two sets of marginal constraints related to the unknown vector :math:`\mathbf{h}` and the uniform target distribution.
-
- Parameters
- ----------
- Xs : list of K np.ndarray(nsk,d)
- features of all source domains' samples
- Ys : list of K np.ndarray(nsk,)
- labels of all source domains' samples
- Xt : np.ndarray (nt,d)
- samples in the target domain
- reg : float
- Regularization term > 0
- metric : string, optional (default="sqeuclidean")
- The ground metric for the Wasserstein problem
- numItermax : int, optional
- Max number of iterations
- stopThr : float, optional
- Stop threshold on relative change in the barycenter (>0)
- log : bool, optional
- record log if True
- verbose : bool, optional (default=False)
- Controls the verbosity of the optimization algorithm
-
- Returns
- -------
- gamma : List of K (nsk x nt) ndarrays
- Optimal transportation matrices for the given parameters for each pair of source and target domains
- h : (C,) ndarray
- proportion estimation in the target domain
- log : dict
- log dictionary returned only if log==True in parameters
-
-
- References
- ----------
-
- .. [27] Ievgen Redko, Nicolas Courty, Rémi Flamary, Devis Tuia
- "Optimal transport for multi-source domain adaptation under target shift",
- International Conference on Artificial Intelligence and Statistics (AISTATS), 2019.
-
- '''
- nbclasses = len(np.unique(Ys[0]))
- nbdomains = len(Xs)
-
- # log dictionary
- if log:
- log = {'niter': 0, 'err': [], 'M': [], 'D1': [], 'D2': []}
-
- K = []
- M = []
- D1 = []
- D2 = []
-
- # For each source domain, build cost matrices M, Gibbs kernels K and corresponding matrices D_1 and D_2
- for d in range(nbdomains):
- dom = {}
- nsk = Xs[d].shape[0] # get number of elements for this domain
- dom['nbelem'] = nsk
- classes = np.unique(Ys[d]) # get number of classes for this domain
-
- # format classes to start from 0 for convenience
- if np.min(classes) != 0:
- Ys[d] = Ys[d] - np.min(classes)
- classes = np.unique(Ys[d])
-
- # build the corresponding D_1 and D_2 matrices
- Dtmp1 = np.zeros((nbclasses, nsk))
- Dtmp2 = np.zeros((nbclasses, nsk))
-
- for c in classes:
- nbelemperclass = np.sum(Ys[d] == c)
- if nbelemperclass != 0:
- Dtmp1[int(c), Ys[d] == c] = 1.
- Dtmp2[int(c), Ys[d] == c] = 1. / (nbelemperclass)
- D1.append(Dtmp1)
- D2.append(Dtmp2)
-
- # build the cost matrix and the Gibbs kernel
- Mtmp = dist(Xs[d], Xt, metric=metric)
- Mtmp = Mtmp / np.median(Mtmp)
- M.append(Mtmp)
-
- Ktmp = np.empty(Mtmp.shape, dtype=Mtmp.dtype)
- np.divide(Mtmp, -reg, out=Ktmp)
- np.exp(Ktmp, out=Ktmp)
- K.append(Ktmp)
-
- # uniform target distribution
- a = unif(np.shape(Xt)[0])
-
- cpt = 0 # iterations count
- err = 1
- old_bary = np.ones((nbclasses))
-
- while (err > stopThr and cpt < numItermax):
-
- bary = np.zeros((nbclasses))
-
- # update coupling matrices for marginal constraints w.r.t. uniform target distribution
- for d in range(nbdomains):
- K[d] = projC(K[d], a)
- other = np.sum(K[d], axis=1)
- bary = bary + np.log(np.dot(D1[d], other)) / nbdomains
-
- bary = np.exp(bary)
-
- # update coupling matrices for marginal constraints w.r.t. unknown proportions based on [Prop 4., 27]
- for d in range(nbdomains):
- new = np.dot(D2[d].T, bary)
- K[d] = projR(K[d], new)
-
- err = np.linalg.norm(bary - old_bary)
- cpt = cpt + 1
- old_bary = bary
-
- if log:
- log['err'].append(err)
-
- if verbose:
- if cpt % 200 == 0:
- print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
- print('{:5d}|{:8e}|'.format(cpt, err))
-
- bary = bary / np.sum(bary)
-
- if log:
- log['niter'] = cpt
- log['M'] = M
- log['D1'] = D1
- log['D2'] = D2
- return K, bary, log
- else:
- return K, bary
-
-
def empirical_sinkhorn(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean',
numIterMax=10000, stopThr=1e-9, verbose=False,
log=False, **kwargs):
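
For reference, a minimal usage sketch of the removed ot.bregman.jcpot_barycenter on synthetic data, assuming a checkout that still contains the function (e.g. the parent commit 2c9f992); the sample sizes and regularization value below are illustrative only:

import numpy as np
import ot

rng = np.random.RandomState(42)
# two labelled source domains and one unlabelled target domain
Xs = [rng.randn(20, 2), rng.randn(30, 2) + 1]
Ys = [rng.randint(0, 2, 20), rng.randint(0, 2, 30)]
Xt = rng.randn(25, 2)

# K: list of couplings, one per source domain; h: estimated target class proportions
K, h = ot.bregman.jcpot_barycenter(Xs, Ys, Xt, reg=1e-1,
                                   metric='sqeuclidean', numItermax=100)
print(h)  # h is normalized, so h.sum() == 1
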
diff --git a/ot/da.py b/ot/da.py
index 0fdd3be..474c944 100644
--- a/ot/da.py
+++ b/ot/da.py
@@ -14,7 +14,7 @@ Domain adaptation with optimal transport
import numpy as np
import scipy.linalg as linalg
-from .bregman import sinkhorn, jcpot_barycenter
+from .bregman import sinkhorn
from .lp import emd
from .utils import unif, dist, kernel, cost_normalization, laplacian
from .utils import check_params, BaseEstimator
@@ -2121,181 +2121,4 @@ class UnbalancedSinkhornTransport(BaseTransport):
self.coupling_ = returned_
self.log_ = dict()
- return self
-
-
-class JCPOTTransport(BaseTransport):
-
- """Domain Adapatation OT method for multi-source target shift based on Wasserstein barycenter algorithm.
-
- Parameters
- ----------
- reg_e : float, optional (default=1)
- Entropic regularization parameter
- max_iter : int, float, optional (default=10)
- The maximum number of iterations before stopping the optimization
- algorithm if it has not converged
- tol : float, optional (default=10e-9)
- Stop threshold on error (inner sinkhorn solver) (>0)
- verbose : bool, optional (default=False)
- Controls the verbosity of the optimization algorithm
- log : bool, optional (default=False)
- Controls the logs of the optimization algorithm
- metric : string, optional (default="sqeuclidean")
- The ground metric for the Wasserstein problem
- norm : string, optional (default=None)
- If given, normalize the ground metric to avoid numerical errors that
- can occur with large metric values.
- distribution_estimation : callable, optional (defaults to the uniform)
- The kind of distribution estimation to employ
- out_of_sample_map : string, optional (default="ferradans")
- The kind of out of sample mapping to apply to transport samples
- from one domain to another. Currently the only possible option is
- "ferradans" which uses the method proposed in [6].
-
- Attributes
- ----------
- coupling_ : list of array-like objects, shape K x (n_source_samples, n_target_samples)
- A set of optimal couplings between each source domain and the target domain
- proportions_ : array-like, shape (n_classes,)
- Estimated class proportions in the target domain
- log_ : dictionary
- The dictionary of log, an empty dict if the parameter log is not True
-
- References
- ----------
-
- .. [1] Ievgen Redko, Nicolas Courty, Rémi Flamary, Devis Tuia
- "Optimal transport for multi-source domain adaptation under target shift",
- International Conference on Artificial Intelligence and Statistics (AISTATS),
- vol. 89, p.849-858, 2019.
-
- """
-
- def __init__(self, reg_e=.1, max_iter=10,
- tol=10e-9, verbose=False, log=False,
- metric="sqeuclidean",
- out_of_sample_map='ferradans'):
- self.reg_e = reg_e
- self.max_iter = max_iter
- self.tol = tol
- self.verbose = verbose
- self.log = log
- self.metric = metric
- self.out_of_sample_map = out_of_sample_map
-
- def fit(self, Xs, ys=None, Xt=None, yt=None):
- """Building coupling matrices from a list of source and target sets of samples
- (Xs, ys) and (Xt, yt)
-
- Parameters
- ----------
- Xs : list of K array-like objects, shape K x (nk_source_samples, n_features)
- A list of the training input samples.
- ys : list of K array-like objects, shape K x (nk_source_samples,)
- A list of the class labels
- Xt : array-like, shape (n_target_samples, n_features)
- The training input samples.
- yt : array-like, shape (n_target_samples,)
- The class labels. If some target samples are unlabeled, fill the
- yt's elements with -1.
-
- Warning: Note that, due to this convention, -1 cannot be used as a
- class label
-
- Returns
- -------
- self : object
- Returns self.
- """
-
- # check the necessary inputs parameters are here
- if check_params(Xs=Xs, Xt=Xt, ys=ys):
-
- self.xs_ = Xs
- self.xt_ = Xt
-
- returned_ = jcpot_barycenter(Xs=Xs, Ys=ys, Xt=Xt, reg=self.reg_e,
- metric=self.metric, numItermax=self.max_iter, stopThr=self.tol,
- verbose=self.verbose, log=self.log)
-
- # deal with the value of log
- if self.log:
- self.coupling_, self.proportions_, self.log_ = returned_
- else:
- self.coupling_, self.proportions_ = returned_
- self.log_ = dict()
-
- return self
-
- def transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128):
- """Transports source samples Xs onto target ones Xt
-
- Parameters
- ----------
- Xs : array-like, shape (n_source_samples, n_features)
- The training input samples.
- ys : array-like, shape (n_source_samples,)
- The class labels
- Xt : array-like, shape (n_target_samples, n_features)
- The training input samples.
- yt : array-like, shape (n_target_samples,)
- The class labels. If some target samples are unlabeled, fill the
- yt's elements with -1.
-
- Warning: Note that, due to this convention, -1 cannot be used as a
- class label
- batch_size : int, optional (default=128)
- The batch size for out of sample transform
- """
-
- transp_Xs = []
-
- # check the necessary inputs parameters are here
- if check_params(Xs=Xs):
-
- if all([np.allclose(x, y) for x, y in zip(self.xs_, Xs)]):
-
- # perform standard barycentric mapping for each source domain
-
- for coupling in self.coupling_:
- transp = coupling / np.sum(coupling, 1)[:, None]
-
- # set nans to 0
- transp[~ np.isfinite(transp)] = 0
-
- # compute transported samples
- transp_Xs.append(np.dot(transp, self.xt_))
- else:
-
- # perform out of sample mapping
- indices = np.arange(Xs.shape[0])
- batch_ind = [
- indices[i:i + batch_size]
- for i in range(0, len(indices), batch_size)]
-
- transp_Xs = []
-
- for bi in batch_ind:
- transp_Xs_ = []
-
- # get the nearest neighbor in the sources domains
- xs = np.concatenate(self.xs_, axis=0)
- idx = np.argmin(dist(Xs[bi], xs), axis=1)
-
- # transport the source samples
- for coupling in self.coupling_:
- transp = coupling / np.sum(
- coupling, 1)[:, None]
- transp[~ np.isfinite(transp)] = 0
- transp_Xs_.append(np.dot(transp, self.xt_))
-
- transp_Xs_ = np.concatenate(transp_Xs_, axis=0)
-
- # define the transported points
- transp_Xs_ = transp_Xs_[idx, :] + Xs[bi] - xs[idx, :]
- transp_Xs.append(transp_Xs_)
-
- transp_Xs = np.concatenate(transp_Xs, axis=0)
-
- return transp_Xs
+ return self
\ No newline at end of file
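
Similarly, the removed ot.da.JCPOTTransport class wrapped this estimator in the BaseTransport API. A minimal usage sketch on synthetic data, again assuming a POT version that still ships the class:

import numpy as np
import ot

rng = np.random.RandomState(42)
Xs = [rng.randn(20, 2), rng.randn(30, 2) + 1]    # two source domains
ys = [rng.randint(0, 2, 20), rng.randint(0, 2, 30)]
Xt = rng.randn(25, 2)                            # unlabelled target samples

otda = ot.da.JCPOTTransport(reg_e=0.1, max_iter=10, tol=1e-9, log=True)
otda.fit(Xs=Xs, ys=ys, Xt=Xt)
print(otda.proportions_)           # estimated class proportions in the target domain
transp_Xs = otda.transform(Xs=Xs)  # list of transported samples, one array per source domain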