From b1f87363b160735b6e2df59380f9de56b7934b53 Mon Sep 17 00:00:00 2001
From: ievred
Date: Wed, 1 Apr 2020 09:42:09 +0200
Subject: add dataset clean plot

---
 ot/datasets.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

(limited to 'ot/datasets.py')

diff --git a/ot/datasets.py b/ot/datasets.py
index ba0cfd9..eea9f37 100644
--- a/ot/datasets.py
+++ b/ot/datasets.py
@@ -80,7 +80,7 @@ def get_2D_samples_gauss(n, m, sigma, random_state=None):
     return make_2D_samples_gauss(n, m, sigma, random_state=None)
 
 
-def make_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs):
+def make_data_classif(dataset, n, nz=.5, theta=0, p = .5, random_state=None, **kwargs):
     """Dataset generation for classification problems
 
     Parameters
@@ -91,6 +91,8 @@ def make_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs):
         number of training samples
     nz : float
         noise level (>0)
+    p : float
+        proportion of one class in the binary setting
     random_state : int, RandomState instance or None, optional (default=None)
         If int, random_state is the seed used by the random number generator;
         If RandomState instance, random_state is the random number generator;
@@ -150,6 +152,17 @@ def make_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs):
 
         x = x.dot(rot)
 
+    elif dataset.lower() == '2gauss_prop':
+
+        y = np.concatenate((np.ones(int(p * n)), np.zeros(int((1 - p) * n))))
+        x = np.hstack((0 * y[:, None] - 0, 1 - 2 * y[:, None])) + nz * np.random.randn(len(y), 2)
+
+        if ('bias' not in kwargs) and ('b' not in kwargs):
+            kwargs['bias'] = np.array([0, 2])
+
+        x[:, 0] += kwargs['bias'][0]
+        x[:, 1] += kwargs['bias'][1]
+
     else:
 
         x = np.array(0)
         y = np.array(0)
-- 
cgit v1.2.3
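
The `2gauss_prop` branch added above draws a binary sample whose class balance is set by `p` and whose position is shifted by `bias` (default `[0, 2]`), which is convenient for label/target-shift experiments. A minimal sketch of the new option, assuming the function's usual `(x, y)` return convention (the values of `n`, `nz` and `p` below are arbitrary illustrations):

    import numpy as np
    import ot

    # 75% of the points in class 1, 25% in class 0
    x, y = ot.datasets.make_data_classif('2gauss_prop', n=100, nz=.3, p=.75)

    # before the default bias of [0, 2] is added, class 1 sits around (0, -1)
    # and class 0 around (0, 1); the bias then shifts both clusters upward
    print(x.shape)          # (100, 2)
    print(np.mean(y == 1))  # ~0.75
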
From 9200af5d795517b0772c10bb3d16022dd1a12791 Mon Sep 17 00:00:00 2001
From: ievred
Date: Thu, 2 Apr 2020 15:29:12 +0200
Subject: laplace v1

---
 ot/bregman.py     | 72 +++++++++++++++++++++++++++++++++----------------------
 ot/datasets.py    |  4 ++--
 ot/lp/__init__.py |  4 +---
 ot/plot.py        |  3 ++-
 4 files changed, 49 insertions(+), 34 deletions(-)

(limited to 'ot/datasets.py')

diff --git a/ot/bregman.py b/ot/bregman.py
index fb959e9..951d3ce 100644
--- a/ot/bregman.py
+++ b/ot/bregman.py
@@ -19,6 +19,7 @@ import warnings
 from .utils import unif, dist
 from scipy.optimize import fmin_l_bfgs_b
 
+
 def sinkhorn(a, b, M, reg, method='sinkhorn', numItermax=1000, stopThr=1e-9,
              verbose=False, log=False, **kwargs):
     r"""
@@ -539,12 +540,12 @@ def greenkhorn(a, b, M, reg, numItermax=10000, stopThr=1e-9, verbose=False,
             old_v = v[i_2]
             v[i_2] = b[i_2] / (K[:, i_2].T.dot(u))
             G[:, i_2] = u * K[:, i_2] * v[i_2]
-            #aviol = (G@one_m - a)
-            #aviol_2 = (G.T@one_n - b)
+            # aviol = (G@one_m - a)
+            # aviol_2 = (G.T@one_n - b)
             viol += (-old_v + v[i_2]) * K[:, i_2] * u
             viol_2[i_2] = v[i_2] * K[:, i_2].dot(u) - b[i_2]
-            #print('b',np.max(abs(aviol -viol)),np.max(abs(aviol_2 - viol_2)))
+            # print('b',np.max(abs(aviol -viol)),np.max(abs(aviol_2 - viol_2)))
 
         if stopThr_val <= stopThr:
             break
@@ -715,7 +716,7 @@ def sinkhorn_stabilized(a, b, M, reg, numItermax=1000, tau=1e3, stopThr=1e-9,
         if np.abs(u).max() > tau or np.abs(v).max() > tau:
             if n_hists:
                 alpha, beta = alpha + reg * \
-                    np.max(np.log(u), 1), beta + reg * np.max(np.log(v))
+                              np.max(np.log(u), 1), beta + reg * np.max(np.log(v))
             else:
                 alpha, beta = alpha + reg * np.log(u), beta + reg * np.log(v)
                 if n_hists:
@@ -940,7 +941,7 @@ def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4,
                 # the 10th iterations
                 transp = G
                 err = np.linalg.norm(
-                    (np.sum(transp, axis=0) - b))**2 + np.linalg.norm((np.sum(transp, axis=1) - a))**2
+                    (np.sum(transp, axis=0) - b)) ** 2 + np.linalg.norm((np.sum(transp, axis=1) - a)) ** 2
                 if log:
                     log['err'].append(err)
@@ -966,7 +967,7 @@ def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4,
 
 def geometricBar(weights, alldistribT):
     """return the weighted geometric mean of distributions"""
-    assert(len(weights) == alldistribT.shape[1])
+    assert (len(weights) == alldistribT.shape[1])
     return np.exp(np.dot(np.log(alldistribT), weights.T))
@@ -1108,7 +1109,7 @@ def barycenter_sinkhorn(A, M, reg, weights=None, numItermax=1000,
     if weights is None:
         weights = np.ones(A.shape[1]) / A.shape[1]
     else:
-        assert(len(weights) == A.shape[1])
+        assert (len(weights) == A.shape[1])
 
     if log:
         log = {'err': []}
@@ -1206,7 +1207,7 @@ def barycenter_stabilized(A, M, reg, tau=1e10, weights=None, numItermax=1000,
     if weights is None:
         weights = np.ones(n_hists) / n_hists
     else:
-        assert(len(weights) == A.shape[1])
+        assert (len(weights) == A.shape[1])
 
     if log:
         log = {'err': []}
@@ -1334,7 +1335,7 @@ def convolutional_barycenter2d(A, reg, weights=None, numItermax=10000,
     if weights is None:
         weights = np.ones(A.shape[0]) / A.shape[0]
     else:
-        assert(len(weights) == A.shape[0])
+        assert (len(weights) == A.shape[0])
 
     if log:
         log = {'err': []}
@@ -1350,11 +1351,11 @@ def convolutional_barycenter2d(A, reg, weights=None, numItermax=10000,
     # this is equivalent to blurring on horizontal then vertical directions
     t = np.linspace(0, 1, A.shape[1])
     [Y, X] = np.meshgrid(t, t)
-    xi1 = np.exp(-(X - Y)**2 / reg)
+    xi1 = np.exp(-(X - Y) ** 2 / reg)
 
     t = np.linspace(0, 1, A.shape[2])
     [Y, X] = np.meshgrid(t, t)
-    xi2 = np.exp(-(X - Y)**2 / reg)
+    xi2 = np.exp(-(X - Y) ** 2 / reg)
 
     def K(x):
         return np.dot(np.dot(xi1, x), xi2)
@@ -1501,6 +1502,7 @@ def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
     else:
         return np.sum(K0, axis=1)
 
+
 def jcpot_barycenter(Xs, Ys, Xt, reg, metric='sqeuclidean', numItermax=100,
                      stopThr=1e-6, verbose=False, log=False, **kwargs):
     r'''Joint OT and proportion estimation for multi-source target shift as proposed in [27]
@@ -1658,6 +1660,7 @@ def jcpot_barycenter(Xs, Ys, Xt, reg, metric='sqeuclidean', numItermax=100,
     else:
         return couplings, bary
 
+
 def empirical_sinkhorn(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean',
                        numIterMax=10000, stopThr=1e-9, verbose=False,
                        log=False, **kwargs):
@@ -1749,7 +1752,8 @@ def empirical_sinkhorn(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean',
     return pi
 
 
-def empirical_sinkhorn2(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, verbose=False, log=False, **kwargs):
+def empirical_sinkhorn2(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9,
+                        verbose=False, log=False, **kwargs):
     r'''
     Solve the entropic regularization optimal transport problem from empirical
     data and return the OT loss
@@ -1831,14 +1835,17 @@ def empirical_sinkhorn2(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', num
     M = dist(X_s, X_t, metric=metric)
 
     if log:
-        sinkhorn_loss, log = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss, log = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log,
+                                       **kwargs)
         return sinkhorn_loss, log
     else:
-        sinkhorn_loss = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log,
+                                  **kwargs)
         return sinkhorn_loss
 
 
-def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, verbose=False, log=False, **kwargs):
+def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9,
+                                  verbose=False, log=False, **kwargs):
     r'''
     Compute the sinkhorn divergence loss from empirical data
@@ -1924,11 +1931,14 @@ def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeucli
     .. [23] Aude Genevay, Gabriel Peyré, Marco Cuturi, Learning Generative Models with Sinkhorn Divergences, Proceedings of the Twenty-First International Conference on Artficial Intelligence and Statistics, (AISTATS) 21, 2018
     '''
     if log:
-        sinkhorn_loss_ab, log_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_ab, log_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax,
+                                                       stopThr=1e-9, verbose=verbose, log=log, **kwargs)
 
-        sinkhorn_loss_a, log_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_a, log_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax,
+                                                     stopThr=1e-9, verbose=verbose, log=log, **kwargs)
 
-        sinkhorn_loss_b, log_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_b, log_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax,
+                                                     stopThr=1e-9, verbose=verbose, log=log, **kwargs)
 
         sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
@@ -1943,11 +1953,14 @@ def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeucli
         return max(0, sinkhorn_div), log
 
     else:
-        sinkhorn_loss_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9,
+                                               verbose=verbose, log=log, **kwargs)
 
-        sinkhorn_loss_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_a = empirical_sinkhorn2(X_s, X_s, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9,
+                                              verbose=verbose, log=log, **kwargs)
 
-        sinkhorn_loss_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9, verbose=verbose, log=log, **kwargs)
+        sinkhorn_loss_b = empirical_sinkhorn2(X_t, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=1e-9,
+                                              verbose=verbose, log=log, **kwargs)
 
         sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
         return max(0, sinkhorn_div)
@@ -2039,7 +2052,8 @@ def screenkhorn(a, b, M, reg, ns_budget=None, nt_budget=None, uniform=False, res
     try:
         import bottleneck
     except ImportError:
-        warnings.warn("Bottleneck module is not installed. Install it from https://pypi.org/project/Bottleneck/ for better performance.")
+        warnings.warn(
+            "Bottleneck module is not installed. Install it from https://pypi.org/project/Bottleneck/ for better performance.")
         bottleneck = np
 
     a = np.asarray(a, dtype=np.float64)
@@ -2173,10 +2187,11 @@ def screenkhorn(a, b, M, reg, ns_budget=None, nt_budget=None, uniform=False, res
 
     # box constraints in L-BFGS-B (see Proposition 1 in [26])
     bounds_u = [(max(a_I_min / ((nt - nt_budget) * epsilon + nt_budget * (b_J_max / (
-        ns * epsilon * kappa * K_min))), epsilon / kappa), a_I_max / (nt * epsilon * K_min))] * ns_budget
+            ns * epsilon * kappa * K_min))), epsilon / kappa), a_I_max / (nt * epsilon * K_min))] * ns_budget
 
-    bounds_v = [(max(b_J_min / ((ns - ns_budget) * epsilon + ns_budget * (kappa * a_I_max / (nt * epsilon * K_min))),
-                     epsilon * kappa), b_J_max / (ns * epsilon * K_min))] * nt_budget
+    bounds_v = [(
+        max(b_J_min / ((ns - ns_budget) * epsilon + ns_budget * (kappa * a_I_max / (nt * epsilon * K_min))),
+            epsilon * kappa), b_J_max / (ns * epsilon * K_min))] * nt_budget
 
     # pre-calculated constants for the objective
     vec_eps_IJc = epsilon * kappa * (K_IJc * np.ones(nt - nt_budget).reshape((1, -1))).sum(axis=1)
@@ -2225,7 +2240,8 @@ def screenkhorn(a, b, M, reg, ns_budget=None, nt_budget=None, uniform=False, res
         return usc, vsc
 
     def screened_obj(usc, vsc):
-        part_IJ = np.dot(np.dot(usc, K_IJ), vsc) - kappa * np.dot(a_I, np.log(usc)) - (1. / kappa) * np.dot(b_J, np.log(vsc))
+        part_IJ = np.dot(np.dot(usc, K_IJ), vsc) - kappa * np.dot(a_I, np.log(usc)) - (1. / kappa) * np.dot(b_J,
+                                                                                                            np.log(vsc))
         part_IJc = np.dot(usc, vec_eps_IJc)
         part_IcJ = np.dot(vec_eps_IcJ, vsc)
         psi_epsilon = part_IJ + part_IJc + part_IcJ
@@ -2247,9 +2263,9 @@ def screenkhorn(a, b, M, reg, ns_budget=None, nt_budget=None, uniform=False, res
         g = np.hstack([g_u, g_v])
         return f, g
 
-    #----------------------------------------------------------------------------------------------------------------#
+    # ----------------------------------------------------------------------------------------------------------------#
     #                                       Step 2: L-BFGS-B solver                                                   #
-    #----------------------------------------------------------------------------------------------------------------#
+    # ----------------------------------------------------------------------------------------------------------------#
 
     u0, v0 = restricted_sinkhorn(u0, v0)
     theta0 = np.hstack([u0, v0])
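
The `bregman.py` hunks in this commit are pep8 re-wraps around `empirical_sinkhorn2` and `empirical_sinkhorn_divergence`; the signatures themselves are unchanged. For context, a small usage sketch of these two entry points (the data and `reg` values below are arbitrary; the divergence is the debiased quantity `max(0, S(a,b) - (S(a,a) + S(b,b)) / 2)` computed in the code above):

    import numpy as np
    import ot

    rng = np.random.RandomState(42)
    X_s = rng.randn(50, 2)      # empirical source samples
    X_t = rng.randn(60, 2) + 1  # empirical target samples, shifted

    # entropic OT loss between the two empirical measures (uniform weights)
    loss = ot.bregman.empirical_sinkhorn2(X_s, X_t, reg=.1)

    # debiased sinkhorn divergence, clipped at zero as in the code above
    div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, reg=.1)
    print(loss, div)
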
diff --git a/ot/datasets.py b/ot/datasets.py
index eea9f37..a1ca7b6 100644
--- a/ot/datasets.py
+++ b/ot/datasets.py
@@ -30,7 +30,7 @@ def make_1D_gauss(n, m, s):
         1D histogram for a gaussian distribution
     """
     x = np.arange(n, dtype=np.float64)
-    h = np.exp(-(x - m)**2 / (2 * s**2))
+    h = np.exp(-(x - m) ** 2 / (2 * s ** 2))
     return h / h.sum()
 
 
@@ -80,7 +80,7 @@ def get_2D_samples_gauss(n, m, sigma, random_state=None):
     return make_2D_samples_gauss(n, m, sigma, random_state=None)
 
 
-def make_data_classif(dataset, n, nz=.5, theta=0, p = .5, random_state=None, **kwargs):
+def make_data_classif(dataset, n, nz=.5, theta=0, p=.5, random_state=None, **kwargs):
     """Dataset generation for classification problems
 
     Parameters
diff --git a/ot/lp/__init__.py b/ot/lp/__init__.py
index cdd505d..7eaa44a 100644
--- a/ot/lp/__init__.py
+++ b/ot/lp/__init__.py
@@ -2,8 +2,6 @@
 """
 Solvers for the original linear program OT problem
-
-
 """
 
 # Author: Remi Flamary
@@ -18,7 +16,7 @@ from scipy.sparse import coo_matrix
 from .import cvx
 
 # import compiled emd
-from .emd_wrap import emd_c, check_result, emd_1d_sorted
+#from .emd_wrap import emd_c, check_result, emd_1d_sorted
 from ..utils import parmap
 from .cvx import barycenter
 from ..utils import dist
diff --git a/ot/plot.py b/ot/plot.py
index f403e98..ad436b4 100644
--- a/ot/plot.py
+++ b/ot/plot.py
@@ -78,9 +78,10 @@ def plot2D_samples_mat(xs, xt, G, thr=1e-8, **kwargs):
     thr : float, optional
         threshold above which the line is drawn
     **kwargs : dict
-        paameters given to the plot functions (default color is black if
+        parameters given to the plot functions (default color is black if
         nothing given)
     """
+
     if ('color' not in kwargs) and ('c' not in kwargs):
         kwargs['color'] = 'k'
     mx = G.max()
-- 
cgit v1.2.3
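
`plot2D_samples_mat` draws one line per source/target pair whose coupling weight exceeds `thr`, and the hunk above makes it default to black when neither `color` nor `c` is passed. A short sketch of typical use (the exact-OT plan from `ot.emd` is just one way to obtain a coupling `G`; any transport matrix works):

    import numpy as np
    import matplotlib.pyplot as plt
    import ot
    import ot.plot

    n = 30
    xs = ot.datasets.make_2D_samples_gauss(n, m=np.array([0, 0]), sigma=np.eye(2))
    xt = ot.datasets.make_2D_samples_gauss(n, m=np.array([4, 4]), sigma=np.eye(2))

    G = ot.emd(ot.unif(n), ot.unif(n), ot.dist(xs, xt))  # exact OT coupling

    ot.plot.plot2D_samples_mat(xs, xt, G, thr=1e-8)  # black lines by default
    plt.scatter(xs[:, 0], xs[:, 1], color='b')
    plt.scatter(xt[:, 0], xt[:, 1], color='r')
    plt.show()
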
From 17d388be57cb5b0b2492c6b0ad8940e58b36016a Mon Sep 17 00:00:00 2001
From: Rémi Flamary
Date: Fri, 24 Apr 2020 14:18:41 +0200
Subject: test raise un partial ot

---
 ot/datasets.py       |  4 ++--
 test/test_partial.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 50 insertions(+), 3 deletions(-)

(limited to 'ot/datasets.py')

diff --git a/ot/datasets.py b/ot/datasets.py
index a1ca7b6..daca1ae 100644
--- a/ot/datasets.py
+++ b/ot/datasets.py
@@ -147,8 +147,8 @@ def make_data_classif(dataset, n, nz=.5, theta=0, p=.5, random_state=None, **kwa
         n2 = np.sum(y == 2)
         x = np.zeros((n, 2))
-        x[y == 1, :] = get_2D_samples_gauss(n1, m1, nz, random_state=generator)
-        x[y == 2, :] = get_2D_samples_gauss(n2, m2, nz, random_state=generator)
+        x[y == 1, :] = make_2D_samples_gauss(n1, m1, nz, random_state=generator)
+        x[y == 2, :] = make_2D_samples_gauss(n2, m2, nz, random_state=generator)
 
         x = x.dot(rot)
 
diff --git a/test/test_partial.py b/test/test_partial.py
index b533a9c..eb3b76e 100755
--- a/test/test_partial.py
+++ b/test/test_partial.py
@@ -8,6 +8,53 @@ import numpy as np
 import scipy as sp
 import ot
+import pytest
+
+
+def test_raise_errors():
+
+    n_samples = 20  # nb samples (gaussian)
+    n_noise = 20  # nb of samples (noise)
+
+    mu = np.array([0, 0])
+    cov = np.array([[1, 0], [0, 2]])
+
+    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)
+    xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2))
+    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)
+    xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2))
+
+    M = ot.dist(xs, xt)
+
+    p = ot.unif(n_samples + n_noise)
+    q = ot.unif(n_samples + n_noise)
+
+    with pytest.raises(ValueError):
+        ot.partial.partial_wasserstein_lagrange(p + 1, q, M, 1, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.partial_wasserstein(p, q, M, m=2, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.partial_wasserstein(p, q, M, m=-1, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=2, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=-1, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.partial_gromov_wasserstein(M, M, p, q, m=2, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.partial_gromov_wasserstein(M, M, p, q, m=-1, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=2, log=True)
+
+    with pytest.raises(ValueError):
+        ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=-1, log=True)
 
 
 def test_partial_wasserstein_lagrange():
@@ -115,7 +162,7 @@ def test_partial_gromov_wasserstein():
     m = 2 / 3
     res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m,
-                                                       log=True)
+                                                       log=True, verbose=True)
     np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1)
 
     C1 = sp.spatial.distance.cdist(xs, xs)
-- 
cgit v1.2.3
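
The new test pins down the input validation of the partial solvers: the transported mass `m` must be non-negative and no larger than the mass of the marginals (unit mass here, so `m=2` and `m=-1` both raise `ValueError`), and `partial_wasserstein_lagrange` rejects unnormalized marginals. For contrast, a sketch of a valid call (`m=0.7` is an arbitrary admissible mass, and the tuple return with `log=True` is assumed from the tests above):

    import numpy as np
    import ot

    n = 20
    xs = ot.datasets.make_2D_samples_gauss(n, np.array([0, 0]), np.eye(2))
    xt = ot.datasets.make_2D_samples_gauss(n, np.array([3, 3]), np.eye(2))

    p = ot.unif(n)
    q = ot.unif(n)
    M = ot.dist(xs, xt)

    # transport 70% of the unit mass; 0 <= m <= min(p.sum(), q.sum())
    gamma, log = ot.partial.partial_wasserstein(p, q, M, m=0.7, log=True)
    print(gamma.sum())  # ~0.7
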
a/ot/plot.py b/ot/plot.py index f403e98..ad436b4 100644 --- a/ot/plot.py +++ b/ot/plot.py @@ -78,9 +78,10 @@ def plot2D_samples_mat(xs, xt, G, thr=1e-8, **kwargs): thr : float, optional threshold above which the line is drawn **kwargs : dict - paameters given to the plot functions (default color is black if + parameters given to the plot functions (default color is black if nothing given) """ + if ('color' not in kwargs) and ('c' not in kwargs): kwargs['color'] = 'k' mx = G.max() -- cgit v1.2.3 From 17d388be57cb5b0b2492c6b0ad8940e58b36016a Mon Sep 17 00:00:00 2001 From: Rémi Flamary Date: Fri, 24 Apr 2020 14:18:41 +0200 Subject: test raise un partial ot --- ot/datasets.py | 4 ++-- test/test_partial.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 3 deletions(-) (limited to 'ot/datasets.py') diff --git a/ot/datasets.py b/ot/datasets.py index a1ca7b6..daca1ae 100644 --- a/ot/datasets.py +++ b/ot/datasets.py @@ -147,8 +147,8 @@ def make_data_classif(dataset, n, nz=.5, theta=0, p=.5, random_state=None, **kwa n2 = np.sum(y == 2) x = np.zeros((n, 2)) - x[y == 1, :] = get_2D_samples_gauss(n1, m1, nz, random_state=generator) - x[y == 2, :] = get_2D_samples_gauss(n2, m2, nz, random_state=generator) + x[y == 1, :] = make_2D_samples_gauss(n1, m1, nz, random_state=generator) + x[y == 2, :] = make_2D_samples_gauss(n2, m2, nz, random_state=generator) x = x.dot(rot) diff --git a/test/test_partial.py b/test/test_partial.py index b533a9c..eb3b76e 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -8,6 +8,53 @@ import numpy as np import scipy as sp import ot +import pytest + + +def test_raise_errors(): + + n_samples = 20 # nb samples (gaussian) + n_noise = 20 # nb of samples (noise) + + mu = np.array([0, 0]) + cov = np.array([[1, 0], [0, 2]]) + + xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) + xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) + + M = ot.dist(xs, xt) + + p = ot.unif(n_samples + n_noise) + q = ot.unif(n_samples + n_noise) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein_lagrange(p + 1, q, M, 1, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein(p, q, M, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein(p, q, M, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_gromov_wasserstein(M, M, p, q, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_gromov_wasserstein(M, M, p, q, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=-1, log=True) def test_partial_wasserstein_lagrange(): @@ -115,7 +162,7 @@ def test_partial_gromov_wasserstein(): m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m, - log=True) + log=True, verbose=True) np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1) C1 = sp.spatial.distance.cdist(xs, xs) -- cgit v1.2.3 From ad7aa892b47f039366a30103c1cede804811fb46 Mon Sep 17 00:00:00 2001 From: Rémi 
diff --git a/ot/gromov.py b/ot/gromov.py
index 43780a4..a678722 100644
--- a/ot/gromov.py
+++ b/ot/gromov.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Gromov-Wasserstein transport method
+Gromov-Wasserstein and Fused-Gromov-Wasserstein solvers
 """
 
 # Author: Erwan Vautier
diff --git a/ot/optim.py b/ot/optim.py
index 4012e0d..b9ca891 100644
--- a/ot/optim.py
+++ b/ot/optim.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Optimization algorithms for OT
+Generic solvers for regularized OT
 """
 
 # Author: Remi Flamary
diff --git a/ot/partial.py b/ot/partial.py
index c03ec25..eb707d8 100755
--- a/ot/partial.py
+++ b/ot/partial.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Partial OT
+Partial OT solvers
 """
 
 # Author: Laetitia Chapel
diff --git a/ot/smooth.py b/ot/smooth.py
index 5a8e4b5..81f6a3e 100644
--- a/ot/smooth.py
+++ b/ot/smooth.py
@@ -26,7 +26,9 @@
 # Remi Flamary
 
 """
-Implementation of
+Smooth and Sparse Optimal Transport solvers (KL an L2 reg.)
+
+Implementation of :
 Smooth and Sparse Optimal Transport.
 Mathieu Blondel, Vivien Seguy, Antoine Rolet.
 In Proc. of AISTATS 2018.
diff --git a/ot/unbalanced.py b/ot/unbalanced.py
index 23f6607..e37f10c 100644
--- a/ot/unbalanced.py
+++ b/ot/unbalanced.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Regularized Unbalanced OT
+Regularized Unbalanced OT solvers
 """
 
 # Author: Hicham Janati
-- 
cgit v1.2.3
From 08ec66ede42350dd040b559cca181d2e599c3d2d Mon Sep 17 00:00:00 2001
From: Rémi Flamary
Date: Fri, 24 Apr 2020 16:41:56 +0200
Subject: pep8

---
 ot/datasets.py | 2 +-
 ot/gromov.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'ot/datasets.py')

diff --git a/ot/datasets.py b/ot/datasets.py
index bd39e13..b86ef3b 100644
--- a/ot/datasets.py
+++ b/ot/datasets.py
@@ -1,5 +1,5 @@
 """
-Simple example datasets 
+Simple example datasets
 """
 
 # Author: Remi Flamary
diff --git a/ot/gromov.py b/ot/gromov.py
index a678722..4427a96 100644
--- a/ot/gromov.py
+++ b/ot/gromov.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Gromov-Wasserstein and Fused-Gromov-Wasserstein solvers 
+Gromov-Wasserstein and Fused-Gromov-Wasserstein solvers
 """
 
 # Author: Erwan Vautier
-- 
cgit v1.2.3