author     Nicolas Courty <Nico@MacBook-Pro-de-Nicolas.local>   2017-09-01 11:20:34 +0200
committer  Nicolas Courty <Nico@MacBook-Pro-de-Nicolas.local>   2017-09-01 11:20:34 +0200
commit     ab6ed1df93cd78bb7f1a54282103d4d830e68bcb (patch)
tree       bbcff976e21c2e89a4656c542506cd0f728309bb
parent     4ec5b339ef527d4d49a022ddf57b38dff037548c (diff)
docstrings and naming
-rw-r--r--   examples/plot_gromov.py             | 10
-rwxr-xr-x   examples/plot_gromov_barycenter.py  | 20
-rw-r--r--   ot/gromov.py                        | 18
-rw-r--r--   test/test_gromov.py                 | 10
4 files changed, 29 insertions, 29 deletions
diff --git a/examples/plot_gromov.py b/examples/plot_gromov.py
index 9bbdbde..92312ae 100644
--- a/examples/plot_gromov.py
+++ b/examples/plot_gromov.py
@@ -26,7 +26,7 @@ The Gromov-Wasserstein distance allows to compute distances with samples that do
For demonstration purpose, we sample two Gaussian distributions in 2- and 3-dimensional spaces.
"""
-n = 30 # nb samples
+n_samples = 30 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
@@ -35,9 +35,9 @@ mu_t = np.array([4, 4, 4])
cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
-xs = ot.datasets.get_2D_samples_gauss(n, mu_s, cov_s)
+xs = ot.datasets.get_2D_samples_gauss(n_samples, mu_s, cov_s)
P = sp.linalg.sqrtm(cov_t)
-xt = np.random.randn(n, 3).dot(P) + mu_t
+xt = np.random.randn(n_samples, 3).dot(P) + mu_t
"""
@@ -75,8 +75,8 @@ Compute Gromov-Wasserstein plans and distance
=============================================
"""
-p = ot.unif(n)
-q = ot.unif(n)
+p = ot.unif(n_samples)
+q = ot.unif(n_samples)
gw = ot.gromov_wasserstein(C1, C2, p, q, 'square_loss', epsilon=5e-4)
gw_dist = ot.gromov_wasserstein2(C1, C2, p, q, 'square_loss', epsilon=5e-4)
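After this rename, the example's sampling and solver calls read roughly as follows. This is a minimal, self-contained sketch assembled from the hunks above; the construction and normalization of the cost matrices C1 and C2 are not part of this diff and are assumed here (modeled on the test file further down), as are the imports.

    import numpy as np
    import scipy as sp
    import scipy.linalg  # makes sp.linalg.sqrtm available
    import ot

    n_samples = 30  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    mu_t = np.array([4, 4, 4])
    cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    # sample a 2D and a 3D Gaussian cloud, as in the hunks above
    xs = ot.datasets.get_2D_samples_gauss(n_samples, mu_s, cov_s)
    P = sp.linalg.sqrtm(cov_t)
    xt = np.random.randn(n_samples, 3).dot(P) + mu_t

    # assumed: intra-domain distance matrices, rescaled to [0, 1]
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    gw = ot.gromov_wasserstein(C1, C2, p, q, 'square_loss', epsilon=5e-4)
    gw_dist = ot.gromov_wasserstein2(C1, C2, p, q, 'square_loss', epsilon=5e-4)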
diff --git a/examples/plot_gromov_barycenter.py b/examples/plot_gromov_barycenter.py
index da52768..f0657e1 100755
--- a/examples/plot_gromov_barycenter.py
+++ b/examples/plot_gromov_barycenter.py
@@ -91,12 +91,12 @@ def im2mat(I):
return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
-carre = spi.imread('../data/carre.png').astype(np.float64) / 256
-rond = spi.imread('../data/rond.png').astype(np.float64) / 256
+square = spi.imread('../data/carre.png').astype(np.float64) / 256
+circle = spi.imread('../data/rond.png').astype(np.float64) / 256
triangle = spi.imread('../data/triangle.png').astype(np.float64) / 256
-fleche = spi.imread('../data/coeur.png').astype(np.float64) / 256
+arrow = spi.imread('../data/coeur.png').astype(np.float64) / 256
-shapes = [carre, rond, triangle, fleche]
+shapes = [square, circle, triangle, arrow]
S = 4
xs = [[] for i in range(S)]
@@ -118,36 +118,36 @@ Barycenter computation
The four distributions are constructed from 4 simple images
"""
ns = [len(xs[s]) for s in range(S)]
-N = 30
+n_samples = 30
"""Compute all distances matrices for the four shapes"""
Cs = [sp.spatial.distance.cdist(xs[s], xs[s]) for s in range(S)]
Cs = [cs / cs.max() for cs in Cs]
ps = [ot.unif(ns[s]) for s in range(S)]
-p = ot.unif(N)
+p = ot.unif(n_samples)
lambdast = [[float(i) / 3, float(3 - i) / 3] for i in [1, 2]]
Ct01 = [0 for i in range(2)]
for i in range(2):
- Ct01[i] = ot.gromov.gromov_barycenters(N, [Cs[0], Cs[1]], [
+ Ct01[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[1]], [
ps[0], ps[1]], p, lambdast[i], 'square_loss', 5e-4, numItermax=100, stopThr=1e-3)
Ct02 = [0 for i in range(2)]
for i in range(2):
- Ct02[i] = ot.gromov.gromov_barycenters(N, [Cs[0], Cs[2]], [
+ Ct02[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[2]], [
ps[0], ps[2]], p, lambdast[i], 'square_loss', 5e-4, numItermax=100, stopThr=1e-3)
Ct13 = [0 for i in range(2)]
for i in range(2):
- Ct13[i] = ot.gromov.gromov_barycenters(N, [Cs[1], Cs[3]], [
+ Ct13[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[1], Cs[3]], [
ps[1], ps[3]], p, lambdast[i], 'square_loss', 5e-4, numItermax=100, stopThr=1e-3)
Ct23 = [0 for i in range(2)]
for i in range(2):
- Ct23[i] = ot.gromov.gromov_barycenters(N, [Cs[2], Cs[3]], [
+ Ct23[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[2], Cs[3]], [
ps[2], ps[3]], p, lambdast[i], 'square_loss', 5e-4, numItermax=100, stopThr=1e-3)
"""
diff --git a/ot/gromov.py b/ot/gromov.py
index 421ed3f..ad85fcd 100644
--- a/ot/gromov.py
+++ b/ot/gromov.py
@@ -208,7 +208,7 @@ def update_kl_loss(p, lambdas, T, Cs):
return(np.exp(np.divide(tmpsum, ppt)))
-def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopThr=1e-9, verbose=False, log=False):
+def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon, max_iter=1000, stopThr=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein coupling between the two measured similarity matrices
@@ -248,7 +248,7 @@ def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopThr
loss_fun : loss function used for the solver either 'square_loss' or 'kl_loss'
epsilon : float
Regularization term >0
- numItermax : int, optional
+ max_iter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error (>0)
@@ -274,7 +274,7 @@ def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopThr
cpt = 0
err = 1
- while (err > stopThr and cpt < numItermax):
+ while (err > stopThr and cpt < max_iter):
Tprev = T
@@ -307,7 +307,7 @@ def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopThr
return T
-def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopThr=1e-9, verbose=False, log=False):
+def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon, max_iter=1000, stopThr=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein discrepancy between the two measured similarity matrices
@@ -362,10 +362,10 @@ def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopTh
if log:
gw, logv = gromov_wasserstein(
- C1, C2, p, q, loss_fun, epsilon, numItermax, stopThr, verbose, log)
+ C1, C2, p, q, loss_fun, epsilon, max_iter, stopThr, verbose, log)
else:
gw = gromov_wasserstein(C1, C2, p, q, loss_fun,
- epsilon, numItermax, stopThr, verbose, log)
+ epsilon, max_iter, stopThr, verbose, log)
if loss_fun == 'square_loss':
gw_dist = np.sum(gw * tensor_square_loss(C1, C2, gw))
@@ -379,7 +379,7 @@ def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon, numItermax=1000, stopTh
return gw_dist
-def gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon, numItermax=1000, stopThr=1e-9, verbose=False, log=False):
+def gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon, max_iter=1000, stopThr=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein barycenters of S measured similarity matrices
@@ -442,12 +442,12 @@ def gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon, numItermax=1000
error = []
- while(err > stopThr and cpt < numItermax):
+ while(err > stopThr and cpt < max_iter):
Cprev = C
T = [gromov_wasserstein(Cs[s], C, ps[s], p, loss_fun, epsilon,
- numItermax, 1e-5, verbose, log) for s in range(S)]
+ max_iter, 1e-5, verbose, log) for s in range(S)]
if loss_fun == 'square_loss':
C = update_square_loss(p, lambdas, T, Cs)
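The net effect of these hunks is a pure keyword rename: numItermax becomes max_iter in gromov_wasserstein, gromov_wasserstein2 and gromov_barycenters, with unchanged defaults. A minimal sketch of a call against the renamed signatures follows; the toy inputs are assumptions, not part of this commit.

    import numpy as np
    import ot

    n_samples = 40
    x = np.random.rand(n_samples, 2)
    y = np.random.rand(n_samples, 2)

    # intra-domain distance matrices, rescaled to [0, 1]
    C1 = ot.dist(x, x)
    C2 = ot.dist(y, y)
    C1 /= C1.max()
    C2 /= C2.max()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    # coupling and discrepancy, with the iteration cap passed under its new name
    T = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss',
                                     epsilon=5e-4, max_iter=1000, stopThr=1e-9)
    gw_dist = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'square_loss',
                                            epsilon=5e-4, max_iter=1000)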
diff --git a/test/test_gromov.py b/test/test_gromov.py
index 75eeaab..c26d898 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -10,18 +10,18 @@ import ot
def test_gromov():
- n = 50 # nb samples
+ n_samples = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
- xs = ot.datasets.get_2D_samples_gauss(n, mu_s, cov_s)
+ xs = ot.datasets.get_2D_samples_gauss(n_samples, mu_s, cov_s)
- xt = [xs[n - (i + 1)] for i in range(n)]
+ xt = [xs[n_samples - (i + 1)] for i in range(n_samples)]
xt = np.array(xt)
- p = ot.unif(n)
- q = ot.unif(n)
+ p = ot.unif(n_samples)
+ q = ot.unif(n_samples)
C1 = ot.dist(xs, xs)
C2 = ot.dist(xt, xt)
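The remainder of the test is not shown in this section; presumably it normalizes the cost matrices and checks that the returned coupling has the prescribed marginals. A hedged sketch of such a check, continuing the renamed variables above and assuming the test module's numpy and ot imports:

    C1 /= C1.max()
    C2 /= C2.max()

    G = ot.gromov_wasserstein(C1, C2, p, q, 'square_loss', epsilon=5e-4)

    # the coupling between a cloud and its mirrored copy should keep both marginals uniform
    np.testing.assert_allclose(p, G.sum(1), atol=1e-04)
    np.testing.assert_allclose(q, G.sum(0), atol=1e-04)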