author     tvayer <titouan.vayer@gmail.com>    2019-05-29 15:00:50 +0200
committer  tvayer <titouan.vayer@gmail.com>    2019-05-29 15:00:50 +0200
commit     fa989062c17f87bd96aa58ad764fd3791ea11e22 (patch)
tree       0a6c7e571967c17aafb144ba018e063a2e43d070
parent     63bbeb34e48f02c97a762dab5232158d90a5cffc (diff)
Readme + pep8
-rw-r--r--  README.md                        |  14
-rw-r--r--  examples/plot_barycenter_fgw.py  | 150
-rw-r--r--  examples/plot_fgw.py             | 138
-rw-r--r--  test/test_gromov.py              |  53
-rw-r--r--  test/test_optim.py               |   9
5 files changed, 190 insertions(+), 174 deletions(-)
diff --git a/README.md b/README.md
index fd27f9d..b6b215c 100644
--- a/README.md
+++ b/README.md
@@ -222,3 +222,17 @@ You can also post bug reports and feature requests in Github issues. Make sure t
[16] Agueh, M., & Carlier, G. (2011). [Barycenters in the Wasserstein space](https://hal.archives-ouvertes.fr/hal-00637399/document). SIAM Journal on Mathematical Analysis, 43(2), 904-924.
[17] Blondel, M., Seguy, V., & Rolet, A. (2018). [Smooth and Sparse Optimal Transport](https://arxiv.org/abs/1710.06276). Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
+
+[18] Genevay, A., Cuturi, M., Peyré, G., & Bach, F. (2016). [Stochastic Optimization for Large-scale Optimal Transport](https://arxiv.org/abs/1605.08527). Advances in Neural Information Processing Systems (NIPS).
+
+[19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A., & Blondel, M. (2018). [Large-scale Optimal Transport and Mapping Estimation](https://arxiv.org/pdf/1711.02283.pdf). International Conference on Learning Representations (ICLR).
+
+[20] Cuturi, M., & Doucet, A. (2014). [Fast Computation of Wasserstein Barycenters](http://proceedings.mlr.press/v32/cuturi14.html). International Conference on Machine Learning (ICML).
+
+[21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A., & Guibas, L. (2015). [Convolutional Wasserstein distances: Efficient optimal transportation on geometric domains](https://dl.acm.org/citation.cfm?id=2766963). ACM Transactions on Graphics (TOG), 34(4), 66.
+
+[22] Altschuler, J., Weed, J., & Rigollet, P. (2017). [Near-linear time approximation algorithms for optimal transport via Sinkhorn iteration](https://papers.nips.cc/paper/6792-near-linear-time-approximation-algorithms-for-optimal-transport-via-sinkhorn-iteration.pdf). Advances in Neural Information Processing Systems (NIPS) 31.
+
+[23] Genevay, A., Peyré, G., & Cuturi, M. (2018). [Learning Generative Models with Sinkhorn Divergences](https://arxiv.org/abs/1706.00292). Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
+
+[24] Vayer, T., Chapel, L., Flamary, R., Tavenard, R., & Courty, N. (2019). [Optimal Transport for structured data with application on graphs](http://proceedings.mlr.press/v97/titouan19a.html). Proceedings of the 36th International Conference on Machine Learning (ICML).
diff --git a/examples/plot_barycenter_fgw.py b/examples/plot_barycenter_fgw.py
index f416629..9eea036 100644
--- a/examples/plot_barycenter_fgw.py
+++ b/examples/plot_barycenter_fgw.py
@@ -30,10 +30,11 @@ from matplotlib import cm
from ot.gromov import fgw_barycenters
#%% Graph functions
-def find_thresh(C,inf=0.5,sup=3,step=10):
+
+def find_thresh(C, inf=0.5, sup=3, step=10):
""" Trick to find the adequate thresholds from where value of the C matrix are considered close enough to say that nodes are connected
- Tthe threshold is found by a linesearch between values "inf" and "sup" with "step" thresholds tested.
- The optimal threshold is the one which minimizes the reconstruction error between the shortest_path matrix coming from the thresholded adjency matrix
+ The threshold is found by a linesearch between values "inf" and "sup" with "step" thresholds tested.
+ The optimal threshold is the one that minimizes the reconstruction error between the shortest_path matrix computed from the thresholded adjacency matrix
and the original matrix.
Parameters
----------
@@ -43,21 +44,22 @@ def find_thresh(C,inf=0.5,sup=3,step=10):
The beginning of the linesearch
sup : float
The end of the linesearch
- step : integer
- Number of thresholds tested
+ step : integer
+ Number of thresholds tested
"""
- dist=[]
- search=np.linspace(inf,sup,step)
+ dist = []
+ search = np.linspace(inf, sup, step)
for thresh in search:
- Cprime=sp_to_adjency(C,0,thresh)
- SC=shortest_path(Cprime,method='D')
- SC[SC==float('inf')]=100
- dist.append(np.linalg.norm(SC-C))
- return search[np.argmin(dist)],dist
-
-def sp_to_adjency(C,threshinf=0.2,threshsup=1.8):
- """ Thresholds the structure matrix in order to compute an adjency matrix.
- All values between threshinf and threshsup are considered representing connected nodes and set to 1. Else are set to 0
+ Cprime = sp_to_adjency(C, 0, thresh)
+ SC = shortest_path(Cprime, method='D')
+ SC[SC == float('inf')] = 100
+ dist.append(np.linalg.norm(SC - C))
+ return search[np.argmin(dist)], dist
+
+
+def sp_to_adjency(C, threshinf=0.2, threshsup=1.8):
+ """ Thresholds the structure matrix in order to compute an adjency matrix.
+ All values between threshinf and threshsup are considered representing connected nodes and set to 1. Else are set to 0
Parameters
----------
C : ndarray, shape (n_nodes,n_nodes)
@@ -71,102 +73,100 @@ def sp_to_adjency(C,threshinf=0.2,threshsup=1.8):
C : ndarray, shape (n_nodes,n_nodes)
The threshold matrix. Each element is in {0,1}
"""
- H=np.zeros_like(C)
- np.fill_diagonal(H,np.diagonal(C))
- C=C-H
- C=np.minimum(np.maximum(C,threshinf),threshsup)
- C[C==threshsup]=0
- C[C!=0]=1
-
- return C
-
-def build_noisy_circular_graph(N=20,mu=0,sigma=0.3,with_noise=False,structure_noise=False,p=None):
+ H = np.zeros_like(C)
+ np.fill_diagonal(H, np.diagonal(C))
+ C = C - H
+ C = np.minimum(np.maximum(C, threshinf), threshsup)
+ C[C == threshsup] = 0
+ C[C != 0] = 1
+
+ return C
+
+
+def build_noisy_circular_graph(N=20, mu=0, sigma=0.3, with_noise=False, structure_noise=False, p=None):
""" Create a noisy circular graph
"""
- g=nx.Graph()
+ g = nx.Graph()
g.add_nodes_from(list(range(N)))
for i in range(N):
- noise=float(np.random.normal(mu,sigma,1))
+ noise = float(np.random.normal(mu, sigma, 1))
if with_noise:
- g.add_node(i,attr_name=math.sin((2*i*math.pi/N))+noise)
+ g.add_node(i, attr_name=math.sin((2 * i * math.pi / N)) + noise)
else:
- g.add_node(i,attr_name=math.sin(2*i*math.pi/N))
- g.add_edge(i,i+1)
+ g.add_node(i, attr_name=math.sin(2 * i * math.pi / N))
+ g.add_edge(i, i + 1)
if structure_noise:
- randomint=np.random.randint(0,p)
- if randomint==0:
- if i<=N-3:
- g.add_edge(i,i+2)
- if i==N-2:
- g.add_edge(i,0)
- if i==N-1:
- g.add_edge(i,1)
- g.add_edge(N,0)
- noise=float(np.random.normal(mu,sigma,1))
+ randomint = np.random.randint(0, p)
+ if randomint == 0:
+ if i <= N - 3:
+ g.add_edge(i, i + 2)
+ if i == N - 2:
+ g.add_edge(i, 0)
+ if i == N - 1:
+ g.add_edge(i, 1)
+ g.add_edge(N, 0)
+ noise = float(np.random.normal(mu, sigma, 1))
if with_noise:
- g.add_node(N,attr_name=math.sin((2*N*math.pi/N))+noise)
+ g.add_node(N, attr_name=math.sin((2 * N * math.pi / N)) + noise)
else:
- g.add_node(N,attr_name=math.sin(2*N*math.pi/N))
+ g.add_node(N, attr_name=math.sin(2 * N * math.pi / N))
return g
-def graph_colors(nx_graph,vmin=0,vmax=7):
- cnorm = mcol.Normalize(vmin=vmin,vmax=vmax)
- cpick = cm.ScalarMappable(norm=cnorm,cmap='viridis')
+
+def graph_colors(nx_graph, vmin=0, vmax=7):
+ cnorm = mcol.Normalize(vmin=vmin, vmax=vmax)
+ cpick = cm.ScalarMappable(norm=cnorm, cmap='viridis')
cpick.set_array([])
val_map = {}
- for k,v in nx.get_node_attributes(nx_graph,'attr_name').items():
- val_map[k]=cpick.to_rgba(v)
- colors=[]
+ for k, v in nx.get_node_attributes(nx_graph, 'attr_name').items():
+ val_map[k] = cpick.to_rgba(v)
+ colors = []
for node in nx_graph.nodes():
colors.append(val_map[node])
return colors
-
+
#%% create dataset
# We build a dataset of noisy circular graphs.
# Noise is added to the structures by random connections and to the features by Gaussian noise.
+
np.random.seed(30)
-X0=[]
+X0 = []
for k in range(9):
- X0.append(build_noisy_circular_graph(np.random.randint(15,25),with_noise=True,structure_noise=True,p=3))
-
+ X0.append(build_noisy_circular_graph(np.random.randint(15, 25), with_noise=True, structure_noise=True, p=3))
+
#%% Plot dataset
-plt.figure(figsize=(8,10))
+plt.figure(figsize=(8, 10))
for i in range(len(X0)):
- plt.subplot(3,3,i+1)
- g=X0[i]
- pos=nx.kamada_kawai_layout(g)
- nx.draw(g,pos=pos,node_color = graph_colors(g,vmin=-1,vmax=1),with_labels=False,node_size=100)
-plt.suptitle('Dataset of noisy graphs. Color indicates the label',fontsize=20)
+ plt.subplot(3, 3, i + 1)
+ g = X0[i]
+ pos = nx.kamada_kawai_layout(g)
+ nx.draw(g, pos=pos, node_color=graph_colors(g, vmin=-1, vmax=1), with_labels=False, node_size=100)
+plt.suptitle('Dataset of noisy graphs. Color indicates the label', fontsize=20)
plt.show()
-
#%%
# We compute the barycenter using FGW. Structure matrices are computed using the shortest_path distance in the graph.
# Feature distances are the Euclidean distances.
-Cs=[shortest_path(nx.adjacency_matrix(x)) for x in X0]
-ps=[np.ones(len(x.nodes()))/len(x.nodes()) for x in X0]
-Ys=[np.array([v for (k,v) in nx.get_node_attributes(x,'attr_name').items()]).reshape(-1,1) for x in X0]
-lambdas=np.array([np.ones(len(Ys))/len(Ys)]).ravel()
-sizebary=15 # we choose a barycenter with 15 nodes
+Cs = [shortest_path(nx.adjacency_matrix(x)) for x in X0]
+ps = [np.ones(len(x.nodes())) / len(x.nodes()) for x in X0]
+Ys = [np.array([v for (k, v) in nx.get_node_attributes(x, 'attr_name').items()]).reshape(-1, 1) for x in X0]
+lambdas = np.array([np.ones(len(Ys)) / len(Ys)]).ravel()
+sizebary = 15 # we choose a barycenter with 15 nodes
#%%
-A,C,log=fgw_barycenters(sizebary,Ys,Cs,ps,lambdas,alpha=0.95)
+A, C, log = fgw_barycenters(sizebary, Ys, Cs, ps, lambdas, alpha=0.95)
#%%
-bary=nx.from_numpy_matrix(sp_to_adjency(C,threshinf=0,threshsup=find_thresh(C,sup=100,step=100)[0]))
+bary = nx.from_numpy_matrix(sp_to_adjency(C, threshinf=0, threshsup=find_thresh(C, sup=100, step=100)[0]))
for i in range(len(A.ravel())):
- bary.add_node(i,attr_name=float(A.ravel()[i]))
-
+ bary.add_node(i, attr_name=float(A.ravel()[i]))
+
#%%
pos = nx.kamada_kawai_layout(bary)
-nx.draw(bary,pos=pos,node_color = graph_colors(bary,vmin=-1,vmax=1),with_labels=False)
-plt.suptitle('Barycenter',fontsize=20)
+nx.draw(bary, pos=pos, node_color=graph_colors(bary, vmin=-1, vmax=1), with_labels=False)
+plt.suptitle('Barycenter', fontsize=20)
plt.show()
-
-
-
-
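The example above condenses to a short pipeline: build graphs, extract shortest-path structure matrices and node features, then call fgw_barycenters. A minimal self-contained sketch under the same API as in the diff (the toy cycle graphs, the 5-node size and the alpha value are illustrative, not part of the commit):

import numpy as np
import networkx as nx
from scipy.sparse.csgraph import shortest_path
from ot.gromov import fgw_barycenters

# Stand-ins for the noisy circular graphs X0 above: two cycles with a scalar feature per node
graphs = [nx.cycle_graph(6), nx.cycle_graph(8)]
for g in graphs:
    nx.set_node_attributes(g, {i: float(np.sin(2 * np.pi * i / len(g))) for i in g.nodes()}, 'attr_name')

Cs = [shortest_path(nx.adjacency_matrix(g)) for g in graphs]  # structure: shortest-path distances
Ys = [np.array(list(nx.get_node_attributes(g, 'attr_name').values())).reshape(-1, 1) for g in graphs]
ps = [np.ones(len(g)) / len(g) for g in graphs]               # uniform node weights
lambdas = np.ones(len(graphs)) / len(graphs)                  # uniform graph weights

# alpha close to 1 favors the structure term over the feature term
A, C, log = fgw_barycenters(5, Ys, Cs, ps, lambdas, alpha=0.95)
print(A.shape, C.shape)  # (5, 1) barycenter features, (5, 5) barycenter structure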
diff --git a/examples/plot_fgw.py b/examples/plot_fgw.py
index bfa7fb4..ae3c487 100644
--- a/examples/plot_fgw.py
+++ b/examples/plot_fgw.py
@@ -20,132 +20,132 @@ This example illustrates the computation of FGW for 1D measures[18].
import matplotlib.pyplot as pl
import numpy as np
import ot
-from ot.gromov import gromov_wasserstein,fused_gromov_wasserstein
+from ot.gromov import gromov_wasserstein, fused_gromov_wasserstein
#%% parameters
-# We create two 1D random measures
-n=20
-n2=30
-sig=1
-sig2=0.1
+# We create two 1D random measures
+n = 20
+n2 = 30
+sig = 1
+sig2 = 0.1
np.random.seed(0)
-phi=np.arange(n)[:,None]
-xs=phi+sig*np.random.randn(n,1)
-ys=np.vstack((np.ones((n//2,1)),0*np.ones((n//2,1))))+sig2*np.random.randn(n,1)
+phi = np.arange(n)[:, None]
+xs = phi + sig * np.random.randn(n, 1)
+ys = np.vstack((np.ones((n // 2, 1)), 0 * np.ones((n // 2, 1)))) + sig2 * np.random.randn(n, 1)
-phi2=np.arange(n2)[:,None]
-xt=phi2+sig*np.random.randn(n2,1)
-yt=np.vstack((np.ones((n2//2,1)),0*np.ones((n2//2,1))))+sig2*np.random.randn(n2,1)
-yt= yt[::-1,:]
+phi2 = np.arange(n2)[:, None]
+xt = phi2 + sig * np.random.randn(n2, 1)
+yt = np.vstack((np.ones((n2 // 2, 1)), 0 * np.ones((n2 // 2, 1)))) + sig2 * np.random.randn(n2, 1)
+yt = yt[::-1, :]
-p=ot.unif(n)
-q=ot.unif(n2)
+p = ot.unif(n)
+q = ot.unif(n2)
#%% plot the distributions
pl.close(10)
-pl.figure(10,(7,7))
+pl.figure(10, (7, 7))
-pl.subplot(2,1,1)
+pl.subplot(2, 1, 1)
-pl.scatter(ys,xs,c=phi,s=70)
-pl.ylabel('Feature value a',fontsize=20)
-pl.title('$\mu=\sum_i \delta_{x_i,a_i}$',fontsize=25, usetex=True, y=1)
+pl.scatter(ys, xs, c=phi, s=70)
+pl.ylabel('Feature value a', fontsize=20)
+pl.title('$\mu=\sum_i \delta_{x_i,a_i}$', fontsize=25, usetex=True, y=1)
pl.xticks(())
pl.yticks(())
-pl.subplot(2,1,2)
-pl.scatter(yt,xt,c=phi2,s=70)
-pl.xlabel('coordinates x/y',fontsize=25)
-pl.ylabel('Feature value b',fontsize=20)
-pl.title('$\\nu=\sum_j \delta_{y_j,b_j}$',fontsize=25, usetex=True, y=1)
+pl.subplot(2, 1, 2)
+pl.scatter(yt, xt, c=phi2, s=70)
+pl.xlabel('coordinates x/y', fontsize=25)
+pl.ylabel('Feature value b', fontsize=20)
+pl.title('$\\nu=\sum_j \delta_{y_j,b_j}$', fontsize=25, usetex=True, y=1)
pl.yticks(())
pl.tight_layout()
pl.show()
#%% Structure matrices and across-features distance matrix
-C1=ot.dist(xs)
-C2=ot.dist(xt).T
-M=ot.dist(ys,yt)
-w1=ot.unif(C1.shape[0])
-w2=ot.unif(C2.shape[0])
-Got=ot.emd([],[],M)
+C1 = ot.dist(xs)
+C2 = ot.dist(xt).T
+M = ot.dist(ys, yt)
+w1 = ot.unif(C1.shape[0])
+w2 = ot.unif(C2.shape[0])
+Got = ot.emd([], [], M)
#%%
-cmap='Reds'
+cmap = 'Reds'
pl.close(10)
-pl.figure(10,(5,5))
-fs=15
-l_x=[0,5,10,15]
-l_y=[0,5,10,15,20,25]
+pl.figure(10, (5, 5))
+fs = 15
+l_x = [0, 5, 10, 15]
+l_y = [0, 5, 10, 15, 20, 25]
gs = pl.GridSpec(5, 5)
-ax1=pl.subplot(gs[3:,:2])
+ax1 = pl.subplot(gs[3:, :2])
-pl.imshow(C1,cmap=cmap,interpolation='nearest')
-pl.title("$C_1$",fontsize=fs)
-pl.xlabel("$k$",fontsize=fs)
-pl.ylabel("$i$",fontsize=fs)
+pl.imshow(C1, cmap=cmap, interpolation='nearest')
+pl.title("$C_1$", fontsize=fs)
+pl.xlabel("$k$", fontsize=fs)
+pl.ylabel("$i$", fontsize=fs)
pl.xticks(l_x)
pl.yticks(l_x)
-ax2=pl.subplot(gs[:3,2:])
+ax2 = pl.subplot(gs[:3, 2:])
-pl.imshow(C2,cmap=cmap,interpolation='nearest')
-pl.title("$C_2$",fontsize=fs)
-pl.ylabel("$l$",fontsize=fs)
+pl.imshow(C2, cmap=cmap, interpolation='nearest')
+pl.title("$C_2$", fontsize=fs)
+pl.ylabel("$l$", fontsize=fs)
#pl.ylabel("$l$",fontsize=fs)
pl.xticks(())
pl.yticks(l_y)
ax2.set_aspect('auto')
-ax3=pl.subplot(gs[3:,2:],sharex=ax2,sharey=ax1)
-pl.imshow(M,cmap=cmap,interpolation='nearest')
+ax3 = pl.subplot(gs[3:, 2:], sharex=ax2, sharey=ax1)
+pl.imshow(M, cmap=cmap, interpolation='nearest')
pl.yticks(l_x)
pl.xticks(l_y)
-pl.ylabel("$i$",fontsize=fs)
-pl.title("$M_{AB}$",fontsize=fs)
-pl.xlabel("$j$",fontsize=fs)
+pl.ylabel("$i$", fontsize=fs)
+pl.title("$M_{AB}$", fontsize=fs)
+pl.xlabel("$j$", fontsize=fs)
pl.tight_layout()
ax3.set_aspect('auto')
pl.show()
#%% Computing FGW and GW
-alpha=1e-3
-
+alpha = 1e-3
+
ot.tic()
-Gwg,logw=fused_gromov_wasserstein(M,C1,C2,p,q,loss_fun='square_loss',alpha=alpha,verbose=True,log=True)
+Gwg, logw = fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=alpha, verbose=True, log=True)
ot.toc()
-#%reload_ext WGW
-Gg,log=gromov_wasserstein(C1,C2,p,q,loss_fun='square_loss',verbose=True,log=True)
-
+#%reload_ext WGW
+Gg, log = gromov_wasserstein(C1, C2, p, q, loss_fun='square_loss', verbose=True, log=True)
+
#%% visu OT matrix
-cmap='Blues'
-fs=15
-pl.figure(2,(13,5))
+cmap = 'Blues'
+fs = 15
+pl.figure(2, (13, 5))
pl.clf()
-pl.subplot(1,3,1)
-pl.imshow(Got,cmap=cmap,interpolation='nearest')
+pl.subplot(1, 3, 1)
+pl.imshow(Got, cmap=cmap, interpolation='nearest')
#pl.xlabel("$y$",fontsize=fs)
-pl.ylabel("$i$",fontsize=fs)
+pl.ylabel("$i$", fontsize=fs)
pl.xticks(())
pl.title('Wasserstein ($M$ only)')
-pl.subplot(1,3,2)
-pl.imshow(Gg,cmap=cmap,interpolation='nearest')
+pl.subplot(1, 3, 2)
+pl.imshow(Gg, cmap=cmap, interpolation='nearest')
pl.title('Gromov ($C_1,C_2$ only)')
pl.xticks(())
-pl.subplot(1,3,3)
-pl.imshow(Gwg,cmap=cmap,interpolation='nearest')
+pl.subplot(1, 3, 3)
+pl.imshow(Gwg, cmap=cmap, interpolation='nearest')
pl.title('FGW ($M+C_1,C_2$)')
-pl.xlabel("$j$",fontsize=fs)
-pl.ylabel("$i$",fontsize=fs)
+pl.xlabel("$j$", fontsize=fs)
+pl.ylabel("$i$", fontsize=fs)
pl.tight_layout()
-pl.show()
\ No newline at end of file
+pl.show()
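The three transport plans plotted at the end differ only through alpha: fused_gromov_wasserstein mixes the feature cost M (weight 1 - alpha) with the structural Gromov term on C1, C2 (weight alpha), so alpha near 0 behaves like plain Wasserstein and alpha near 1 like Gromov-Wasserstein. A minimal sketch of that sweep on random data (shapes and alpha values are illustrative, not part of the commit):

import numpy as np
import ot
from ot.gromov import fused_gromov_wasserstein

np.random.seed(0)
xs, xt = np.random.randn(20, 1), np.random.randn(30, 1)  # structure coordinates
ys, yt = np.random.randn(20, 1), np.random.randn(30, 1)  # features
C1, C2 = ot.dist(xs), ot.dist(xt)                        # intra-domain structure matrices
M = ot.dist(ys, yt)                                      # across-domain feature cost
p, q = ot.unif(20), ot.unif(30)

for alpha in [1e-3, 0.5, 1 - 1e-3]:
    G = fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=alpha)
    print(alpha, np.sum(G * M))  # feature cost paid by each plan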
diff --git a/test/test_gromov.py b/test/test_gromov.py
index 43b63e1..cd180d4 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -145,7 +145,8 @@ def test_gromov_entropic_barycenter():
'kl_loss', 2e-3,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
-
+
+
def test_fgw():
n_samples = 50 # nb samples
@@ -155,9 +156,9 @@ def test_fgw():
xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s)
xt = xs[::-1].copy()
-
- ys = np.random.randn(xs.shape[0],2)
- yt= ys[::-1].copy()
+
+ ys = np.random.randn(xs.shape[0], 2)
+ yt = ys[::-1].copy()
p = ot.unif(n_samples)
q = ot.unif(n_samples)
@@ -167,11 +168,11 @@ def test_fgw():
C1 /= C1.max()
C2 /= C2.max()
-
- M=ot.dist(ys,yt)
- M/=M.max()
- G = ot.gromov.fused_gromov_wasserstein(M,C1, C2, p, q, 'square_loss',alpha=0.5)
+ M = ot.dist(ys, yt)
+ M /= M.max()
+
+ G = ot.gromov.fused_gromov_wasserstein(M, C1, C2, p, q, 'square_loss', alpha=0.5)
# check constraints
np.testing.assert_allclose(
@@ -187,36 +188,36 @@ def test_fgw_barycenter():
Xs, ys = ot.datasets.make_data_classif('3gauss', ns)
Xt, yt = ot.datasets.make_data_classif('3gauss2', nt)
-
- ys = np.random.randn(Xs.shape[0],2)
- yt= np.random.randn(Xt.shape[0],2)
+
+ ys = np.random.randn(Xs.shape[0], 2)
+ yt = np.random.randn(Xt.shape[0], 2)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
n_samples = 3
- X,C,log = ot.gromov.fgw_barycenters(n_samples,[ys,yt] ,[C1, C2],[ot.unif(ns), ot.unif(nt)],[.5, .5],0.5,
- fixed_structure=False,fixed_features=False,
- p=ot.unif(n_samples),loss_fun='square_loss',
- max_iter=100, tol=1e-3)
+ X, C, log = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
+ fixed_structure=False, fixed_features=False,
+ p=ot.unif(n_samples), loss_fun='square_loss',
+ max_iter=100, tol=1e-3)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
xalea = np.random.randn(n_samples, 2)
init_C = ot.dist(xalea, xalea)
-
- X,C,log = ot.gromov.fgw_barycenters(n_samples,[ys,yt] ,[C1, C2],ps=[ot.unif(ns), ot.unif(nt)],lambdas=[.5, .5],alpha=0.5,
- fixed_structure=True,init_C=init_C,fixed_features=False,
- p=ot.unif(n_samples),loss_fun='square_loss',
- max_iter=100, tol=1e-3)
+
+ X, C, log = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], ps=[ot.unif(ns), ot.unif(nt)], lambdas=[.5, .5], alpha=0.5,
+ fixed_structure=True, init_C=init_C, fixed_features=False,
+ p=ot.unif(n_samples), loss_fun='square_loss',
+ max_iter=100, tol=1e-3)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
-
- init_X=np.random.randn(n_samples,ys.shape[1])
- X,C,log = ot.gromov.fgw_barycenters(n_samples,[ys,yt] ,[C1, C2],[ot.unif(ns), ot.unif(nt)],[.5, .5],0.5,
- fixed_structure=False,fixed_features=True, init_X=init_X,
- p=ot.unif(n_samples),loss_fun='square_loss',
- max_iter=100, tol=1e-3)
+ init_X = np.random.randn(n_samples, ys.shape[1])
+
+ X, C, log = ot.gromov.fgw_barycenters(n_samples, [ys, yt], [C1, C2], [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
+ fixed_structure=False, fixed_features=True, init_X=init_X,
+ p=ot.unif(n_samples), loss_fun='square_loss',
+ max_iter=100, tol=1e-3)
np.testing.assert_allclose(C.shape, (n_samples, n_samples))
np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))
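The assert_allclose checks on marginals in these tests encode the defining constraint of any (F)GW coupling: the plan G must lie in U(p, q), i.e. its rows sum to p and its columns to q. A standalone sketch of that check (toy data, not part of the test suite):

import numpy as np
import ot

np.random.seed(42)
n = 50
X = np.random.randn(n, 2)
C = ot.dist(X)
C /= C.max()
p, q = ot.unif(n), ot.unif(n)

G = ot.gromov.gromov_wasserstein(C, C, p, q, 'square_loss')
np.testing.assert_allclose(p, G.sum(1), atol=1e-04)  # row marginals match p
np.testing.assert_allclose(q, G.sum(0), atol=1e-04)  # column marginals match q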
diff --git a/test/test_optim.py b/test/test_optim.py
index 1188ef6..e7ba32a 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -65,8 +65,9 @@ def test_generalized_conditional_gradient():
np.testing.assert_allclose(a, G.sum(1), atol=1e-05)
np.testing.assert_allclose(b, G.sum(0), atol=1e-05)
-
+
+
def test_solve_1d_linesearch_quad_funct():
- np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(1,-1,0),0.5)
- np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(-1,5,0),0)
- np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(-1,0.5,0),1)
+ np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(1, -1, 0), 0.5)
+ np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(-1, 5, 0), 0)
+ np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad_funct(-1, 0.5, 0), 1)
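For reference, the three assertions pin down the semantics of solve_1d_linesearch_quad_funct: it returns the minimizer over [0, 1] of f(t) = a*t**2 + b*t + c. When a > 0 this is -b/(2a) clipped to [0, 1], which gives 0.5 for (1, -1, 0); when a <= 0 the quadratic is concave (or linear) and the minimum sits at the cheaper endpoint, giving 0 for (-1, 5, 0) and 1 for (-1, 0.5, 0). A sketch of that rule, as an illustration of the expected behavior rather than the library's implementation:

import numpy as np

def quad_argmin_01(a, b, c):
    # Minimize f(t) = a*t**2 + b*t + c over t in [0, 1].
    if a > 0:
        # convex case: clip the interior stationary point -b/(2a) to the box
        return float(np.clip(-b / (2 * a), 0.0, 1.0))
    # concave/linear case: compare the endpoints f(0) = c and f(1) = a + b + c
    return 0.0 if a + b >= 0 else 1.0

assert quad_argmin_01(1, -1, 0) == 0.5
assert quad_argmin_01(-1, 5, 0) == 0.0
assert quad_argmin_01(-1, 0.5, 0) == 1.0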