From 13444cabb8318a7759e2d0941baf4aba67308a51 Mon Sep 17 00:00:00 2001 From: Laetitia Chapel Date: Wed, 15 Apr 2020 15:35:16 +0200 Subject: partial with tests --- test/test_partial.py | 141 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100755 test/test_partial.py (limited to 'test/test_partial.py') diff --git a/test/test_partial.py b/test/test_partial.py new file mode 100755 index 0000000..fbcd3c2 --- /dev/null +++ b/test/test_partial.py @@ -0,0 +1,141 @@ +"""Tests for module partial """ + +# Author: +# Laetitia Chapel +# +# License: MIT License + +import numpy as np +import scipy as sp +import ot + + +def test_partial_wasserstein(): + + n_samples = 20 # nb samples (gaussian) + n_noise = 20 # nb of samples (noise) + + mu = np.array([0, 0]) + cov = np.array([[1, 0], [0, 2]]) + + xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) + xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) + + M = ot.dist(xs, xt) + + p = ot.unif(n_samples + n_noise) + q = ot.unif(n_samples + n_noise) + + m = 0.5 + + w0, log0 = ot.partial.partial_wasserstein(p, q, M, m=m, log=True) + w, log = ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=m, + log=True) + + # check constratints + np.testing.assert_equal( + w0.sum(1) - p <= 1e-5, [True] * len(p)) # cf convergence wasserstein + np.testing.assert_equal( + w0.sum(0) - q <= 1e-5, [True] * len(q)) # cf convergence wasserstein + np.testing.assert_equal( + w.sum(1) - p <= 1e-5, [True] * len(p)) # cf convergence wasserstein + np.testing.assert_equal( + w.sum(0) - q <= 1e-5, [True] * len(q)) # cf convergence wasserstein + + # check transported mass + np.testing.assert_allclose( + np.sum(w0), m, atol=1e-04) + np.testing.assert_allclose( + np.sum(w), m, atol=1e-04) + + w0, log0 = ot.partial.partial_wasserstein2(p, q, M, m=m, log=True) + w0_val = ot.partial.partial_wasserstein2(p, q, M, m=m, log=False) + + G = log0['T'] + + np.testing.assert_allclose(w0, w0_val, atol=1e-1, rtol=1e-1) + + # check constratints + np.testing.assert_equal( + G.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein + np.testing.assert_equal( + G.sum(0) <= q, [True] * len(q)) # cf convergence wasserstein + np.testing.assert_allclose( + np.sum(G), m, atol=1e-04) + + +def test_partial_gromov_wasserstein(): + n_samples = 20 # nb samples + n_noise = 10 # nb of samples (noise) + + p = ot.unif(n_samples + n_noise) + q = ot.unif(n_samples + n_noise) + + mu_s = np.array([0, 0]) + cov_s = np.array([[1, 0], [0, 1]]) + + mu_t = np.array([0, 0, 0]) + cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + + xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s) + xs = np.concatenate((xs, ((np.random.rand(n_noise, 2) + 1) * 4)), axis=0) + P = sp.linalg.sqrtm(cov_t) + xt = np.random.randn(n_samples, 3).dot(P) + mu_t + xt = np.concatenate((xt, ((np.random.rand(n_noise, 3) + 1) * 10)), axis=0) + xt2 = xs[::-1].copy() + + C1 = ot.dist(xs, xs) + C2 = ot.dist(xt, xt) + C3 = ot.dist(xt2, xt2) + + m = 2 / 3 + res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m, + log=True) + res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C3, p, q, 10, + m=m, log=True) + np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1) + np.testing.assert_allclose(res, 0, atol=1e-1, rtol=1e-1) + + C1 = sp.spatial.distance.cdist(xs, xs) + C2 = sp.spatial.distance.cdist(xt, xt) + + 
m = 1 + res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, + log=True) + G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss') + np.testing.assert_allclose(G, res0, atol=1e-04) + + res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, + m=m, log=True) + G = ot.gromov.entropic_gromov_wasserstein( + C1, C2, p, q, 'square_loss', epsilon=10) + np.testing.assert_allclose(G, res, atol=1e-02) + + w0, log0 = ot.partial.partial_gromov_wasserstein2(C1, C2, p, q, m=m, + log=True) + w0_val = ot.partial.partial_gromov_wasserstein2(C1, C2, p, q, m=m, + log=False) + G = log0['T'] + np.testing.assert_allclose(w0, w0_val, atol=1e-1, rtol=1e-1) + + m = 2 / 3 + res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, + log=True) + res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, + m=m, log=True) + # check constratints + np.testing.assert_equal( + res0.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein + np.testing.assert_equal( + res0.sum(0) <= q, [True] * len(q)) # cf convergence wasserstein + np.testing.assert_allclose( + np.sum(res0), m, atol=1e-04) + + np.testing.assert_equal( + res.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein + np.testing.assert_equal( + res.sum(0) <= q, [True] * len(q)) # cf convergence wasserstein + np.testing.assert_allclose( + np.sum(res), m, atol=1e-04) -- cgit v1.2.3 From ef7c11a5df3cf6c82864472f0cfa65d6b2036f2f Mon Sep 17 00:00:00 2001 From: Laetitia Chapel Date: Thu, 16 Apr 2020 15:52:00 +0200 Subject: partial with python 3.8 --- .travis.yml | 2 +- ot/partial.py | 12 ++++++------ test/test_partial.py | 9 ++++----- 3 files changed, 11 insertions(+), 12 deletions(-) (limited to 'test/test_partial.py') diff --git a/.travis.yml b/.travis.yml index 072bc55..7ff1b3c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,7 +16,7 @@ matrix: python: 3.7 - os: linux sudo: required - python: 2.7 + python: 3.8 # - os: osx # sudo: required # language: generic diff --git a/ot/partial.py b/ot/partial.py index 8698d9d..726a590 100755 --- a/ot/partial.py +++ b/ot/partial.py @@ -232,7 +232,7 @@ def partial_wasserstein(a, b, M, m=None, nb_dummies=1, log=False, **kwargs): b_extended = np.append(b, [(np.sum(a) - m) / nb_dummies] * nb_dummies) a_extended = np.append(a, [(np.sum(b) - m) / nb_dummies] * nb_dummies) - M_extended = np.ones((len(a_extended), len(b_extended))) * 0 + M_extended = np.zeros((len(a_extended), len(b_extended))) M_extended[-1, -1] = np.max(M) * 1e5 M_extended[:len(a), :len(b)] = M @@ -510,9 +510,9 @@ def partial_gromov_wasserstein(C1, C2, p, q, m=None, nb_dummies=1, G0=None, Gprev = G0 M = gwgrad_partial(C1, C2, G0) - M[M < eps] = np.quantile(M[M > eps], thres) + M[M < eps] = np.quantile(M, thres) - M_emd = np.ones(dim_G_extended) * np.max(M) * 1e2 + M_emd = np.zeros(dim_G_extended) M_emd[:len(p), :len(q)] = M M_emd[-nb_dummies:, -nb_dummies:] = np.max(M) * 1e5 M_emd = np.asarray(M_emd, dtype=np.float64) @@ -729,8 +729,8 @@ def entropic_partial_wasserstein(a, b, M, reg, m=None, numItermax=1000, M = np.asarray(M, dtype=np.float64) dim_a, dim_b = M.shape - dx = np.ones(dim_a) - dy = np.ones(dim_b) + dx = np.ones(dim_a, dtype=np.float64) + dy = np.ones(dim_b, dtype=np.float64) if len(a) == 0: a = np.ones(dim_a, dtype=np.float64) / dim_a @@ -738,7 +738,7 @@ def entropic_partial_wasserstein(a, b, M, reg, m=None, numItermax=1000, b = np.ones(dim_b, dtype=np.float64) / dim_b if m is None: - m = np.min((np.sum(a), np.sum(b))) + m = np.min((np.sum(a), np.sum(b))) * 1.0 if m < 0: 
raise ValueError("Problem infeasible. Parameter m should be greater" " than 0.") diff --git a/test/test_partial.py b/test/test_partial.py index fbcd3c2..1799fd4 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -93,10 +93,7 @@ def test_partial_gromov_wasserstein(): m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m, log=True) - res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C3, p, q, 10, - m=m, log=True) np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1) - np.testing.assert_allclose(res, 0, atol=1e-1, rtol=1e-1) C1 = sp.spatial.distance.cdist(xs, xs) C2 = sp.spatial.distance.cdist(xt, xt) @@ -123,8 +120,10 @@ def test_partial_gromov_wasserstein(): m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, log=True) - res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, - m=m, log=True) + res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, + 100, m=m, + log=True) + # check constratints np.testing.assert_equal( res0.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein -- cgit v1.2.3 From 47306ad23d0c9943c14149ffd85d1c3d0544a3df Mon Sep 17 00:00:00 2001 From: Laetitia Chapel Date: Thu, 16 Apr 2020 16:25:16 +0200 Subject: partial with python 3.8 --- test/test_partial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'test/test_partial.py') diff --git a/test/test_partial.py b/test/test_partial.py index 1799fd4..ce363bd 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -123,7 +123,7 @@ def test_partial_gromov_wasserstein(): res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 100, m=m, log=True) - + # check constratints np.testing.assert_equal( res0.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein -- cgit v1.2.3 From d2ecce4a79228cd10f4beba8b6b2b28239be796d Mon Sep 17 00:00:00 2001 From: Laetitia Chapel Date: Thu, 16 Apr 2020 16:42:59 +0200 Subject: partial with python 3.8 --- test/test_partial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'test/test_partial.py') diff --git a/test/test_partial.py b/test/test_partial.py index ce363bd..8b1ca89 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -123,7 +123,7 @@ def test_partial_gromov_wasserstein(): res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 100, m=m, log=True) - + # check constratints np.testing.assert_equal( res0.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein -- cgit v1.2.3 From 53b063ed6b6aa15d6cb103a9304bbd169678b2e9 Mon Sep 17 00:00:00 2001 From: Rémi Flamary Date: Fri, 24 Apr 2020 12:39:38 +0200 Subject: better coverage options verbose and log --- ot/bregman.py | 5 ----- test/test_bregman.py | 9 ++++++--- test/test_optim.py | 2 +- test/test_partial.py | 26 +++++++++++++++++++++++++- test/test_stochastic.py | 8 ++++---- test/test_unbalanced.py | 9 ++++++--- 6 files changed, 42 insertions(+), 17 deletions(-) (limited to 'test/test_partial.py') diff --git a/ot/bregman.py b/ot/bregman.py index 543dbaa..b4365d0 100644 --- a/ot/bregman.py +++ b/ot/bregman.py @@ -909,11 +909,6 @@ def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4, else: alpha, beta = warmstart - def get_K(alpha, beta): - """log space computation""" - return np.exp(-(M - alpha.reshape((dim_a, 1)) - - beta.reshape((1, dim_b))) / reg) - # print(np.min(K)) def get_reg(n): # exponential decreasing return (epsilon0 - reg) * np.exp(-n) + reg diff --git a/test/test_bregman.py b/test/test_bregman.py index 
ec4388d..6aa4e08 100644 --- a/test/test_bregman.py +++ b/test/test_bregman.py @@ -57,6 +57,9 @@ def test_sinkhorn_empty(): np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) + # test empty weights greenkhorn + ot.sinkhorn([], [], M, 1, method='greenkhorn', stopThr=1e-10, log=True) + def test_sinkhorn_variants(): # test sinkhorn @@ -124,7 +127,7 @@ def test_barycenter(method): # wasserstein reg = 1e-2 - bary_wass = ot.bregman.barycenter(A, M, reg, weights, method=method) + bary_wass, log = ot.bregman.barycenter(A, M, reg, weights, method=method, log=True) np.testing.assert_allclose(1, np.sum(bary_wass)) @@ -152,9 +155,9 @@ def test_barycenter_stabilization(): reg = 1e-2 bar_stable = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn_stabilized", - stopThr=1e-8) + stopThr=1e-8, verbose=True) bar = ot.bregman.barycenter(A, M, reg, weights, method="sinkhorn", - stopThr=1e-8) + stopThr=1e-8, verbose=True) np.testing.assert_allclose(bar, bar_stable) diff --git a/test/test_optim.py b/test/test_optim.py index aade36e..87b0268 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -38,7 +38,7 @@ def test_conditional_gradient(): def test_conditional_gradient2(): - n = 4000 # nb samples + n = 1000 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) diff --git a/test/test_partial.py b/test/test_partial.py index 8b1ca89..5960e4e 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -9,6 +9,30 @@ import numpy as np import scipy as sp import ot +def test_partial_wasserstein_lagrange(): + + n_samples = 20 # nb samples (gaussian) + n_noise = 20 # nb of samples (noise) + + mu = np.array([0, 0]) + cov = np.array([[1, 0], [0, 2]]) + + xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) + xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) + + M = ot.dist(xs, xt) + + p = ot.unif(n_samples + n_noise) + q = ot.unif(n_samples + n_noise) + + m = 0.5 + + w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 1, log=True) + + + def test_partial_wasserstein(): @@ -32,7 +56,7 @@ def test_partial_wasserstein(): w0, log0 = ot.partial.partial_wasserstein(p, q, M, m=m, log=True) w, log = ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=m, - log=True) + log=True, verbose=True) # check constratints np.testing.assert_equal( diff --git a/test/test_stochastic.py b/test/test_stochastic.py index f0f3fc8..8ddf485 100644 --- a/test/test_stochastic.py +++ b/test/test_stochastic.py @@ -70,8 +70,8 @@ def test_stochastic_asgd(): M = ot.dist(x, x) - G = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd", - numItermax=numItermax) + G, log = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd", + numItermax=numItermax, log=True) # check constratints np.testing.assert_allclose( @@ -145,8 +145,8 @@ def test_stochastic_dual_sgd(): M = ot.dist(x, x) - G = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size, - numItermax=numItermax) + G, log = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size, + numItermax=numItermax, log=True) # check constratints np.testing.assert_allclose( diff --git a/test/test_unbalanced.py b/test/test_unbalanced.py index ca1efba..d5bae42 100644 --- a/test/test_unbalanced.py +++ b/test/test_unbalanced.py @@ -31,9 +31,11 @@ def test_unbalanced_convergence(method): G, log = ot.unbalanced.sinkhorn_unbalanced(a, b, M, 
reg=epsilon, reg_m=reg_m, method=method, - log=True) + log=True, + verbose=True) loss = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m, - method=method) + method=method, + verbose=True) # check fixed point equations # in log-domain fi = reg_m / (reg_m + epsilon) @@ -73,7 +75,8 @@ def test_unbalanced_multiple_inputs(method): loss, log = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon, reg_m=reg_m, method=method, - log=True) + log=True, + verbose=True) # check fixed point equations # in log-domain fi = reg_m / (reg_m + epsilon) -- cgit v1.2.3 From 90bd408e86eccb03b02d57a0cd7963e0c848a1fc Mon Sep 17 00:00:00 2001 From: Rémi Flamary Date: Fri, 24 Apr 2020 13:59:42 +0200 Subject: pep8 --- test/test_partial.py | 5 ++--- test/test_stochastic.py | 4 ++-- test/test_unbalanced.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'test/test_partial.py') diff --git a/test/test_partial.py b/test/test_partial.py index 5960e4e..b533a9c 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -9,6 +9,7 @@ import numpy as np import scipy as sp import ot + def test_partial_wasserstein_lagrange(): n_samples = 20 # nb samples (gaussian) @@ -29,9 +30,7 @@ def test_partial_wasserstein_lagrange(): m = 0.5 - w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 1, log=True) - - + w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 1, log=True) def test_partial_wasserstein(): diff --git a/test/test_stochastic.py b/test/test_stochastic.py index 8ddf485..155622c 100644 --- a/test/test_stochastic.py +++ b/test/test_stochastic.py @@ -71,7 +71,7 @@ def test_stochastic_asgd(): M = ot.dist(x, x) G, log = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd", - numItermax=numItermax, log=True) + numItermax=numItermax, log=True) # check constratints np.testing.assert_allclose( @@ -146,7 +146,7 @@ def test_stochastic_dual_sgd(): M = ot.dist(x, x) G, log = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size, - numItermax=numItermax, log=True) + numItermax=numItermax, log=True) # check constratints np.testing.assert_allclose( diff --git a/test/test_unbalanced.py b/test/test_unbalanced.py index d5bae42..dfeaad9 100644 --- a/test/test_unbalanced.py +++ b/test/test_unbalanced.py @@ -35,7 +35,7 @@ def test_unbalanced_convergence(method): verbose=True) loss = ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m, method=method, - verbose=True) + verbose=True) # check fixed point equations # in log-domain fi = reg_m / (reg_m + epsilon) -- cgit v1.2.3 From 17d388be57cb5b0b2492c6b0ad8940e58b36016a Mon Sep 17 00:00:00 2001 From: Rémi Flamary Date: Fri, 24 Apr 2020 14:18:41 +0200 Subject: test raise un partial ot --- ot/datasets.py | 4 ++-- test/test_partial.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 3 deletions(-) (limited to 'test/test_partial.py') diff --git a/ot/datasets.py b/ot/datasets.py index a1ca7b6..daca1ae 100644 --- a/ot/datasets.py +++ b/ot/datasets.py @@ -147,8 +147,8 @@ def make_data_classif(dataset, n, nz=.5, theta=0, p=.5, random_state=None, **kwa n2 = np.sum(y == 2) x = np.zeros((n, 2)) - x[y == 1, :] = get_2D_samples_gauss(n1, m1, nz, random_state=generator) - x[y == 2, :] = get_2D_samples_gauss(n2, m2, nz, random_state=generator) + x[y == 1, :] = make_2D_samples_gauss(n1, m1, nz, random_state=generator) + x[y == 2, :] = make_2D_samples_gauss(n2, m2, nz, random_state=generator) x = x.dot(rot) diff --git a/test/test_partial.py b/test/test_partial.py index b533a9c..eb3b76e 100755 --- 
a/test/test_partial.py +++ b/test/test_partial.py @@ -8,6 +8,53 @@ import numpy as np import scipy as sp import ot +import pytest + + +def test_raise_errors(): + + n_samples = 20 # nb samples (gaussian) + n_noise = 20 # nb of samples (noise) + + mu = np.array([0, 0]) + cov = np.array([[1, 0], [0, 2]]) + + xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) + xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) + xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) + + M = ot.dist(xs, xt) + + p = ot.unif(n_samples + n_noise) + q = ot.unif(n_samples + n_noise) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein_lagrange(p + 1, q, M, 1, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein(p, q, M, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_wasserstein(p, q, M, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_gromov_wasserstein(M, M, p, q, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.partial_gromov_wasserstein(M, M, p, q, m=-1, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=2, log=True) + + with pytest.raises(ValueError): + ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=-1, log=True) def test_partial_wasserstein_lagrange(): @@ -115,7 +162,7 @@ def test_partial_gromov_wasserstein(): m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m, - log=True) + log=True, verbose=True) np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1) C1 = sp.spatial.distance.cdist(xs, xs) -- cgit v1.2.3 From eb3a70af671736c940c8aceaff8547b057d1335a Mon Sep 17 00:00:00 2001 From: Rémi Flamary Date: Fri, 24 Apr 2020 14:20:33 +0200 Subject: left some unused variable... --- test/test_partial.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'test/test_partial.py') diff --git a/test/test_partial.py b/test/test_partial.py index eb3b76e..510e081 100755 --- a/test/test_partial.py +++ b/test/test_partial.py @@ -75,8 +75,6 @@ def test_partial_wasserstein_lagrange(): p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) - m = 0.5 - w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 1, log=True) -- cgit v1.2.3
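For context, here is a minimal sketch of the ot.partial API that these test commits exercise, written only against the calls visible in the diffs above (partial_wasserstein, entropic_partial_wasserstein, partial_gromov_wasserstein, ot.datasets.make_2D_samples_gauss, ot.dist, ot.unif). It assumes a POT install that already ships the ot.partial module introduced by this series; the sample sizes, reg=1 and the mass fractions m are illustrative choices taken from the tests, not values required by the library.

import numpy as np
import ot

n_samples, n_noise = 20, 20          # Gaussian samples plus noise points
mu = np.array([0, 0])
cov = np.array([[1, 0], [0, 2]])

xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)
xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2))
xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)
xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2))

M = ot.dist(xs, xt)                  # pairwise cost matrix (squared Euclidean by default)
p = ot.unif(n_samples + n_noise)     # uniform source weights
q = ot.unif(n_samples + n_noise)     # uniform target weights

m = 0.5                              # fraction of the total mass to transport

# Exact and entropic partial Wasserstein: both return a transport plan whose
# total mass equals m and whose marginals stay below p and q, which is exactly
# what the assertions in test_partial_wasserstein check.
T0, log0 = ot.partial.partial_wasserstein(p, q, M, m=m, log=True)
T, log = ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=m, log=True)
assert np.allclose(T0.sum(), m, atol=1e-4)
assert np.all(T0.sum(1) <= p + 1e-5) and np.all(T0.sum(0) <= q + 1e-5)

# Partial Gromov-Wasserstein compares intra-domain distance matrices, so the
# two point clouds may live in spaces of different dimension.
C1 = ot.dist(xs, xs)
C2 = ot.dist(xt, xt)
TG, logG = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=2 / 3, log=True)

As the ot/partial.py hunk in the second commit shows, partial_wasserstein solves an extended problem in which both marginals are padded with nb_dummies dummy points that absorb the untransported mass (np.sum(a) - m split across the dummies); that is why the padding of the extended cost matrix is rewritten there from np.ones(...) * 0 to an explicit np.zeros(...).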