-rw-r--r--  test/test_bregman.py  65
-rw-r--r--  test/test_da.py       32
-rw-r--r--  test/test_dr.py       34
-rw-r--r--  test/test_gpu.py      42
-rw-r--r--  test/test_optim.py    32
-rw-r--r--  test/test_ot.py        4
6 files changed, 100 insertions, 109 deletions
diff --git a/test/test_bregman.py b/test/test_bregman.py
index 025568c..1638ef6 100644
--- a/test/test_bregman.py
+++ b/test/test_bregman.py
@@ -1,17 +1,14 @@
-
-import ot
import numpy as np
-
-# import pytest
+import ot
def test_sinkhorn():
# test sinkhorn
n = 100
- np.random.seed(0)
+ rng = np.random.RandomState(0)
- x = np.random.randn(n, 2)
+ x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
@@ -19,45 +16,47 @@ def test_sinkhorn():
G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10)
# check constraints
- assert np.allclose(u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
- assert np.allclose(u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
+ np.testing.assert_allclose(
+ u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
+ np.testing.assert_allclose(
+ u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
def test_sinkhorn_empty():
# test sinkhorn
n = 100
- np.random.seed(0)
+ rng = np.random.RandomState(0)
- x = np.random.randn(n, 2)
+ x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True)
# check constraints
- assert np.allclose(u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
- assert np.allclose(u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
+ np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
+ np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10,
method='sinkhorn_stabilized', verbose=True, log=True)
# check constraints
- assert np.allclose(u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
- assert np.allclose(u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
+ np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
+ np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
G, log = ot.sinkhorn(
[], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling',
verbose=True, log=True)
# check constraints
- assert np.allclose(u, G.sum(1), atol=1e-05) # cf convergence sinkhorn
- assert np.allclose(u, G.sum(0), atol=1e-05) # cf convergence sinkhorn
+ np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
+ np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
def test_sinkhorn_variants():
# test sinkhorn
n = 100
- np.random.seed(0)
+ rng = np.random.RandomState(0)
- x = np.random.randn(n, 2)
+ x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)
@@ -69,24 +68,24 @@ def test_sinkhorn_variants():
Gerr = ot.sinkhorn(u, u, M, 1, method='do_not_exists', stopThr=1e-10)
# check values
- assert np.allclose(G0, Gs, atol=1e-05)
- assert np.allclose(G0, Ges, atol=1e-05)
- assert np.allclose(G0, Gerr)
+ np.testing.assert_allclose(G0, Gs, atol=1e-05)
+ np.testing.assert_allclose(G0, Ges, atol=1e-05)
+ np.testing.assert_allclose(G0, Gerr)
def test_bary():
- n = 100 # nb bins
+ n_bins = 100 # nb bins
# Gaussian distributions
- a1 = ot.datasets.get_1D_gauss(n, m=30, s=10) # m= mean, s= std
- a2 = ot.datasets.get_1D_gauss(n, m=40, s=10)
+ a1 = ot.datasets.get_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
+ a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
# loss matrix + normalization
- M = ot.utils.dist0(n)
+ M = ot.utils.dist0(n_bins)
M /= M.max()
alpha = 0.5 # 0<=alpha<=1
@@ -96,26 +95,26 @@ def test_bary():
reg = 1e-3
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
- assert np.allclose(1, np.sum(bary_wass))
+ np.testing.assert_allclose(1, np.sum(bary_wass))
ot.bregman.barycenter(A, M, reg, log=True, verbose=True)
def test_unmix():
- n = 50 # nb bins
+ n_bins = 50 # nb bins
# Gaussian distributions
- a1 = ot.datasets.get_1D_gauss(n, m=20, s=10) # m= mean, s= std
- a2 = ot.datasets.get_1D_gauss(n, m=40, s=10)
+ a1 = ot.datasets.get_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
+ a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
- a = ot.datasets.get_1D_gauss(n, m=30, s=10)
+ a = ot.datasets.get_1D_gauss(n_bins, m=30, s=10)
# creating matrix A containing all distributions
D = np.vstack((a1, a2)).T
# loss matrix + normalization
- M = ot.utils.dist0(n)
+ M = ot.utils.dist0(n_bins)
M /= M.max()
M0 = ot.utils.dist0(2)
@@ -126,8 +125,8 @@ def test_unmix():
reg = 1e-3
um = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01,)
- assert np.allclose(1, np.sum(um), rtol=1e-03, atol=1e-03)
- assert np.allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03)
+ np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03)
+ np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03)
ot.bregman.unmix(a, D, M, M0, h0, reg,
1, alpha=0.01, log=True, verbose=True)
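
A note on the assertion change that recurs throughout this patch: np.testing.assert_allclose is preferable to a bare assert np.allclose(...) because on failure it raises an AssertionError carrying both arrays and the maximum absolute and relative differences, and it is not stripped when Python runs with -O. A minimal sketch of the difference, assuming nothing beyond NumPy:

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0, 2.1])

# assert np.allclose(a, b, atol=1e-05) would report only "AssertionError"
try:
    np.testing.assert_allclose(a, b, atol=1e-05)
except AssertionError as err:
    print(err)  # prints the mismatch ratio and both arrays
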
diff --git a/test/test_da.py b/test/test_da.py
index 50d3aba..a38390f 100644
--- a/test/test_da.py
+++ b/test/test_da.py
@@ -1,19 +1,17 @@
-
-import ot
import numpy as np
-
-# import pytest
+import ot
-def test_OTDA():
+def test_otda():
- n = 150 # nb bins
+ n_samples = 150 # nb samples
+ np.random.seed(0)
- xs, ys = ot.datasets.get_data_classif('3gauss', n)
- xt, yt = ot.datasets.get_data_classif('3gauss2', n)
+ xs, ys = ot.datasets.get_data_classif('3gauss', n_samples)
+ xt, yt = ot.datasets.get_data_classif('3gauss2', n_samples)
- a, b = ot.unif(n), ot.unif(n)
+ a, b = ot.unif(n_samples), ot.unif(n_samples)
# LP problem
da_emd = ot.da.OTDA() # init class
@@ -21,8 +19,8 @@ def test_OTDA():
da_emd.interp() # interpolation of source samples
da_emd.predict(xs) # interpolation of source samples
- assert np.allclose(a, np.sum(da_emd.G, 1))
- assert np.allclose(b, np.sum(da_emd.G, 0))
+ np.testing.assert_allclose(a, np.sum(da_emd.G, 1))
+ np.testing.assert_allclose(b, np.sum(da_emd.G, 0))
# sinkhorn regularization
lambd = 1e-1
@@ -31,8 +29,8 @@ def test_OTDA():
da_entrop.interp()
da_entrop.predict(xs)
- assert np.allclose(a, np.sum(da_entrop.G, 1), rtol=1e-3, atol=1e-3)
- assert np.allclose(b, np.sum(da_entrop.G, 0), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(a, np.sum(da_entrop.G, 1), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(b, np.sum(da_entrop.G, 0), rtol=1e-3, atol=1e-3)
# non-convex Group lasso regularization
reg = 1e-1
@@ -42,8 +40,8 @@ def test_OTDA():
da_lpl1.interp()
da_lpl1.predict(xs)
- assert np.allclose(a, np.sum(da_lpl1.G, 1), rtol=1e-3, atol=1e-3)
- assert np.allclose(b, np.sum(da_lpl1.G, 0), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(a, np.sum(da_lpl1.G, 1), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(b, np.sum(da_lpl1.G, 0), rtol=1e-3, atol=1e-3)
# True Group lasso regularization
reg = 1e-1
@@ -53,8 +51,8 @@ def test_OTDA():
da_l1l2.interp()
da_l1l2.predict(xs)
- assert np.allclose(a, np.sum(da_l1l2.G, 1), rtol=1e-3, atol=1e-3)
- assert np.allclose(b, np.sum(da_l1l2.G, 0), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(a, np.sum(da_l1l2.G, 1), rtol=1e-3, atol=1e-3)
+ np.testing.assert_allclose(b, np.sum(da_l1l2.G, 0), rtol=1e-3, atol=1e-3)
# linear mapping
da_emd = ot.da.OTDA_mapping_linear() # init class
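
Note that test_da.py keeps the global np.random.seed(0) while test_bregman.py and test_gpu.py switch to a local np.random.RandomState. The local generator keeps a fixture reproducible even if another test reseeds the global state; a minimal sketch of the pattern, assuming only NumPy:

import numpy as np

rng = np.random.RandomState(0)  # local generator, isolated from the global state

x = rng.randn(150, 2)  # deterministic for this test
np.random.seed(42)     # a reseed elsewhere in the suite...
y = rng.randn(150, 2)  # ...does not perturb draws from rng
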
diff --git a/test/test_dr.py b/test/test_dr.py
index 3da7705..bdb920e 100644
--- a/test/test_dr.py
+++ b/test/test_dr.py
@@ -1,8 +1,9 @@
-import ot
+
import numpy as np
+import ot
import pytest
-try: # test if cudamat installed
+try: # test if autograd and pymanopt are installed
import ot.dr
nogo = False
except ImportError:
@@ -12,15 +13,15 @@ except ImportError:
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_fda():
- n = 90 # nb samples in source and target datasets
+ n_samples = 90 # nb samples in source and target datasets
np.random.seed(0)
- # generate circle dataset
- xs, ys = ot.datasets.get_data_classif('gaussrot', n)
+ # generate Gaussian dataset
+ xs, ys = ot.datasets.get_data_classif('gaussrot', n_samples)
- nbnoise = 8
+ n_features_noise = 8
- xs = np.hstack((xs, np.random.randn(n, nbnoise)))
+ xs = np.hstack((xs, np.random.randn(n_samples, n_features_noise)))
p = 1
@@ -28,26 +29,21 @@ def test_fda():
projfda(xs)
- assert np.allclose(np.sum(Pfda**2, 0), np.ones(p))
+ np.testing.assert_allclose(np.sum(Pfda**2, 0), np.ones(p))
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_wda():
- n = 100 # nb samples in source and target datasets
- nz = 0.2
+ n_samples = 100 # nb samples in source and target datasets
np.random.seed(0)
- # generate circle dataset
- t = np.random.rand(n) * 2 * np.pi
- ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
- xs = np.concatenate(
- (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
- xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
+ # generate Gaussian dataset
+ xs, ys = ot.datasets.get_data_classif('gaussrot', n_samples)
- nbnoise = 8
+ n_features_noise = 8
- xs = np.hstack((xs, np.random.randn(n, nbnoise)))
+ xs = np.hstack((xs, np.random.randn(n_samples, n_features_noise)))
p = 2
@@ -55,4 +51,4 @@ def test_wda():
projwda(xs)
- assert np.allclose(np.sum(Pwda**2, 0), np.ones(p))
+ np.testing.assert_allclose(np.sum(Pwda**2, 0), np.ones(p))
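
test_dr.py (like test_gpu.py below) guards its optional dependencies with a try/except import plus pytest.mark.skipif, so the suite passes on machines without autograd or pymanopt instead of erroring at collection time. A self-contained sketch of the pattern, with the module and skip reason taken from the diff:

import pytest

try:  # ot.dr requires autograd and pymanopt
    import ot.dr  # noqa: F401
    nogo = False
except ImportError:
    nogo = True


@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_requires_dr():
    # runs only when ot.dr imported successfully above
    assert not nogo
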
diff --git a/test/test_gpu.py b/test/test_gpu.py
index 24797f2..98f59f7 100644
--- a/test/test_gpu.py
+++ b/test/test_gpu.py
@@ -1,5 +1,6 @@
-import ot
+
import numpy as np
+import ot
import time
import pytest
@@ -13,16 +14,16 @@ except ImportError:
@pytest.mark.skipif(nogpu, reason="No GPU available")
def test_gpu_sinkhorn():
- np.random.seed(0)
+ rng = np.random.RandomState(0)
- def describeRes(r):
+ def describe_res(r):
print("min:{:.3E}, max::{:.3E}, mean::{:.3E}, std::{:.3E}".format(
np.min(r), np.max(r), np.mean(r), np.std(r)))
- for n in [50, 100, 500, 1000]:
- print(n)
- a = np.random.rand(n // 4, 100)
- b = np.random.rand(n, 100)
+ for n_samples in [50, 100, 500, 1000]:
+ print(n_samples)
+ a = rng.rand(n_samples // 4, 100)
+ b = rng.rand(n_samples, 100)
time1 = time.time()
transport = ot.da.OTDA_sinkhorn()
transport.fit(a, b)
@@ -33,26 +34,27 @@ def test_gpu_sinkhorn():
G2 = transport.G
time3 = time.time()
print("Normal sinkhorn, time: {:6.2f} sec ".format(time2 - time1))
- describeRes(G1)
+ describe_res(G1)
print(" GPU sinkhorn, time: {:6.2f} sec ".format(time3 - time2))
- describeRes(G2)
+ describe_res(G2)
- assert np.allclose(G1, G2, rtol=1e-5, atol=1e-5)
+ np.testing.assert_allclose(G1, G2, rtol=1e-5, atol=1e-5)
@pytest.mark.skipif(nogpu, reason="No GPU available")
def test_gpu_sinkhorn_lpl1():
- np.random.seed(0)
- def describeRes(r):
+ rng = np.random.RandomState(0)
+
+ def describe_res(r):
print("min:{:.3E}, max:{:.3E}, mean:{:.3E}, std:{:.3E}"
.format(np.min(r), np.max(r), np.mean(r), np.std(r)))
- for n in [50, 100, 500]:
- print(n)
- a = np.random.rand(n // 4, 100)
- labels_a = np.random.randint(10, size=(n // 4))
- b = np.random.rand(n, 100)
+ for n_samples in [50, 100, 500]:
+ print(n_samples)
+ a = rng.rand(n_samples // 4, 100)
+ labels_a = rng.randint(10, size=(n_samples // 4))
+ b = rng.rand(n_samples, 100)
time1 = time.time()
transport = ot.da.OTDA_lpl1()
transport.fit(a, labels_a, b)
@@ -64,9 +66,9 @@ def test_gpu_sinkhorn_lpl1():
time3 = time.time()
print("Normal sinkhorn lpl1, time: {:6.2f} sec ".format(
time2 - time1))
- describeRes(G1)
+ describe_res(G1)
print(" GPU sinkhorn lpl1, time: {:6.2f} sec ".format(
time3 - time2))
- describeRes(G2)
+ describe_res(G2)
- assert np.allclose(G1, G2, rtol=1e-5, atol=1e-5)
+ np.testing.assert_allclose(G1, G2, rtol=1e-5, atol=1e-5)
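
Both GPU tests follow one skeleton: fit the CPU class and its GPU counterpart on identical data, print timings and summary statistics, and require the two transport plans to agree within tolerance. A condensed sketch of that skeleton; the GPU class name is an assumption (it is not visible in the hunks above) and it requires the optional ot.gpu module and a CUDA device:

import time

import numpy as np
import ot

rng = np.random.RandomState(0)
a = rng.rand(25, 100)
b = rng.rand(100, 100)

t0 = time.time()
transport = ot.da.OTDA_sinkhorn()  # CPU solver
transport.fit(a, b)
G1 = transport.G
t1 = time.time()

transport = ot.gpu.da.OTDA_sinkhorn()  # assumed GPU counterpart (optional module)
transport.fit(a, b)
G2 = transport.G
t2 = time.time()

print("CPU: {:6.2f} s, GPU: {:6.2f} s".format(t1 - t0, t2 - t1))
np.testing.assert_allclose(G1, G2, rtol=1e-5, atol=1e-5)
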
diff --git a/test/test_optim.py b/test/test_optim.py
index a77a37c..2840cad 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -1,24 +1,22 @@
-
-import ot
import numpy as np
+import ot
-# import pytest
def test_conditional_gradient():
- n = 100 # nb bins
+ n_bins = 100 # nb bins
np.random.seed(0)
# bin positions
- x = np.arange(n, dtype=np.float64)
+ x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
- a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
- b = ot.datasets.get_1D_gauss(n, m=60, s=10)
+ a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
+ b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
# loss matrix
- M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
+ M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()
def f(G):
@@ -31,23 +29,23 @@ def test_conditional_gradient():
G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True)
- assert np.allclose(a, G.sum(1))
- assert np.allclose(b, G.sum(0))
+ np.testing.assert_allclose(a, G.sum(1))
+ np.testing.assert_allclose(b, G.sum(0))
def test_generalized_conditional_gradient():
- n = 100 # nb bins
+ n_bins = 100 # nb bins
np.random.seed(0)
# bin positions
- x = np.arange(n, dtype=np.float64)
+ x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
- a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
- b = ot.datasets.get_1D_gauss(n, m=60, s=10)
+ a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
+ b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
# loss matrix
- M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
+ M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()
def f(G):
@@ -61,5 +59,5 @@ def test_generalized_conditional_gradient():
G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True)
- assert np.allclose(a, G.sum(1), atol=1e-05)
- assert np.allclose(b, G.sum(0), atol=1e-05)
+ np.testing.assert_allclose(a, G.sum(1), atol=1e-05)
+ np.testing.assert_allclose(b, G.sum(0), atol=1e-05)
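
The bodies of f and df are truncated in the hunks above; in POT's tests they implement the squared-Frobenius regularizer whose value/gradient pair ot.optim.cg and ot.optim.gcg expect. A minimal runnable sketch under that assumption:

import numpy as np
import ot


def f(G):
    return 0.5 * np.sum(G ** 2)  # squared-Frobenius regularization term


def df(G):
    return G  # its gradient


n_bins = 100
x = np.arange(n_bins, dtype=np.float64)
a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5)
b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()

G = ot.optim.cg(a, b, M, 1e-1, f, df)
np.testing.assert_allclose(a, G.sum(1))  # marginals of the plan match a and b
np.testing.assert_allclose(b, G.sum(0))
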
diff --git a/test/test_ot.py b/test/test_ot.py
index 5bf65c6..9c0acab 100644
--- a/test/test_ot.py
+++ b/test/test_ot.py
@@ -1,9 +1,7 @@
-
-import ot
import numpy as np
+import ot
-# import pytest
def test_doctest():