author | Rémi Flamary <remi.flamary@gmail.com> | 2017-07-24 15:14:59 +0200
---|---|---
committer | Rémi Flamary <remi.flamary@gmail.com> | 2017-07-24 15:14:59 +0200
commit | 709d8cbc9f9961a5175eb64ae497b854e0b9b184 |
tree | 4be68b8291433c23972831000020832ccf440d3c /test |
parent | 7d9c5e7ef81cfb1cd4725058c09a7f683ca03eef |
add dr tests
Diffstat (limited to 'test')
-rw-r--r-- | test/test_dr.py | 63
-rw-r--r-- | test/test_gpu.py | 53
-rw-r--r-- | test/test_optim.py | 4
3 files changed, 93 insertions, 27 deletions
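
For context, a quick way to run only the files this commit touches (a hypothetical local invocation, not part of the commit, assuming `pytest` is installed; tests guarded by `pytest.mark.skipif` simply report as skipped when a GPU or the optional dependencies are missing):

```python
# Hypothetical helper, not part of the commit: run only the three
# test files listed in the diffstat above.
import sys

import pytest

sys.exit(pytest.main(["test/test_dr.py", "test/test_gpu.py", "test/test_optim.py"]))
```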
diff --git a/test/test_dr.py b/test/test_dr.py
new file mode 100644
index 0000000..24ccaa1
--- /dev/null
+++ b/test/test_dr.py
@@ -0,0 +1,63 @@
+import ot
+import numpy as np
+import pytest
+
+try:  # test if autograd and pymanopt are installed
+    import ot.dr
+    nogo = False
+except ImportError:
+    nogo = True
+
+
+@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
+def test_fda():
+
+    n = 100  # nb samples in source and target datasets
+    nz = 0.2
+    np.random.seed(0)
+
+    # generate circle dataset
+    t = np.random.rand(n) * 2 * np.pi
+    ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
+    xs = np.concatenate(
+        (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
+    xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
+
+    nbnoise = 8
+
+    xs = np.hstack((xs, np.random.randn(n, nbnoise)))
+
+    p = 2
+
+    Pfda, projfda = ot.dr.fda(xs, ys, p)
+
+    projfda(xs)
+
+    assert np.allclose(np.sum(Pfda**2, 0), np.ones(p))
+
+
+@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
+def test_wda():
+
+    n = 100  # nb samples in source and target datasets
+    nz = 0.2
+    np.random.seed(0)
+
+    # generate circle dataset
+    t = np.random.rand(n) * 2 * np.pi
+    ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
+    xs = np.concatenate(
+        (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
+    xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
+
+    nbnoise = 8
+
+    xs = np.hstack((xs, np.random.randn(n, nbnoise)))
+
+    p = 2
+
+    Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10)
+
+    projwda(xs)
+
+    assert np.allclose(np.sum(Pwda**2, 0), np.ones(p))
diff --git a/test/test_gpu.py b/test/test_gpu.py
index 49b98d0..9cc39d7 100644
--- a/test/test_gpu.py
+++ b/test/test_gpu.py
@@ -12,7 +12,8 @@ except ImportError:
 
 @pytest.mark.skipif(nogpu, reason="No GPU available")
 def test_gpu_sinkhorn():
-    import ot.gpu
+
+    np.random.seed(0)
 
     def describeRes(r):
         print("min:{:.3E}, max::{:.3E}, mean::{:.3E}, std::{:.3E}".format(
@@ -41,29 +42,31 @@
 
 @pytest.mark.skipif(nogpu, reason="No GPU available")
 def test_gpu_sinkhorn_lpl1():
-    def describeRes(r):
-        print("min:{:.3E}, max:{:.3E}, mean:{:.3E}, std:{:.3E}"
-              .format(np.min(r), np.max(r), np.mean(r), np.std(r)))
+    np.random.seed(0)
+
+    def describeRes(r):
+        print("min:{:.3E}, max:{:.3E}, mean:{:.3E}, std:{:.3E}"
+              .format(np.min(r), np.max(r), np.mean(r), np.std(r)))
 
-    for n in [50, 100, 500, 1000]:
-        print(n)
-        a = np.random.rand(n // 4, 100)
-        labels_a = np.random.randint(10, size=(n // 4))
-        b = np.random.rand(n, 100)
-        time1 = time.time()
-        transport = ot.da.OTDA_lpl1()
-        transport.fit(a, labels_a, b)
-        G1 = transport.G
-        time2 = time.time()
-        transport = ot.gpu.da.OTDA_lpl1()
-        transport.fit(a, labels_a, b)
-        G2 = transport.G
-        time3 = time.time()
-        print("Normal sinkhorn lpl1, time: {:6.2f} sec ".format(
-            time2 - time1))
-        describeRes(G1)
-        print("  GPU sinkhorn lpl1, time: {:6.2f} sec ".format(
-            time3 - time2))
-        describeRes(G2)
+    for n in [50, 100, 500, 1000]:
+        print(n)
+        a = np.random.rand(n // 4, 100)
+        labels_a = np.random.randint(10, size=(n // 4))
+        b = np.random.rand(n, 100)
+        time1 = time.time()
+        transport = ot.da.OTDA_lpl1()
+        transport.fit(a, labels_a, b)
+        G1 = transport.G
+        time2 = time.time()
+        transport = ot.gpu.da.OTDA_lpl1()
+        transport.fit(a, labels_a, b)
+        G2 = transport.G
+        time3 = time.time()
+        print("Normal sinkhorn lpl1, time: {:6.2f} sec ".format(
+            time2 - time1))
+        describeRes(G1)
+        print("  GPU sinkhorn lpl1, time: {:6.2f} sec ".format(
+            time3 - time2))
+        describeRes(G2)
 
-    assert np.allclose(G1, G2, rtol=1e-5, atol=1e-5)
+    assert np.allclose(G1, G2, rtol=1e-5, atol=1e-5)
diff --git a/test/test_optim.py b/test/test_optim.py
index 43cba7d..a77a37c 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -9,7 +9,7 @@ import numpy as np
 def test_conditional_gradient():
 
     n = 100  # nb bins
-
+    np.random.seed(0)
 
     # bin positions
     x = np.arange(n, dtype=np.float64)
@@ -38,7 +38,7 @@ def test_conditional_gradient():
 def test_generalized_conditional_gradient():
 
     n = 100  # nb bins
-
+    np.random.seed(0)
 
     # bin positions
     x = np.arange(n, dtype=np.float64)
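
For readers new to `ot.dr`, here is a minimal usage sketch of the API the new tests exercise, mirroring the test code above. It assumes POT is installed together with the optional `autograd` and `pymanopt` dependencies, since `import ot.dr` raises `ImportError` without them:

```python
# Minimal sketch of the ot.dr API exercised by test_dr.py.
# Assumes POT plus its optional autograd and pymanopt dependencies.
import numpy as np

import ot.dr

np.random.seed(0)

# Three noisy concentric circles in 2D, padded with 8 pure-noise
# dimensions, as in the tests above.
n, nz, nbnoise, p = 100, 0.2, 8, 2
t = np.random.rand(n) * 2 * np.pi
ys = np.floor(np.arange(n) * 1.0 / n * 3) + 1
xs = np.concatenate((np.cos(t).reshape((-1, 1)),
                     np.sin(t).reshape((-1, 1))), 1)
xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
xs = np.hstack((xs, np.random.randn(n, nbnoise)))

# Both calls return a projection matrix P and a callable that projects
# samples onto the learned p-dimensional subspace.
Pfda, projfda = ot.dr.fda(xs, ys, p)
Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10)

# As the tests assert, the columns of P have unit norm.
assert np.allclose(np.sum(Pwda**2, 0), np.ones(p))
print(projwda(xs).shape)
```

Capping `maxiter` at 10 presumably keeps the WDA manifold optimization cheap enough for a unit test; FDA has a closed-form solution and needs no such cap.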