author    Rémi Flamary <remi.flamary@gmail.com>  2018-05-30 09:30:21 +0200
committer Rémi Flamary <remi.flamary@gmail.com>  2018-05-30 09:30:21 +0200
commit    90e42f32bdf0dd06667edaf172c51f4d4fce2c8b
tree      f5e4172c035729342ed998263ebba1b92bd7b608
parent    507003fe975c80b069d8527b547f0abc4852d16a
replace function name in tests
-rw-r--r--  ot/datasets.py        8
-rw-r--r--  test/test_bregman.py  10
-rw-r--r--  test/test_da.py       52
-rw-r--r--  test/test_dr.py       4
-rw-r--r--  test/test_gromov.py   16
-rw-r--r--  test/test_optim.py    8
-rw-r--r--  test/test_ot.py       2
-rw-r--r--  test/test_plot.py     8
8 files changed, 57 insertions, 51 deletions
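This commit switches the tests from the old get_* dataset helpers to the new make_* names; the ot/datasets.py hunk below also keeps get_1D_gauss as a deprecated wrapper around make_1D_gauss (the other make_* helpers are assumed to have been introduced in an earlier commit and are not part of this diff). A minimal usage sketch of the renamed API under that assumption:

    import ot

    # New-style helpers exercised by the updated tests
    a = ot.datasets.make_1D_gauss(100, m=30, s=10)          # 1D Gaussian histogram
    Xs, ys = ot.datasets.make_data_classif('3gauss', 150)   # toy classification data

    # The old 1D helper still exists but is wrapped with @deprecated()
    # and is expected to emit a deprecation warning when called.
    a_old = ot.datasets.get_1D_gauss(100, 30, 10)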
diff --git a/ot/datasets.py b/ot/datasets.py
index bbb77fb..362a89b 100644
--- a/ot/datasets.py
+++ b/ot/datasets.py
@@ -12,7 +12,7 @@ import scipy as sp
from .utils import check_random_state, deprecated
-def get_1D_gauss(n, m, s):
+def make_1D_gauss(n, m, s):
"""return a 1D histogram for a gaussian distribution (n bins, mean m and std s)
Parameters
@@ -37,6 +37,12 @@ def get_1D_gauss(n, m, s):
return h / h.sum()
+@deprecated()
+def get_1D_gauss(n, m, sigma):
+    """ Deprecated: see make_1D_gauss """
+    return make_1D_gauss(n, m, sigma)
+
+
def make_2D_samples_gauss(n, m, sigma, random_state=None):
"""return n samples drawn from 2D gaussian N(m,sigma)
diff --git a/test/test_bregman.py b/test/test_bregman.py
index 4a800fd..c8e9179 100644
--- a/test/test_bregman.py
+++ b/test/test_bregman.py
@@ -83,8 +83,8 @@ def test_bary():
n_bins = 100 # nb bins
# Gaussian distributions
- a1 = ot.datasets.get_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
- a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
+ a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std
+ a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
@@ -110,10 +110,10 @@ def test_unmix():
n_bins = 50 # nb bins
# Gaussian distributions
- a1 = ot.datasets.get_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
- a2 = ot.datasets.get_1D_gauss(n_bins, m=40, s=10)
+ a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std
+ a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10)
- a = ot.datasets.get_1D_gauss(n_bins, m=30, s=10)
+ a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10)
# creating matrix A containing all distributions
D = np.vstack((a1, a2)).T
diff --git a/test/test_da.py b/test/test_da.py
index 3022721..97e23da 100644
--- a/test/test_da.py
+++ b/test/test_da.py
@@ -8,7 +8,7 @@ import numpy as np
from numpy.testing.utils import assert_allclose, assert_equal
import ot
-from ot.datasets import get_data_classif
+from ot.datasets import make_data_classif
from ot.utils import unif
@@ -19,8 +19,8 @@ def test_sinkhorn_lpl1_transport_class():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
otda = ot.da.SinkhornLpl1Transport()
@@ -45,7 +45,7 @@ def test_sinkhorn_lpl1_transport_class():
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = get_data_classif('3gauss', ns + 1)
+ Xs_new, _ = make_data_classif('3gauss', ns + 1)
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -55,7 +55,7 @@ def test_sinkhorn_lpl1_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = get_data_classif('3gauss2', nt + 1)
+ Xt_new, _ = make_data_classif('3gauss2', nt + 1)
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -92,8 +92,8 @@ def test_sinkhorn_l1l2_transport_class():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
otda = ot.da.SinkhornL1l2Transport()
@@ -119,7 +119,7 @@ def test_sinkhorn_l1l2_transport_class():
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = get_data_classif('3gauss', ns + 1)
+ Xs_new, _ = make_data_classif('3gauss', ns + 1)
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -129,7 +129,7 @@ def test_sinkhorn_l1l2_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = get_data_classif('3gauss2', nt + 1)
+ Xt_new, _ = make_data_classif('3gauss2', nt + 1)
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -173,8 +173,8 @@ def test_sinkhorn_transport_class():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
otda = ot.da.SinkhornTransport()
@@ -200,7 +200,7 @@ def test_sinkhorn_transport_class():
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = get_data_classif('3gauss', ns + 1)
+ Xs_new, _ = make_data_classif('3gauss', ns + 1)
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -210,7 +210,7 @@ def test_sinkhorn_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = get_data_classif('3gauss2', nt + 1)
+ Xt_new, _ = make_data_classif('3gauss2', nt + 1)
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -252,8 +252,8 @@ def test_emd_transport_class():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
otda = ot.da.EMDTransport()
@@ -278,7 +278,7 @@ def test_emd_transport_class():
transp_Xs = otda.transform(Xs=Xs)
assert_equal(transp_Xs.shape, Xs.shape)
- Xs_new, _ = get_data_classif('3gauss', ns + 1)
+ Xs_new, _ = make_data_classif('3gauss', ns + 1)
transp_Xs_new = otda.transform(Xs_new)
# check that the oos method is working
@@ -288,7 +288,7 @@ def test_emd_transport_class():
transp_Xt = otda.inverse_transform(Xt=Xt)
assert_equal(transp_Xt.shape, Xt.shape)
- Xt_new, _ = get_data_classif('3gauss2', nt + 1)
+ Xt_new, _ = make_data_classif('3gauss2', nt + 1)
transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
# check that the oos method is working
@@ -329,9 +329,9 @@ def test_mapping_transport_class():
ns = 60
nt = 120
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
- Xs_new, _ = get_data_classif('3gauss', ns + 1)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
+ Xs_new, _ = make_data_classif('3gauss', ns + 1)
##########################################################################
# kernel == linear mapping tests
@@ -449,8 +449,8 @@ def test_linear_mapping():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
A, b = ot.da.OT_mapping_linear(Xs, Xt)
@@ -467,8 +467,8 @@ def test_linear_mapping_class():
ns = 150
nt = 200
- Xs, ys = get_data_classif('3gauss', ns)
- Xt, yt = get_data_classif('3gauss2', nt)
+ Xs, ys = make_data_classif('3gauss', ns)
+ Xt, yt = make_data_classif('3gauss2', nt)
otmap = ot.da.LinearTransport()
@@ -491,8 +491,8 @@ def test_otda():
n_samples = 150 # nb samples
np.random.seed(0)
- xs, ys = ot.datasets.get_data_classif('3gauss', n_samples)
- xt, yt = ot.datasets.get_data_classif('3gauss2', n_samples)
+ xs, ys = ot.datasets.make_data_classif('3gauss', n_samples)
+ xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples)
a, b = ot.unif(n_samples), ot.unif(n_samples)
diff --git a/test/test_dr.py b/test/test_dr.py
index 915012d..c5df287 100644
--- a/test/test_dr.py
+++ b/test/test_dr.py
@@ -22,7 +22,7 @@ def test_fda():
np.random.seed(0)
# generate gaussian dataset
- xs, ys = ot.datasets.get_data_classif('gaussrot', n_samples)
+ xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples)
n_features_noise = 8
@@ -44,7 +44,7 @@ def test_wda():
np.random.seed(0)
# generate gaussian dataset
- xs, ys = ot.datasets.get_data_classif('gaussrot', n_samples)
+ xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples)
n_features_noise = 8
diff --git a/test/test_gromov.py b/test/test_gromov.py
index bb23469..fb86274 100644
--- a/test/test_gromov.py
+++ b/test/test_gromov.py
@@ -15,7 +15,7 @@ def test_gromov():
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
- xs = ot.datasets.get_2D_samples_gauss(n_samples, mu_s, cov_s)
+ xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s)
xt = xs[::-1].copy()
@@ -55,7 +55,7 @@ def test_entropic_gromov():
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
- xs = ot.datasets.get_2D_samples_gauss(n_samples, mu_s, cov_s)
+ xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s)
xt = xs[::-1].copy()
@@ -96,8 +96,8 @@ def test_gromov_barycenter():
ns = 50
nt = 60
- Xs, ys = ot.datasets.get_data_classif('3gauss', ns)
- Xt, yt = ot.datasets.get_data_classif('3gauss2', nt)
+ Xs, ys = ot.datasets.make_data_classif('3gauss', ns)
+ Xt, yt = ot.datasets.make_data_classif('3gauss2', nt)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
@@ -123,8 +123,8 @@ def test_gromov_entropic_barycenter():
ns = 50
nt = 60
- Xs, ys = ot.datasets.get_data_classif('3gauss', ns)
- Xt, yt = ot.datasets.get_data_classif('3gauss2', nt)
+ Xs, ys = ot.datasets.make_data_classif('3gauss', ns)
+ Xt, yt = ot.datasets.make_data_classif('3gauss2', nt)
C1 = ot.dist(Xs)
C2 = ot.dist(Xt)
@@ -133,13 +133,13 @@ def test_gromov_entropic_barycenter():
Cb = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
- 'square_loss', 1e-3,
+ 'square_loss', 2e-3,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
Cb2 = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
[ot.unif(ns), ot.unif(nt)
], ot.unif(n_samples), [.5, .5],
- 'kl_loss', 1e-3,
+ 'kl_loss', 2e-3,
max_iter=100, tol=1e-3)
np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
diff --git a/test/test_optim.py b/test/test_optim.py
index 69496a5..dfefe59 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -16,8 +16,8 @@ def test_conditional_gradient():
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
- a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
- b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
+ a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
+ b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
@@ -45,8 +45,8 @@ def test_generalized_conditional_gradient():
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
- a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
- b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
+ a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
+ b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
diff --git a/test/test_ot.py b/test/test_ot.py
index cc25bf4..399e549 100644
--- a/test/test_ot.py
+++ b/test/test_ot.py
@@ -9,7 +9,7 @@ import warnings
import numpy as np
import ot
-from ot.datasets import get_1D_gauss as gauss
+from ot.datasets import make_1D_gauss as gauss
import pytest
diff --git a/test/test_plot.py b/test/test_plot.py
index a50ed14..f77d879 100644
--- a/test/test_plot.py
+++ b/test/test_plot.py
@@ -20,8 +20,8 @@ def test_plot1D_mat():
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
- a = ot.datasets.get_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
- b = ot.datasets.get_1D_gauss(n_bins, m=60, s=10)
+ a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
+ b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
@@ -43,8 +43,8 @@ def test_plot2D_samples_mat():
mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])
- xs = ot.datasets.get_2D_samples_gauss(n_bins, mu_s, cov_s)
- xt = ot.datasets.get_2D_samples_gauss(n_bins, mu_t, cov_t)
+ xs = ot.datasets.make_2D_samples_gauss(n_bins, mu_s, cov_s)
+ xt = ot.datasets.make_2D_samples_gauss(n_bins, mu_t, cov_t)
G = 1.0 * (np.random.rand(n_bins, n_bins) < 0.01)