-rw-r--r-- README.md | 7
-rw-r--r-- test/test_stochastic.py | 48
2 files changed, 44 insertions(+), 11 deletions(-)
diff --git a/README.md b/README.md
index 677a23b..1d3b097 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,7 @@ It provides the following solvers:
* Wasserstein Discriminant Analysis [11] (requires autograd + pymanopt).
* Gromov-Wasserstein distances and barycenters ([13] and regularized [12])
* Stochastic Optimization for Large-scale Optimal Transport (semi-dual problem [18] and dual problem [19])
+* Non-regularized free support Wasserstein barycenters [20].
Some demonstrations (both in Python and Jupyter Notebook format) are available in the examples folder.
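For context on the stochastic solvers listed above, here is a minimal sketch of the semi-dual solver [18] on a toy problem. The function name `ot.stochastic.solve_semi_dual_entropic` and its `method`/`numItermax` parameters are assumptions based on this release's API (they do not appear in this diff), so treat the call as illustrative rather than definitive.

```python
# Minimal sketch: semi-dual stochastic solver [18] on a toy problem.
# NOTE: solve_semi_dual_entropic and its (method, numItermax) parameters
# are assumed from this release's API, not shown in this diff.
import numpy as np
import ot

n = 10
reg = 1
rng = np.random.RandomState(0)

x = rng.randn(n, 2)
u = ot.utils.unif(n)      # uniform source/target marginals
M = ot.dist(x, x)         # squared Euclidean cost matrix

G = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg,
                                           method="SAG",
                                           numItermax=10000)
# marginals of the plan should approach u as SGD converges
print(np.abs(u - G.sum(1)).max())
```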
@@ -163,7 +164,7 @@ The contributors to this library are:
* [Stanislas Chambon](https://slasnista.github.io/)
* [Antoine Rolet](https://arolet.github.io/)
* Erwan Vautier (Gromov-Wasserstein)
-* [Kilian Fatras](https://kilianfatras.github.io/) (Stochastic optimization)
+* [Kilian Fatras](https://kilianfatras.github.io/)
This toolbox benefits a lot from open-source research, and we would like to thank the following people for providing some code (in various languages):
@@ -222,6 +223,8 @@ You can also post bug reports and feature requests in Github issues. Make sure t
[17] Blondel, M., Seguy, V., & Rolet, A. (2018). [Smooth and Sparse Optimal Transport](https://arxiv.org/abs/1710.06276). Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).
-[18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) [Stochastic Optimization for Large-scale Optimal Transport](arXiv preprint arxiv:1605.08527). Advances in Neural Information Processing Systems (2016).
+[18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) [Stochastic Optimization for Large-scale Optimal Transport](https://arxiv.org/abs/1605.08527). Advances in Neural Information Processing Systems (2016).
[19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. [Large-scale Optimal Transport and Mapping Estimation](https://arxiv.org/pdf/1711.02283.pdf). International Conference on Learning Representations (2018).
+
+[20] Cuturi, M. and Doucet, A. (2014) [Fast Computation of Wasserstein Barycenters](http://proceedings.mlr.press/v32/cuturi14.html). International Conference on Machine Learning (ICML).
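Reference [20] backs the new free-support barycenter feature announced in the README hunk above. A hedged sketch of how it might be called is below; the name `ot.lp.free_support_barycenter` and its argument order are assumptions (the solver's code is not part of this diff), so check the module docs.

```python
# Minimal sketch: non-regularized free-support barycenter [20].
# NOTE: ot.lp.free_support_barycenter and its argument order are
# assumed from this release's API; they do not appear in this diff.
import numpy as np
import ot

rng = np.random.RandomState(0)

# two empirical measures in R^2 with uniform weights
X1 = rng.randn(20, 2)
X2 = rng.randn(30, 2) + np.array([4.0, 0.0])
measures_locations = [X1, X2]
measures_weights = [ot.utils.unif(20), ot.utils.unif(30)]

# barycenter support initialized with k free atoms
k = 10
X_init = rng.randn(k, 2)

X_bary = ot.lp.free_support_barycenter(measures_locations,
                                       measures_weights, X_init)
print(X_bary.shape)  # (k, 2)
```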
diff --git a/test/test_stochastic.py b/test/test_stochastic.py
index f315c88..88ad666 100644
--- a/test/test_stochastic.py
+++ b/test/test_stochastic.py
@@ -137,8 +137,8 @@ def test_stochastic_dual_sgd():
# test sgd
n = 10
reg = 1
- numItermax = 300000
- batch_size = 8
+ numItermax = 15000
+ batch_size = 10
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
@@ -151,9 +151,9 @@ def test_stochastic_dual_sgd():
# check constraints
np.testing.assert_allclose(
- u, G.sum(1), atol=1e-02) # cf convergence sgd
+ u, G.sum(1), atol=1e-04) # cf convergence sgd
np.testing.assert_allclose(
- u, G.sum(0), atol=1e-02) # cf convergence sgd
+ u, G.sum(0), atol=1e-04) # cf convergence sgd
#############################################################################
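The dual solver [19] exercised by the test above can be reproduced standalone. The sketch below mirrors only calls visible in this diff (`ot.utils.unif`, `ot.dist`, `ot.stochastic.solve_dual_entropic`) with the updated iteration count, batch size, and tolerance; the cost-matrix construction is an assumption about the elided test body.

```python
# Standalone sketch of the dual SGD test above; hyperparameters
# follow the updated test, M construction is assumed.
import numpy as np
import ot

n = 10
reg = 1
numItermax = 15000
batch_size = 10

rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)
M = ot.dist(x, x)

G = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size,
                                      numItermax=numItermax)

# both marginals of the recovered plan should match u
np.testing.assert_allclose(u, G.sum(1), atol=1e-04)
np.testing.assert_allclose(u, G.sum(0), atol=1e-04)
```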
@@ -168,10 +168,11 @@ def test_dual_sgd_sinkhorn():
# test all dual algorithms
n = 10
reg = 1
- nb_iter = 300000
- batch_size = 8
+ nb_iter = 150000
+ batch_size = 10
rng = np.random.RandomState(0)
+ # Test uniform
x = rng.randn(n, 2)
u = ot.utils.unif(n)
zero = np.zeros(n)
@@ -184,8 +185,37 @@ def test_dual_sgd_sinkhorn():
# check constraints
np.testing.assert_allclose(
- zero, (G_sgd - G_sinkhorn).sum(1), atol=1e-02) # cf convergence sgd
+ zero, (G_sgd - G_sinkhorn).sum(1), atol=1e-04) # cf convergence sgd
+ np.testing.assert_allclose(
+ zero, (G_sgd - G_sinkhorn).sum(0), atol=1e-04) # cf convergence sgd
+ np.testing.assert_allclose(
+ G_sgd, G_sinkhorn, atol=1e-04) # cf convergence sgd
+
+ # Test gaussian
+ n = 30
+ n_source = n
+ n_target = n
+ reg = 1
+ numItermax = 150000
+ batch_size = 30
+ zero = np.zeros(n)  # redefine for the new problem size n = 30
+
+ a = ot.datasets.get_1D_gauss(n_source, m=15, s=5)  # m = mean, s = std
+ b = ot.datasets.get_1D_gauss(n_target, m=15, s=5)
+ X_source = np.arange(n_source, dtype=np.float64)
+ Y_target = np.arange(n_target, dtype=np.float64)
+ M = ot.dist(X_source.reshape((n_source, 1)), Y_target.reshape((n_target, 1)))
+ M /= M.max()
+
+ G_sgd = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size,
+ numItermax=numItermax)
+
+ G_sinkhorn = ot.sinkhorn(a, b, M, reg)
+
+ # check constraints
+ np.testing.assert_allclose(
+ zero, (G_sgd - G_sinkhorn).sum(1), atol=1e-04) # cf convergence sgd
np.testing.assert_allclose(
- zero, (G_sgd - G_sinkhorn).sum(0), atol=1e-02) # cf convergence sgd
+ zero, (G_sgd - G_sinkhorn).sum(0), atol=1e-04) # cf convergence sgd
np.testing.assert_allclose(
- G_sgd, G_sinkhorn, atol=1e-02) # cf convergence sgd
+ G_sgd, G_sinkhorn, atol=1e-04) # cf convergence sgd