diff options
author | Nathan Cassereau <84033440+ncassereau-idris@users.noreply.github.com> | 2021-12-09 17:55:12 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2021-12-09 17:55:12 +0100 |
commit | f8d871e8c6f15009f559ece6a12eb8d8891c60fb (patch) | |
tree | 9aa46b2fcc8046c6cddd8e9159a6f607dcf0e1e9 /test/test_sliced.py | |
parent | b3dc68feac355fa94c4237f4ecad65edc9f7a7e8 (diff) |
[MRG] Tensorflow backend & Benchmarker & Myst_parser (#316)
* First batch of tf methods (to be continued)
* Second batch of methods (yet to debug)
* tensorflow for cpu
* add tf requirement
* pep8 + bug
* small changes
* attempt to solve pymanopt bug with tf2
* attempt #2
* attempt #3
* attempt 4
* docstring
* correct pep8 violation introduced in merge conflicts resolution
* attempt 5
* attempt 6
* just a random try
* Revert "just a random try"
This reverts commit 8223e768bfe33635549fb66cca2267514a60ebbf.
* GPU tests for tensorflow
* pep8
* attempt to solve issue with m2r2
* Remove transpose backend method
* first draft of benchmarker (need to correct time measurement)
* prettier bench table
* Bitsize and prettier device methods
* prettified table bench
* Bug corrected (results were mixed up in the final table)
* Better perf counter (for GPU support)
* pep8
* EMD bench
* solve bug if no GPU available
* pep8
* warning about tensorflow numpy api being required in the backend.py docstring
* Bug solve in backend docstring
* not covering code which requires a GPU
* Tensorflow gradients manipulation tested
* Number of warmup runs is now customizable
* typo
* Remove some warnings while building docs
* Change prettier_device to device_type in backend
* Correct JAX mistakes preventing to see the CPU if a GPU is present
* Attempt to solve JAX bug in case no GPU is found
* Reworked benchmarks order and results storage & clear GPU after usage by benchmark
* Add bench to backend docstring
* better benchs
* remove useless stuff
* Better device_type
* Now using MYST_PARSER and solving links issue in the README.md / online docs
Diffstat (limited to 'test/test_sliced.py')
-rw-r--r-- | test/test_sliced.py | 57 |
1 file changed, 57 insertions, 0 deletions
diff --git a/test/test_sliced.py b/test/test_sliced.py index 245202c..91e0961 100644 --- a/test/test_sliced.py +++ b/test/test_sliced.py @@ -10,6 +10,7 @@ import pytest import ot from ot.sliced import get_random_projections +from ot.backend import tf def test_get_random_projections(): @@ -161,6 +162,34 @@ def test_sliced_backend_type_devices(nx): nx.assert_same_dtype_device(xb, valb) +@pytest.mark.skipif(not tf, reason="tf not installed") +def test_sliced_backend_device_tf(): + nx = ot.backend.TensorflowBackend() + n = 100 + rng = np.random.RandomState(0) + x = rng.randn(n, 2) + y = rng.randn(2 * n, 2) + P = rng.randn(2, 20) + P = P / np.sqrt((P**2).sum(0, keepdims=True)) + + # Check that everything stays on the CPU + with tf.device("/CPU:0"): + xb = nx.from_numpy(x) + yb = nx.from_numpy(y) + Pb = nx.from_numpy(P) + valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb) + nx.assert_same_dtype_device(xb, valb) + + if len(tf.config.list_physical_devices('GPU')) > 0: + # Check that everything happens on the GPU + xb = nx.from_numpy(x) + yb = nx.from_numpy(y) + Pb = nx.from_numpy(P) + valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb) + nx.assert_same_dtype_device(xb, valb) + assert nx.dtype_device(valb)[1].startswith("GPU") + + def test_max_sliced_backend(nx): n = 100 @@ -211,3 +240,31 @@ def test_max_sliced_backend_type_devices(nx): valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) + + +@pytest.mark.skipif(not tf, reason="tf not installed") +def test_max_sliced_backend_device_tf(): + nx = ot.backend.TensorflowBackend() + n = 100 + rng = np.random.RandomState(0) + x = rng.randn(n, 2) + y = rng.randn(2 * n, 2) + P = rng.randn(2, 20) + P = P / np.sqrt((P**2).sum(0, keepdims=True)) + + # Check that everything stays on the CPU + with tf.device("/CPU:0"): + xb = nx.from_numpy(x) + yb = nx.from_numpy(y) + Pb = nx.from_numpy(P) + valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) + 
nx.assert_same_dtype_device(xb, valb) + + if len(tf.config.list_physical_devices('GPU')) > 0: + # Check that everything happens on the GPU + xb = nx.from_numpy(x) + yb = nx.from_numpy(y) + Pb = nx.from_numpy(P) + valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) + nx.assert_same_dtype_device(xb, valb) + assert nx.dtype_device(valb)[1].startswith("GPU") |