author    Gard Spreemann <gspr@nonempty.org>    2021-12-31 10:46:47 +0100
committer Gard Spreemann <gspr@nonempty.org>    2021-12-31 10:48:22 +0100
commit    a5e4ecb7ae9ff950dcb54aa155f1d12c6bdb8c7a (patch)
tree      2ae0ad27f05acb8074da75e8fc8026354a9bf155
parent    5af0437082f9158dcb65bbea54624c2341077a5b (diff)
Add patch to remove benchmarks (tag: debian/0.8.1+dfsg-2)
This is just a temporary fix for #1002879; the problem should instead be fixed properly upstream.
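For context on the bug: #1002879 reports that the benchmarks end up installed as a stray top-level "benchmarks" package in site-packages. That happens when setuptools package discovery picks up the benchmarks/ directory, so the proper upstream fix alluded to above would be to exclude it at build time rather than carry a deletion patch. A minimal sketch, assuming upstream uses setuptools.find_packages() in its setup.py (hypothetical; the actual setup.py is not part of this commit):

    # Hypothetical upstream setup.py sketch (not the file POT ships):
    # excluding the benchmarks package keeps it out of site-packages,
    # which would make the Debian patch below unnecessary.
    from setuptools import setup, find_packages

    setup(
        name="POT",
        packages=find_packages(exclude=["benchmarks", "benchmarks.*"]),
    )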
-rw-r--r--  debian/changelog                                                                  |   6
-rw-r--r--  debian/patches/0001-Remove-benchmarks-to-prevent-installation-in-wrong-p.patch   | 231
-rw-r--r--  debian/patches/series                                                             |   1
3 files changed, 238 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog
index 03e245a..edab601 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+python-pot (0.8.1+dfsg-2) unstable; urgency=medium
+
+  * Add patch to remove benchmarks. (Closes: #1002879)
+
+ -- Gard Spreemann <gspr@nonempty.org>  Fri, 31 Dec 2021 10:47:43 +0100
+
python-pot (0.8.1+dfsg-1) unstable; urgency=medium

  * New upstream version.
diff --git a/debian/patches/0001-Remove-benchmarks-to-prevent-installation-in-wrong-p.patch b/debian/patches/0001-Remove-benchmarks-to-prevent-installation-in-wrong-p.patch
new file mode 100644
index 0000000..28ae7d5
--- /dev/null
+++ b/debian/patches/0001-Remove-benchmarks-to-prevent-installation-in-wrong-p.patch
@@ -0,0 +1,231 @@
+From: Gard Spreemann <gspr@nonempty.org>
+Date: Fri, 31 Dec 2021 10:46:27 +0100
+Subject: Remove benchmarks to prevent installation in wrong path
+
+---
+ benchmarks/__init__.py | 5 ---
+ benchmarks/benchmark.py | 105 -------------------------------------------
+ benchmarks/emd.py | 40 -----------------
+ benchmarks/sinkhorn_knopp.py | 42 -----------------
+ 4 files changed, 192 deletions(-)
+ delete mode 100644 benchmarks/__init__.py
+ delete mode 100644 benchmarks/benchmark.py
+ delete mode 100644 benchmarks/emd.py
+ delete mode 100644 benchmarks/sinkhorn_knopp.py
+
+diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
+deleted file mode 100644
+index 37f5e56..0000000
+--- a/benchmarks/__init__.py
++++ /dev/null
+@@ -1,5 +0,0 @@
+-from . import benchmark
+-from . import sinkhorn_knopp
+-from . import emd
+-
+-__all__= ["benchmark", "sinkhorn_knopp", "emd"]
+diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
+deleted file mode 100644
+index 7973c6b..0000000
+--- a/benchmarks/benchmark.py
++++ /dev/null
+@@ -1,105 +0,0 @@
+-# /usr/bin/env python3
+-# -*- coding: utf-8 -*-
+-
+-from ot.backend import get_backend_list, jax, tf
+-import gc
+-
+-
+-def setup_backends():
+-    if jax:
+-        from jax.config import config
+-        config.update("jax_enable_x64", True)
+-
+-    if tf:
+-        from tensorflow.python.ops.numpy_ops import np_config
+-        np_config.enable_numpy_behavior()
+-
+-
+-def exec_bench(setup, tested_function, param_list, n_runs, warmup_runs):
+-    backend_list = get_backend_list()
+-    for i, nx in enumerate(backend_list):
+-        if nx.__name__ == "tf" and i < len(backend_list) - 1:
+-            # Tensorflow should be the last one to be benchmarked because
+-            # as far as I'm aware, there is no way to force it to release
+-            # GPU memory. Hence, if any other backend is benchmarked after
+-            # Tensorflow and requires the usage of a GPU, it will not have the
+-            # full memory available and you may have a GPU Out Of Memory error
+-            # even though your GPU can technically hold your tensors in memory.
+-            backend_list.pop(i)
+-            backend_list.append(nx)
+-            break
+-
+-    inputs = [setup(param) for param in param_list]
+-    results = dict()
+-    for nx in backend_list:
+-        for i in range(len(param_list)):
+-            print(nx, param_list[i])
+-            args = inputs[i]
+-            results_nx = nx._bench(
+-                tested_function,
+-                *args,
+-                n_runs=n_runs,
+-                warmup_runs=warmup_runs
+-            )
+-            gc.collect()
+-            results_nx_with_param_in_key = dict()
+-            for key in results_nx:
+-                new_key = (param_list[i], *key)
+-                results_nx_with_param_in_key[new_key] = results_nx[key]
+-            results.update(results_nx_with_param_in_key)
+-    return results
+-
+-
+-def convert_to_html_table(results, param_name, main_title=None, comments=None):
+-    string = "<table>\n"
+-    keys = list(results.keys())
+-    params, names, devices, bitsizes = zip(*keys)
+-
+-    devices_names = sorted(list(set(zip(devices, names))))
+-    params = sorted(list(set(params)))
+-    bitsizes = sorted(list(set(bitsizes)))
+-    length = len(devices_names) + 1
+-    cpus_cols = list(devices).count("CPU") / len(bitsizes) / len(params)
+-    gpus_cols = list(devices).count("GPU") / len(bitsizes) / len(params)
+-    assert cpus_cols + gpus_cols == len(devices_names)
+-
+-    if main_title is not None:
+-        string += f'<tr><th align="center" colspan="{length}">{str(main_title)}</th></tr>\n'
+-
+-    for i, bitsize in enumerate(bitsizes):
+-
+-        if i != 0:
+-            string += f'<tr><td colspan="{length}">&nbsp;</td></tr>\n'
+-
+-        # make bitsize header
+-        text = f"{bitsize} bits"
+-        if comments is not None:
+-            text += " - "
+-            if isinstance(comments, (tuple, list)) and len(comments) == len(bitsizes):
+-                text += str(comments[i])
+-            else:
+-                text += str(comments)
+-        string += f'<tr><th align="center">Bitsize</th>'
+-        string += f'<th align="center" colspan="{length - 1}">{text}</th></tr>\n'
+-
+-        # make device header
+-        string += f'<tr><th align="center">Device</th>'
+-        string += f'<th align="center" colspan="{cpus_cols}">CPU</th>'
+-        string += f'<th align="center" colspan="{gpus_cols}">GPU</th></tr>\n'
+-
+-        # make param_name / backend header
+-        string += f'<tr><th align="center">{param_name}</th>'
+-        for device, name in devices_names:
+-            string += f'<th align="center">{name}</th>'
+-        string += "</tr>\n"
+-
+-        # make results rows
+-        for param in params:
+-            string += f'<tr><td align="center">{param}</td>'
+-            for device, name in devices_names:
+-                key = (param, name, device, bitsize)
+-                string += f'<td align="center">{results[key]:.4f}</td>'
+-            string += "</tr>\n"
+-
+-    string += "</table>"
+-    return string
+diff --git a/benchmarks/emd.py b/benchmarks/emd.py
+deleted file mode 100644
+index 9f64863..0000000
+--- a/benchmarks/emd.py
++++ /dev/null
+@@ -1,40 +0,0 @@
+-# /usr/bin/env python3
+-# -*- coding: utf-8 -*-
+-
+-import numpy as np
+-import ot
+-from .benchmark import (
+-    setup_backends,
+-    exec_bench,
+-    convert_to_html_table
+-)
+-
+-
+-def setup(n_samples):
+-    rng = np.random.RandomState(789465132)
+-    x = rng.randn(n_samples, 2)
+-    y = rng.randn(n_samples, 2)
+-
+-    a = ot.utils.unif(n_samples)
+-    M = ot.dist(x, y)
+-    return a, M
+-
+-
+-if __name__ == "__main__":
+-    n_runs = 100
+-    warmup_runs = 10
+-    param_list = [50, 100, 500, 1000, 2000, 5000]
+-
+-    setup_backends()
+-    results = exec_bench(
+-        setup=setup,
+-        tested_function=lambda a, M: ot.emd(a, a, M),
+-        param_list=param_list,
+-        n_runs=n_runs,
+-        warmup_runs=warmup_runs
+-    )
+-    print(convert_to_html_table(
+-        results,
+-        param_name="Sample size",
+-        main_title=f"EMD - Averaged on {n_runs} runs"
+-    ))
+diff --git a/benchmarks/sinkhorn_knopp.py b/benchmarks/sinkhorn_knopp.py
+deleted file mode 100644
+index 3a1ef3f..0000000
+--- a/benchmarks/sinkhorn_knopp.py
++++ /dev/null
+@@ -1,42 +0,0 @@
+-# /usr/bin/env python3
+-# -*- coding: utf-8 -*-
+-
+-import numpy as np
+-import ot
+-from .benchmark import (
+-    setup_backends,
+-    exec_bench,
+-    convert_to_html_table
+-)
+-
+-
+-def setup(n_samples):
+-    rng = np.random.RandomState(123456789)
+-    a = rng.rand(n_samples // 4, 100)
+-    b = rng.rand(n_samples, 100)
+-
+-    wa = ot.unif(n_samples // 4)
+-    wb = ot.unif(n_samples)
+-
+-    M = ot.dist(a.copy(), b.copy())
+-    return wa, wb, M
+-
+-
+-if __name__ == "__main__":
+-    n_runs = 100
+-    warmup_runs = 10
+-    param_list = [50, 100, 500, 1000, 2000, 5000]
+-
+-    setup_backends()
+-    results = exec_bench(
+-        setup=setup,
+-        tested_function=lambda *args: ot.bregman.sinkhorn(*args, reg=1, stopThr=1e-7),
+-        param_list=param_list,
+-        n_runs=n_runs,
+-        warmup_runs=warmup_runs
+-    )
+-    print(convert_to_html_table(
+-        results,
+-        param_name="Sample size",
+-        main_title=f"Sinkhorn Knopp - Averaged on {n_runs} runs"
+-    ))
diff --git a/debian/patches/series b/debian/patches/series
new file mode 100644
index 0000000..a82b8eb
--- /dev/null
+++ b/debian/patches/series
@@ -0,0 +1 @@
+0001-Remove-benchmarks-to-prevent-installation-in-wrong-p.patch