summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--pyspike/cython/cython_distances.pyx10
-rw-r--r--pyspike/cython/python_backend.py21
-rw-r--r--test/test_distance.py1
-rw-r--r--test/test_regression/regression_random_results_cSPIKY.matbin0 -> 149130 bytes
-rw-r--r--test/test_regression/regression_random_spikes.matbin0 -> 16241579 bytes
-rw-r--r--test/test_regression/test_regression_random_spikes.py113
6 files changed, 140 insertions, 5 deletions
diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx
index 41baa60..6c6e7e5 100644
--- a/pyspike/cython/cython_distances.pyx
+++ b/pyspike/cython/cython_distances.pyx
@@ -184,7 +184,8 @@ def spike_distance_cython(double[:] t1, double[:] t2,
N1 = len(t1)
N2 = len(t2)
- with nogil: # release the interpreter to allow multithreading
+ # with nogil: # release the interpreter to allow multithreading
+ if True:
t_last = t_start
# t_p1 = t_start
# t_p2 = t_start
@@ -193,6 +194,7 @@ def spike_distance_cython(double[:] t1, double[:] t2,
t_aux1[1] = fmax(t_end, t1[N1-1]+(t1[N1-1]-t1[N1-2]))
t_aux2[0] = fmin(t_start, t2[0]-(t2[1]-t2[0]))
t_aux2[1] = fmax(t_end, t2[N2-1]+(t2[N2-1]-t2[N2-2]))
+ # print "aux spikes %.15f, %.15f ; %.15f, %.15f" % (t_aux1[0], t_aux1[1], t_aux2[0], t_aux2[1])
t_p1 = t_start if (t1[0] == t_start) else t_aux1[0]
t_p2 = t_start if (t2[0] == t_start) else t_aux2[0]
if t1[0] > t_start:
@@ -207,7 +209,8 @@ def spike_distance_cython(double[:] t1, double[:] t2,
else:
t_f1 = t1[1]
dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_aux2[0], t_aux2[1])
- dt_p1 = 0.0
+ # dt_p1 = t_start-t_p2 # 0.0
+ dt_p1 = get_min_dist_cython(t_p1, t2, N2, 0, t_aux2[0], t_aux2[1])
isi1 = t1[1]-t1[0]
s1 = dt_p1
index1 = 0
@@ -223,7 +226,8 @@ def spike_distance_cython(double[:] t1, double[:] t2,
else:
t_f2 = t2[1]
dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_aux1[0], t_aux1[1])
- dt_p2 = 0.0
+ # dt_p2 = t_start-t_p1 # 0.0
+ dt_p2 = get_min_dist_cython(t_p2, t1, N1, 0, t_aux1[0], t_aux1[1])
isi2 = t2[1]-t2[0]
s2 = dt_p2
index2 = 0
diff --git a/pyspike/cython/python_backend.py b/pyspike/cython/python_backend.py
index a5f7ae4..a007a7f 100644
--- a/pyspike/cython/python_backend.py
+++ b/pyspike/cython/python_backend.py
@@ -153,6 +153,8 @@ def spike_distance_python(spikes1, spikes2, t_start, t_end):
t_p1 = t_start if (t1[0] == t_start) else t_aux1[0]
t_p2 = t_start if (t2[0] == t_start) else t_aux2[0]
+ print "t_aux1", t_aux1, ", t_aux2:", t_aux2
+
spike_events[0] = t_start
if t1[0] > t_start:
t_f1 = t1[0]
@@ -163,7 +165,8 @@ def spike_distance_python(spikes1, spikes2, t_start, t_end):
s1 = dt_p1
index1 = -1
else:
- dt_p1 = 0.0
+ # dt_p1 = t_start-t_p2
+ dt_p1 = get_min_dist(t_p1, t2, 0, t_aux2[0], t_aux2[1])
t_f1 = t1[1]
dt_f1 = get_min_dist(t_f1, t2, 0, t_aux2[0], t_aux2[1])
isi1 = t1[1]-t1[0]
@@ -179,13 +182,18 @@ def spike_distance_python(spikes1, spikes2, t_start, t_end):
s2 = dt_p2
index2 = -1
else:
- dt_p2 = 0.0
+ dt_p2 = t_start-t_p1
+ dt_p2 = get_min_dist(t_p2, t1, 0, t_aux1[0], t_aux1[1])
t_f2 = t2[1]
dt_f2 = get_min_dist(t_f2, t1, 0, t_aux1[0], t_aux1[1])
isi2 = t2[1]-t2[0]
s2 = dt_p2
index2 = 0
+ print "t_p1:", repr(t_p1), ", t_f1:", repr(t_f1), ", dt_p1:", repr(dt_p1), ", dt_f1:", repr(dt_f1)
+ print "t_p2:", repr(t_p2), ", t_f2:", repr(t_f2), ", dt_p2:", repr(dt_p2), ", dt_f2:", repr(dt_f2)
+ print "s1: ", repr(s1), ", s2:", repr(s2)
+
y_starts[0] = (s1*isi2 + s2*isi1) / (0.5*(isi1+isi2)**2)
index = 1
@@ -276,6 +284,11 @@ def spike_distance_python(spikes1, spikes2, t_start, t_end):
dt_f2 = dt_p2
isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2])
index += 1
+
+ print "t_p1:", repr(t_p1), ", t_f1:", repr(t_f1), ", dt_p1:", repr(dt_p1), ", dt_f1:", repr(dt_f1)
+ print "t_p2:", repr(t_p2), ", t_f2:", repr(t_f2), ", dt_p2:", repr(dt_p2), ", dt_f2:", repr(dt_f2)
+ print "s1: ", repr(s1), ", s2:", repr(s2)
+
# the last event is the interval end
if spike_events[index-1] == t_end:
index -= 1
@@ -288,6 +301,10 @@ def spike_distance_python(spikes1, spikes2, t_start, t_end):
s2 = dt_f2 # *(t_end-t2[N2-1])/isi2
y_ends[index-1] = (s1*isi2 + s2*isi1) / (0.5*(isi1+isi2)**2)
+ print "t_p1:", repr(t_p1), ", t_f1:", repr(t_f1), ", dt_p1:", repr(dt_p1), ", dt_f1:", repr(dt_f1)
+ print "t_p2:", repr(t_p2), ", t_f2:", repr(t_f2), ", dt_p2:", repr(dt_p2), ", dt_f2:", repr(dt_f2)
+ print "s1: ", repr(s1), ", s2:", repr(s2)
+
# use only the data added above
# could be less than original length due to equal spike times
return spike_events[:index+1], y_starts[:index], y_ends[:index]
diff --git a/test/test_distance.py b/test/test_distance.py
index 8cf81e2..083d8a3 100644
--- a/test/test_distance.py
+++ b/test/test_distance.py
@@ -20,6 +20,7 @@ from pyspike import SpikeTrain
import os
TEST_PATH = os.path.dirname(os.path.realpath(__file__))
+
def test_isi():
# generate two spike trains:
t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
diff --git a/test/test_regression/regression_random_results_cSPIKY.mat b/test/test_regression/regression_random_results_cSPIKY.mat
new file mode 100644
index 0000000..47c9b50
--- /dev/null
+++ b/test/test_regression/regression_random_results_cSPIKY.mat
Binary files differ
diff --git a/test/test_regression/regression_random_spikes.mat b/test/test_regression/regression_random_spikes.mat
new file mode 100644
index 0000000..e5ebeb1
--- /dev/null
+++ b/test/test_regression/regression_random_spikes.mat
Binary files differ
diff --git a/test/test_regression/test_regression_random_spikes.py b/test/test_regression/test_regression_random_spikes.py
new file mode 100644
index 0000000..757bab2
--- /dev/null
+++ b/test/test_regression/test_regression_random_spikes.py
@@ -0,0 +1,113 @@
+""" regression benchmark
+
+Copyright 2015, Mario Mulansky <mario.mulansky@gmx.net>
+
+Distributed under the BSD License
+"""
+import numpy as np
+from scipy.io import loadmat
+import pyspike as spk
+
+from numpy.testing import assert_almost_equal
+
+spk.disable_backend_warning = True
+
+
+def test_regression_random():
+
+ spike_file = "test/test_regression/regression_random_spikes.mat"
+ spikes_name = "spikes"
+ result_name = "Distances"
+ result_file = "test/test_regression/regression_random_results_cSPIKY.mat"
+
+ spike_train_sets = loadmat(spike_file)[spikes_name][0]
+ results_cSPIKY = loadmat(result_file)[result_name]
+
+ for i, spike_train_data in enumerate(spike_train_sets):
+ spike_trains = []
+ for spikes in spike_train_data[0]:
+ spike_trains.append(spk.SpikeTrain(spikes.flatten(), 100.0))
+
+ isi = spk.isi_distance_multi(spike_trains)
+ spike = spk.spike_distance_multi(spike_trains)
+ # spike_sync = spk.spike_sync_multi(spike_trains)
+
+ assert_almost_equal(isi, results_cSPIKY[i][0], decimal=14,
+ err_msg="Index: %d, ISI" % i)
+ assert_almost_equal(spike, results_cSPIKY[i][1], decimal=14,
+ err_msg="Index: %d, SPIKE" % i)
+
+
+def check_regression_dataset(spike_file="benchmark.mat",
+ spikes_name="spikes",
+ result_file="results_cSPIKY.mat",
+ result_name="Distances"):
+    """ Debugging function """
+ np.set_printoptions(precision=15)
+
+ spike_train_sets = loadmat(spike_file)[spikes_name][0]
+
+ results_cSPIKY = loadmat(result_file)[result_name]
+
+ err_max = 0.0
+ err_max_ind = -1
+ err_count = 0
+
+ for i, spike_train_data in enumerate(spike_train_sets):
+ spike_trains = []
+ for spikes in spike_train_data[0]:
+ spike_trains.append(spk.SpikeTrain(spikes.flatten(), 100.0))
+
+ isi = spk.isi_distance_multi(spike_trains)
+ spike = spk.spike_distance_multi(spike_trains)
+ # spike_sync = spk.spike_sync_multi(spike_trains)
+
+ if abs(isi - results_cSPIKY[i][0]) > 1E-14:
+ print "Error in ISI:", i, isi, results_cSPIKY[i][0]
+ print "Spike trains:"
+ for st in spike_trains:
+ print st.spikes
+
+ err = abs(spike - results_cSPIKY[i][1])
+ if err > 1E-14:
+ err_count += 1
+ if err > err_max:
+ err_max = err
+ err_max_ind = i
+
+ print "Total Errors:", err_count
+
+ if err_max_ind > -1:
+ print "Max SPIKE distance error:", err_max, "at index:", err_max_ind
+ spike_train_data = spike_train_sets[err_max_ind]
+ for spikes in spike_train_data[0]:
+ print spikes.flatten()
+
+
+def check_single_spike_train_set(index):
+    """ Debugging function """
+ np.set_printoptions(precision=15)
+
+ spike_train_sets = loadmat("benchmark.mat")['spikes'][0]
+
+ results_cSPIKY = loadmat("results_cSPIKY.mat")['Distances']
+
+ spike_train_data = spike_train_sets[index]
+
+ spike_trains = []
+ for spikes in spike_train_data[0]:
+ print "Spikes:", spikes.flatten()
+ spike_trains.append(spk.SpikeTrain(spikes.flatten(), 100.0))
+
+ print spk.spike_distance_multi(spike_trains)
+
+ print results_cSPIKY[index][1]
+
+ print spike_trains[1].spikes
+
+
+if __name__ == "__main__":
+
+ test_regression_random()
+ # check_regression_dataset()
+ # check_single_spike_train_set(7633)