From 53fdbb7ddb2a3a5e8d6f75ad69f0da90d3b6b5e6 Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Wed, 26 Aug 2015 12:10:01 +0200
Subject: reorganized directionality module

---
 setup.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'setup.py')

diff --git a/setup.py b/setup.py
index 960c684..c130fbd 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ else:
 if os.path.isfile("pyspike/cython/cython_add.c") and \
         os.path.isfile("pyspike/cython/cython_profiles.c") and \
         os.path.isfile("pyspike/cython/cython_distances.c") and \
-        os.path.isfile("pyspike/directionality/cython/cython_directionality.c"):
+        os.path.isfile("pyspike/cython/cython_directionality.c"):
     use_c = True
 else:
     use_c = False
@@ -40,8 +40,8 @@ if use_cython:  # Cython is available, compile .pyx -> .c
                   ["pyspike/cython/cython_profiles.pyx"]),
         Extension("pyspike.cython.cython_distances",
                   ["pyspike/cython/cython_distances.pyx"]),
-        Extension("pyspike.directionality.cython.cython_directionality",
-                  ["pyspike/directionality/cython/cython_directionality.pyx"])
+        Extension("pyspike.cython.cython_directionality",
+                  ["pyspike/cython/cython_directionality.pyx"])
     ]
     cmdclass.update({'build_ext': build_ext})
 elif use_c:  # c files are there, compile to binaries
@@ -52,8 +52,8 @@ elif use_c:  # c files are there, compile to binaries
                   ["pyspike/cython/cython_profiles.c"]),
         Extension("pyspike.cython.cython_distances",
                   ["pyspike/cython/cython_distances.c"]),
-        Extension("pyspike.directionality.cython.cython_directionality",
-                  ["pyspike/directionality/cython/cython_directionality.c"])
+        Extension("pyspike.cython.cython_directionality",
+                  ["pyspike/cython/cython_directionality.c"])
     ]

# neither cython nor c files available -> automatic fall-back to python backend
@@ -93,7 +93,7 @@ train similarity',
     package_data={
         'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c',
                     'cython/cython_distances.c',
-                    'directionality/cython/cython_directionality.c'],
+                    'cython/cython_directionality.c'],
         'test': ['Spike_testdata.txt']
     }
 )
-- 
cgit v1.2.3

From b9850d8c3e35290858e4ef0b2f05f66b0f7397b2 Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sat, 10 Oct 2015 20:45:09 +0200
Subject: spike sync filtering, cython sim ann

Added function for filtering out events based on a threshold for the
spike sync values. Useful for focusing on synchronous events during
directionality analysis.

Also added cython version of simulated annealing for performance.
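The new filtering entry point is easiest to see in use. A minimal sketch,
assuming the filter_by_spike_sync signature introduced in this patch and a
call to pyspike's existing text-file loader; the file name, edges and
threshold below are purely illustrative:

    import pyspike as spk

    # load spike trains from a text file, one train per line
    # (loader as exported in pyspike/__init__.py; file name and
    # observation interval are made up for this sketch)
    spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                                  edges=(0, 4000))

    # keep only spikes whose multivariate spike-sync value exceeds the
    # threshold; the second list holds the spikes that were filtered out
    kept, removed = spk.filter_by_spike_sync(spike_trains, threshold=0.5,
                                             return_removed_spikes=True)

    print("train 0: %d of %d spikes kept" %
          (len(kept[0].spikes), len(spike_trains[0].spikes)))

With the default return_removed_spikes=False, only the list of filtered
spike trains is returned.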
--- pyspike/__init__.py | 3 +- pyspike/cython/cython_distances.pyx | 200 ++++++++++++++++++++++++++ pyspike/cython/cython_profiles.pyx | 14 +- pyspike/cython/cython_simulated_annealing.pyx | 82 +++++++++++ pyspike/spike_directionality.py | 54 ++++--- pyspike/spike_sync.py | 19 ++- setup.py | 14 +- test/test_sync_filter.py | 61 +++++++- 8 files changed, 406 insertions(+), 41 deletions(-) create mode 100644 pyspike/cython/cython_simulated_annealing.pyx (limited to 'setup.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 0558ce0..4c1e47e 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -18,7 +18,8 @@ from isi_distance import isi_profile, isi_distance, isi_profile_multi,\ from spike_distance import spike_profile, spike_distance, spike_profile_multi,\ spike_distance_multi, spike_distance_matrix from spike_sync import spike_sync_profile, spike_sync,\ - spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix + spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix,\ + filter_by_spike_sync from psth import psth from spikes import load_spike_trains_from_txt, spike_train_from_string, \ diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx index c4f2349..c017bf9 100644 --- a/pyspike/cython/cython_distances.pyx +++ b/pyspike/cython/cython_distances.pyx @@ -164,6 +164,8 @@ cdef inline double isi_avrg_cython(double isi1, double isi2) nogil: return 0.5*(isi1+isi2)*(isi1+isi2) # alternative definition to obtain ~ 0.5 for Poisson spikes # return 0.5*(isi1*isi1+isi2*isi2) + # another alternative definition without second normalization + # return 0.5*(isi1+isi2) ############################################################ @@ -218,6 +220,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, index2 = 0 y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) index = 1 while index1+index2 < N1+N2-2: @@ -237,6 +241,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p1 s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -253,6 +259,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_p1*(t_end-t1[N1-1])/isi1 # s2 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): index2 += 1 # first calculate the previous interval end value @@ -268,6 +276,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p2 s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -284,6 +294,9 @@ def spike_distance_cython(double[:] t1, double[:] t2, s2 = dt_p2*(t_end-t2[N2-1])/isi2 # s1 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) + else: # t_f1 == t_f2 - generate 
only one event index1 += 1 index2 += 1 @@ -320,6 +333,193 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_f1*(t_end-t1[N1-1])/isi1 s2 = dt_f2*(t_end-t2[N2-1])/isi2 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) + # end nogil + + # use only the data added above + # could be less than original length due to equal spike times + return spike_value / (t_end-t_start) + + +############################################################ +# isi_avrg_rf_cython +############################################################ +cdef inline double isi_avrg_rf_cython(double isi1, double isi2) nogil: + # rate free version + return (isi1+isi2) + + +############################################################ +# spike_distance_rf_cython +############################################################ +def spike_distance_rf_cython(double[:] t1, double[:] t2, + double t_start, double t_end): + + cdef int N1, N2, index1, index2, index + cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 + cdef double isi1, isi2, s1, s2 + cdef double y_start, y_end, t_last, t_current, spike_value + + spike_value = 0.0 + + N1 = len(t1) + N2 = len(t2) + + with nogil: # release the interpreter to allow multithreading + t_last = t_start + t_p1 = t_start + t_p2 = t_start + if t1[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f1 = t1[0] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) + dt_p1 = dt_f1 + s1 = dt_p1*(t_f1-t_start)/isi1 + index1 = -1 + else: + t_f1 = t1[1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + dt_p1 = 0.0 + isi1 = t1[1]-t1[0] + s1 = dt_p1 + index1 = 0 + if t2[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f2 = t2[0] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = dt_f2 + isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) + s2 = dt_p2*(t_f2-t_start)/isi2 + index2 = -1 + else: + t_f2 = t2[1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = 0.0 + isi2 = t2[1]-t2[0] + s2 = dt_p2 + index2 = 0 + + # y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + index = 1 + + while index1+index2 < N1+N2-2: + # print(index, index1, index2) + if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): + index1 += 1 + # first calculate the previous interval end value + s1 = dt_f1*(t_f1-t_p1) / isi1 + # the previous time now was the following time before: + dt_p1 = dt_f1 + t_p1 = t_f1 # t_p1 contains the current time point + # get the next time + if index1 < N1-1: + t_f1 = t1[index1+1] + else: + t_f1 = t_end + t_curr = t_p1 + s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 + # y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index1 < N1-1: + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1-t_p1 + s1 = dt_p1 + else: + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # s1 needs adjustment due to change of isi1 + s1 = dt_p1*(t_end-t1[N1-1])/isi1 + # s2 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + 
s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): + index2 += 1 + # first calculate the previous interval end value + s2 = dt_f2*(t_f2-t_p2) / isi2 + # the previous time now was the following time before: + dt_p2 = dt_f2 + t_p2 = t_f2 # t_p2 contains the current time point + # get the next time + if index2 < N2-1: + t_f2 = t2[index2+1] + else: + t_f2 = t_end + t_curr = t_p2 + s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index2 < N2-1: + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2-t_p2 + s2 = dt_p2 + else: + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + # s2 needs adjustment due to change of isi2 + s2 = dt_p2*(t_end-t2[N2-1])/isi2 + # s1 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + else: # t_f1 == t_f2 - generate only one event + index1 += 1 + index2 += 1 + t_p1 = t_f1 + t_p2 = t_f2 + dt_p1 = 0.0 + dt_p2 = 0.0 + t_curr = t_f1 + y_end = 0.0 + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + y_start = 0.0 + if index1 < N1-1: + t_f1 = t1[index1+1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1 - t_p1 + else: + t_f1 = t_end + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + if index2 < N2-1: + t_f2 = t2[index2+1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2 - t_p2 + else: + t_f2 = t_end + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + index += 1 + t_last = t_curr + # isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + s1 = dt_f1*(t_end-t1[N1-1])/isi1 + s2 = dt_f2*(t_end-t2[N2-1])/isi2 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) # end nogil diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index 3b33c5a..61d1b18 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -442,18 +442,20 @@ def coincidence_single_profile_cython(double[:] spikes1, double[:] spikes2, cdef double tau for i in xrange(N1): while j < N2-1 and spikes2[j+1] < spikes1[i]: + # move forward until spikes2[j] is the last spike before spikes1[i] + # note that if spikes2[j] is after spikes1[i] we dont do anything j += 1 tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) - print i, j, spikes1[i], spikes2[j], tau - if j > -1 and spikes1[i]-spikes2[j] < tau: + if j > -1 and fabs(spikes1[i]-spikes2[j]) < tau: # current spike in st1 is coincident c[i] = 1 - if j < N2-1: + if j < N2-1 and spikes2[j] < spikes1[i]: + # in case spikes2[j] is before spikes1[i] it has to be the one + # right before (see above), hence we move one forward and also + # check the next spike j += 1 tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) - print i, j, 
spikes1[i], spikes2[j], tau - if spikes2[j]-spikes1[i] < tau: + if fabs(spikes2[j]-spikes1[i]) < tau: # current spike in st1 is coincident c[i] = 1 - return c diff --git a/pyspike/cython/cython_simulated_annealing.pyx b/pyspike/cython/cython_simulated_annealing.pyx new file mode 100644 index 0000000..be9423c --- /dev/null +++ b/pyspike/cython/cython_simulated_annealing.pyx @@ -0,0 +1,82 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_simulated_annealing.pyx + +cython implementation of a simulated annealing algorithm to find the optimal +spike train order + +Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects +improves the performance of spike_distance by a factor of 10! + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_simulated_annealing.pyx + +which gives: + + cython_simulated_annealing.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport exp +from libc.math cimport fmod +from libc.stdlib cimport rand +from libc.stdlib cimport RAND_MAX + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +def sim_ann_cython(double[:, :] D, double T_start, double T_end, double alpha): + + cdef long N = len(D) + cdef double A = np.sum(np.triu(D, 0)) + cdef long[:] p = np.arange(N) + cdef double T = T_start + cdef long iterations + cdef long succ_iter + cdef long total_iter = 0 + cdef double delta_A + cdef long ind1 + cdef long ind2 + + while T > T_end: + iterations = 0 + succ_iter = 0 + # equilibrate for 100*N steps or 10*N successful steps + while iterations < 100*N and succ_iter < 10*N: + # exchange two rows and cols + # ind1 = np.random.randint(N-1) + ind1 = rand() % (N-1) + if ind1 < N-1: + ind2 = ind1+1 + else: # this can never happen! 
+ ind2 = 0 + delta_A = -2*D[p[ind1], p[ind2]] + if delta_A > 0.0 or exp(delta_A/T) > ((1.0*rand()) / RAND_MAX): + # swap indices + p[ind1], p[ind2] = p[ind2], p[ind1] + A += delta_A + succ_iter += 1 + iterations += 1 + total_iter += iterations + T *= alpha # cool down + if succ_iter == 0: + # no successful step -> we believe we have converged + break + + return p, A, total_iter diff --git a/pyspike/spike_directionality.py b/pyspike/spike_directionality.py index cda7fe3..e1f5f16 100644 --- a/pyspike/spike_directionality.py +++ b/pyspike/spike_directionality.py @@ -242,27 +242,39 @@ def optimal_spike_train_order_from_matrix(D, full_output=False): p = np.arange(N) - T = 2*np.max(D) # starting temperature - T_end = 1E-5 * T # final temperature - alpha = 0.9 # cooling factor - total_iter = 0 - while T > T_end: - iterations = 0 - succ_iter = 0 - while iterations < 100*N and succ_iter < 10*N: - # exchange two rows and cols - ind1 = np.random.randint(N-1) - delta_A = -2*D[p[ind1], p[ind1+1]] - if delta_A > 0.0 or exp(delta_A/T) > np.random.random(): - # swap indices - p[ind1], p[ind1+1] = p[ind1+1], p[ind1] - A += delta_A - succ_iter += 1 - iterations += 1 - total_iter += iterations - T *= alpha # cool down - if succ_iter == 0: - break + T_start = 2*np.max(D) # starting temperature + T_end = 1E-5 * T_start # final temperature + alpha = 0.9 # cooling factor + + from cython.cython_simulated_annealing import sim_ann_cython as sim_ann + + p, A, total_iter = sim_ann(D, T_start, T_end, alpha) + + # T = T_start + # total_iter = 0 + # while T > T_end: + # iterations = 0 + # succ_iter = 0 + # # equilibrate for 100*N steps or 10*N successful steps + # while iterations < 100*N and succ_iter < 10*N: + # # exchange two rows and cols + # ind1 = np.random.randint(N-1) + # if ind1 < N-1: + # ind2 = ind1+1 + # else: # this can never happend + # ind2 = 0 + # delta_A = -2*D[p[ind1], p[ind2]] + # if delta_A > 0.0 or exp(delta_A/T) > np.random.random(): + # # swap indices + # p[ind1], p[ind2] = p[ind2], p[ind1] + # A += delta_A + # succ_iter += 1 + # iterations += 1 + # total_iter += iterations + # T *= alpha # cool down + # if succ_iter == 0: + # break + if full_output: return p, A, total_iter else: diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index f99018a..4dfea6a 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -217,12 +217,14 @@ def spike_sync_matrix(spike_trains, indices=None, interval=None, max_tau=None): ############################################################ # filter_by_spike_sync ############################################################ -def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None): +def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None, + return_removed_spikes=False): """ Removes the spikes with a multi-variate spike_sync value below threshold. """ N = len(spike_trains) filtered_spike_trains = [] + removed_spike_trains = [] # cython implementation try: @@ -230,7 +232,7 @@ def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None): as coincidence_impl except ImportError: if not(pyspike.disable_backend_warning): - print("Warning: coincidence_single_profile_cytho not found. Make \ + print("Warning: coincidence_single_profile_cython not found. 
Make \ sure that PySpike is installed by running\n \ 'python setup.py build_ext --inplace'!\n \ Falling back to slow python backend.") @@ -243,10 +245,19 @@ Falling back to slow python backend.") for i, st in enumerate(spike_trains): coincidences = np.zeros_like(st) - for j in range(N).remove(i): + for j in xrange(N): + if i == j: + continue coincidences += coincidence_impl(st.spikes, spike_trains[j].spikes, st.t_start, st.t_end, max_tau) filtered_spikes = st[coincidences > threshold*(N-1)] filtered_spike_trains.append(SpikeTrain(filtered_spikes, [st.t_start, st.t_end])) - return filtered_spike_trains + if return_removed_spikes: + removed_spikes = st[coincidences <= threshold*(N-1)] + removed_spike_trains.append(SpikeTrain(removed_spikes, + [st.t_start, st.t_end])) + if return_removed_spikes: + return [filtered_spike_trains, removed_spike_trains] + else: + return filtered_spike_trains diff --git a/setup.py b/setup.py index c130fbd..ce35773 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,8 @@ else: if os.path.isfile("pyspike/cython/cython_add.c") and \ os.path.isfile("pyspike/cython/cython_profiles.c") and \ os.path.isfile("pyspike/cython/cython_distances.c") and \ - os.path.isfile("pyspike/cython/cython_directionality.c"): + os.path.isfile("pyspike/cython/cython_directionality.c") and \ + os.path.isfile("pyspike/cython/cython_simulated_annealing.c"): use_c = True else: use_c = False @@ -41,7 +42,9 @@ if use_cython: # Cython is available, compile .pyx -> .c Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.pyx"]), Extension("pyspike.cython.cython_directionality", - ["pyspike/cython/cython_directionality.pyx"]) + ["pyspike/cython/cython_directionality.pyx"]), + Extension("pyspike.cython.cython_simulated_annealing", + ["pyspike/cython/cython_simulated_annealing.pyx"]) ] cmdclass.update({'build_ext': build_ext}) elif use_c: # c files are there, compile to binaries @@ -53,7 +56,9 @@ elif use_c: # c files are there, compile to binaries Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.c"]), Extension("pyspike.cython.cython_directionality", - ["pyspike/cython/cython_directionality.c"]) + ["pyspike/cython/cython_directionality.c"]), + Extension("pyspike.cython.cython_simulated_annealing", + ["pyspike/cython/cython_simulated_annealing.c"]) ] # neither cython nor c files available -> automatic fall-back to python backend @@ -93,7 +98,8 @@ train similarity', package_data={ 'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c', 'cython/cython_distances.c', - 'cython/cython_directionality.c'], + 'cython/cython_directionality.c', + 'cython/cython_simulated_annealing.c'], 'test': ['Spike_testdata.txt'] } ) diff --git a/test/test_sync_filter.py b/test/test_sync_filter.py index ce03b23..66ffcb6 100644 --- a/test/test_sync_filter.py +++ b/test/test_sync_filter.py @@ -17,17 +17,18 @@ import pyspike as spk from pyspike import SpikeTrain -def test_cython(): +def test_single_prof(): st1 = np.array([1.0, 2.0, 3.0, 4.0]) st2 = np.array([1.1, 2.1, 3.8]) + st3 = np.array([0.9, 3.1, 4.1]) # cython implementation try: - from pyspike.cython.cython_profiles import coincidence_single_profile_cython \ - as coincidence_impl + from pyspike.cython.cython_profiles import \ + coincidence_single_profile_cython as coincidence_impl except ImportError: - from pyspike.cython.python_backend import coincidence_single_profile_python \ - as coincidence_impl + from pyspike.cython.python_backend import \ + coincidence_single_profile_python as coincidence_impl sync_prof 
= spk.spike_sync_profile(SpikeTrain(st1, 5.0), SpikeTrain(st2, 5.0)) @@ -41,3 +42,53 @@ def test_cython(): for i, t in enumerate(st2): assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t], "At index %d" % i) + + sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0), + SpikeTrain(st3, 5.0)) + + coincidences = np.array(coincidence_impl(st1, st3, 0, 5.0, 0.0)) + for i, t in enumerate(st1): + assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t], + "At index %d" % i) + + st1 = np.array([1.0, 2.0, 3.0, 4.0]) + st2 = np.array([1.0, 2.0, 4.0]) + + sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0), + SpikeTrain(st2, 5.0)) + + coincidences = np.array(coincidence_impl(st1, st2, 0, 5.0, 0.0)) + for i, t in enumerate(st1): + expected = sync_prof.y[sync_prof.x == t]/sync_prof.mp[sync_prof.x == t] + assert_equal(coincidences[i], expected, + "At index %d" % i) + + +def test_filter(): + st1 = SpikeTrain(np.array([1.0, 2.0, 3.0, 4.0]), 5.0) + st2 = SpikeTrain(np.array([1.1, 2.1, 3.8]), 5.0) + st3 = SpikeTrain(np.array([0.9, 3.1, 4.1]), 5.0) + + # filtered_spike_trains = spk.filter_by_spike_sync([st1, st2], 0.5) + + # assert_equal(filtered_spike_trains[0].spikes, [1.0, 2.0, 4.0]) + # assert_equal(filtered_spike_trains[1].spikes, [1.1, 2.1, 3.8]) + + # filtered_spike_trains = spk.filter_by_spike_sync([st2, st1], 0.5) + + # assert_equal(filtered_spike_trains[0].spikes, [1.1, 2.1, 3.8]) + # assert_equal(filtered_spike_trains[1].spikes, [1.0, 2.0, 4.0]) + + filtered_spike_trains = spk.filter_by_spike_sync([st1, st2, st3], 0.75) + + for st in filtered_spike_trains: + print(st.spikes) + + assert_equal(filtered_spike_trains[0].spikes, [1.0, 4.0]) + assert_equal(filtered_spike_trains[1].spikes, [1.1, 3.8]) + assert_equal(filtered_spike_trains[2].spikes, [0.9, 4.1]) + + +if __name__ == "main": + test_single_prof() + test_filter() -- cgit v1.2.3 From 0c710a2450a055091fe6de2313d6240671d1c74d Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 26 Aug 2015 12:10:01 +0200 Subject: reorganized directionality module --- pyspike/__init__.py | 9 +- pyspike/cython/cython_directionality.pyx | 223 ++++++++++++++++++++++ pyspike/cython/directionality_python_backend.py | 89 +++++++++ pyspike/spike_directionality.py | 244 ++++++++++++++++++++++++ setup.py | 19 +- test/test_directionality.py | 41 ++++ 6 files changed, 619 insertions(+), 6 deletions(-) create mode 100644 pyspike/cython/cython_directionality.pyx create mode 100644 pyspike/cython/directionality_python_backend.py create mode 100644 pyspike/spike_directionality.py create mode 100644 test/test_directionality.py (limited to 'setup.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 08253fb..7fa5265 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -7,8 +7,8 @@ Distributed under the BSD License from __future__ import absolute_import __all__ = ["isi_distance", "spike_distance", "spike_sync", "psth", - "spikes", "SpikeTrain", "PieceWiseConstFunc", "PieceWiseLinFunc", - "DiscreteFunc", "directionality"] + "spikes", "spike_directionality", "SpikeTrain", + "PieceWiseConstFunc", "PieceWiseLinFunc", "DiscreteFunc"] from .PieceWiseConstFunc import PieceWiseConstFunc from .PieceWiseLinFunc import PieceWiseLinFunc @@ -27,6 +27,11 @@ from .spikes import load_spike_trains_from_txt, save_spike_trains_to_txt, \ spike_train_from_string, import_spike_trains_from_time_series, \ merge_spike_trains, generate_poisson_spikes +from spike_directionality import spike_directionality, \ + spike_directionality_matrix, spike_train_order_profile, \ + 
optimal_spike_train_order_from_matrix, optimal_spike_train_order, \ + permutate_matrix + # define the __version__ following # http://stackoverflow.com/questions/17583443 from pkg_resources import get_distribution, DistributionNotFound diff --git a/pyspike/cython/cython_directionality.pyx b/pyspike/cython/cython_directionality.pyx new file mode 100644 index 0000000..e1f63c4 --- /dev/null +++ b/pyspike/cython/cython_directionality.pyx @@ -0,0 +1,223 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_directionality.pyx + +cython implementation of the spike delay asymmetry measures + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_directionality.pyx + +which gives:: + + cython_directionality.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport fabs +from libc.math cimport fmax +from libc.math cimport fmin + +# from pyspike.cython.cython_distances cimport get_tau + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +############################################################ +# get_tau +############################################################ +cdef inline double get_tau(double[:] spikes1, double[:] spikes2, + int i, int j, double interval, double max_tau): + cdef double m = interval # use interval length as initial tau + cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 + cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 + if i < N1 and i > -1: + m = fmin(m, spikes1[i+1]-spikes1[i]) + if j < N2 and j > -1: + m = fmin(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = fmin(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = fmin(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = fmin(m, max_tau) + return m + + +############################################################ +# spike_train_order_profile_cython +############################################################ +def spike_train_order_profile_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, + double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int n = 0 + cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times + cdef double[:] a = np.zeros(N1 + N2 + 2) # asymmetry values + cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + st[n] = spikes1[i] + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 after spike train 2 + # both get marked with -1 + a[n] = -1 + a[n-1] = -1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + st[n] = spikes2[j] + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 before spike train 2 + # both get marked with 1 + a[n] = 1 + a[n-1] = 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + n += 1 + # add only one event with zero asymmetry value and multiplicity 2 + st[n] = spikes1[i] + a[n] = 0 + mp[n] = 2 + + st = st[:n+2] + a = a[:n+2] + mp = mp[:n+2] + + st[0] = t_start + 
st[len(st)-1] = t_end + if N1 + N2 > 0: + a[0] = a[1] + a[len(a)-1] = a[len(a)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + else: + a[0] = 1 + a[1] = 1 + + return st, a, mp + + + +############################################################ +# spike_order_values_cython +############################################################ +def spike_order_values_cython(double[:] spikes1, + double[:] spikes2, + double t_start, double t_end, + double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef double[:] a1 = np.zeros(N1) # asymmetry values + cdef double[:] a2 = np.zeros(N2) # asymmetry values + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 after spike train 2 + # leading spike gets +1, following spike -1 + a1[i] = -1 + a2[j] = +1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 before spike train 2 + # leading spike gets +1, following spike -1 + a1[i] = +1 + a2[j] = -1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + # equal spike times: zero asymmetry value + a1[i] = 0 + a2[j] = 0 + + return a1, a2 + + +############################################################ +# spike_train_order_cython +############################################################ +def spike_train_order_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int asym = 0 + cdef int mp = 0 + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike in spike train 2 appeared before spike in spike train 1 + # mark with -1 + asym -= 2 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike in spike train 1 appeared before spike in spike train 2 + # mark with +1 + asym += 2 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + # add only one event with multiplicity 2, but no asymmetry counting + mp += 2 + + if asym == 0 and mp == 0: + # empty spike trains -> spike sync = 1 by definition + asym = 1 + mp = 1 + + return asym, mp diff --git a/pyspike/cython/directionality_python_backend.py b/pyspike/cython/directionality_python_backend.py new file mode 100644 index 0000000..e14238f --- /dev/null +++ b/pyspike/cython/directionality_python_backend.py @@ -0,0 +1,89 @@ +""" directionality_python_backend.py + +Collection of python functions that can be used instead of the cython +implementation. 
+ +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +import numpy as np + + +############################################################ +# spike_train_order_python +############################################################ +def spike_train_order_python(spikes1, spikes2, t_start, t_end, max_tau): + + def get_tau(spikes1, spikes2, i, j, max_tau): + m = t_end - t_start # use interval as initial tau + if i < len(spikes1)-1 and i > -1: + m = min(m, spikes1[i+1]-spikes1[i]) + if j < len(spikes2)-1 and j > -1: + m = min(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = min(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = min(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = min(m, max_tau) + return m + + N1 = len(spikes1) + N2 = len(spikes2) + i = -1 + j = -1 + n = 0 + st = np.zeros(N1 + N2 + 2) # spike times + a = np.zeros(N1 + N2 + 2) # coincidences + mp = np.ones(N1 + N2 + 2) # multiplicity + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes1[i] + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + a[n] = -1 + a[n-1] = -1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes2[j] + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + a[n] = 1 + a[n-1] = 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + n += 1 + # add only one event with zero asymmetry value and multiplicity 2 + st[n] = spikes1[i] + a[n] = 0 + mp[n] = 2 + + st = st[:n+2] + a = a[:n+2] + mp = mp[:n+2] + + st[0] = t_start + st[len(st)-1] = t_end + if N1 + N2 > 0: + a[0] = a[1] + a[len(a)-1] = a[len(a)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + else: + a[0] = 1 + a[1] = 1 + + return st, a, mp diff --git a/pyspike/spike_directionality.py b/pyspike/spike_directionality.py new file mode 100644 index 0000000..0e69cb5 --- /dev/null +++ b/pyspike/spike_directionality.py @@ -0,0 +1,244 @@ +# Module containing functions to compute the SPIKE directionality and the +# spike train order profile +# Copyright 2015, Mario Mulansky +# Distributed under the BSD License + +import numpy as np +from math import exp +import pyspike +from pyspike import DiscreteFunc + + +############################################################ +# spike_directionality +############################################################ +def spike_directionality(spike_train1, spike_train2, normalize=True, + interval=None, max_tau=None): + """ Computes the overall spike directionality for two spike trains. 
+ """ + if interval is None: + # distance over the whole interval is requested: use specific function + # for optimal performance + try: + from cython.cython_directionality import \ + spike_train_order_cython as spike_train_order_impl + if max_tau is None: + max_tau = 0.0 + c, mp = spike_train_order_impl(spike_train1.spikes, + spike_train2.spikes, + spike_train1.t_start, + spike_train1.t_end, + max_tau) + except ImportError: + # Cython backend not available: fall back to profile averaging + c, mp = _spike_directionality_profile(spike_train1, + spike_train2, + max_tau).integral(interval) + if normalize: + return 1.0*c/mp + else: + return c + else: + # some specific interval is provided: not yet implemented + raise NotImplementedError() + + +############################################################ +# spike_directionality_matrix +############################################################ +def spike_directionality_matrix(spike_trains, normalize=True, indices=None, + interval=None, max_tau=None): + """ Computes the spike directionaity matrix for the given spike trains. + """ + if indices is None: + indices = np.arange(len(spike_trains)) + indices = np.array(indices) + # check validity of indices + assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ + "Invalid index list." + # generate a list of possible index pairs + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] + + distance_matrix = np.zeros((len(indices), len(indices))) + for i, j in pairs: + d = spike_directionality(spike_trains[i], spike_trains[j], normalize, + interval, max_tau=max_tau) + distance_matrix[i, j] = d + distance_matrix[j, i] = -d + return distance_matrix + + +############################################################ +# spike_train_order_profile +############################################################ +def spike_train_order_profile(spike_trains, indices=None, + interval=None, max_tau=None): + """ Computes the spike train symmetry value for each spike in each spike + train. + """ + if indices is None: + indices = np.arange(len(spike_trains)) + indices = np.array(indices) + # check validity of indices + assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ + "Invalid index list." + # list of arrays for reulting asymmetry values + asymmetry_list = [np.zeros_like(st.spikes) for st in spike_trains] + # generate a list of possible index pairs + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] + + # cython implementation + try: + from cython.cython_directionality import \ + spike_order_values_cython as spike_order_values_impl + except ImportError: + raise NotImplementedError() +# if not(pyspike.disable_backend_warning): +# print("Warning: spike_distance_cython not found. 
Make sure that \ +# PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \ +# Falling back to slow python backend.") +# # use python backend +# from cython.python_backend import coincidence_python \ +# as coincidence_profile_impl + + if max_tau is None: + max_tau = 0.0 + + for i, j in pairs: + a1, a2 = spike_order_values_impl(spike_trains[i].spikes, + spike_trains[j].spikes, + spike_trains[i].t_start, + spike_trains[i].t_end, + max_tau) + asymmetry_list[i] += a1 + asymmetry_list[j] += a2 + for a in asymmetry_list: + a /= len(spike_trains)-1 + return asymmetry_list + + +############################################################ +# optimal_spike_train_order_from_matrix +############################################################ +def optimal_spike_train_order_from_matrix(D, full_output=False): + """ finds the best sorting via simulated annealing. + Returns the optimal permutation p and A value. + Internal function, don't call directly! Use optimal_asymmetry_order + instead. + """ + N = len(D) + A = np.sum(np.triu(D, 0)) + + p = np.arange(N) + + T = 2*np.max(D) # starting temperature + T_end = 1E-5 * T # final temperature + alpha = 0.9 # cooling factor + total_iter = 0 + while T > T_end: + iterations = 0 + succ_iter = 0 + while iterations < 100*N and succ_iter < 10*N: + # exchange two rows and cols + ind1 = np.random.randint(N-1) + delta_A = -2*D[p[ind1], p[ind1+1]] + if delta_A > 0.0 or exp(delta_A/T) > np.random.random(): + # swap indices + p[ind1], p[ind1+1] = p[ind1+1], p[ind1] + A += delta_A + succ_iter += 1 + iterations += 1 + total_iter += iterations + T *= alpha # cool down + if succ_iter == 0: + break + if full_output: + return p, A, total_iter + else: + return p, A + + +############################################################ +# optimal_spike_train_order +############################################################ +def optimal_spike_train_order(spike_trains, indices=None, interval=None, + max_tau=None, full_output=False): + """ finds the best sorting of the given spike trains via simulated + annealing. + Returns the optimal permutation p and A value. + """ + D = spike_directionality_matrix(spike_trains, normalize=False, + indices=indices, interval=interval, + max_tau=max_tau) + return optimal_spike_train_order_from_matrix(D, full_output) + + +############################################################ +# permutate_matrix +############################################################ +def permutate_matrix(D, p): + """ Applies the permutation p to the columns and rows of matrix D. + Return the new permutated matrix. + """ + N = len(D) + D_p = np.empty_like(D) + for n in xrange(N): + for m in xrange(N): + D_p[n, m] = D[p[n], p[m]] + return D_p + + +# internal helper functions + +############################################################ +# _spike_directionality_profile +############################################################ +def _spike_directionality_profile(spike_train1, spike_train2, + max_tau=None): + """ Computes the spike delay asymmetry profile A(t) of the two given + spike trains. Returns the profile as a DiscreteFunction object. + + :param spike_train1: First spike train. + :type spike_train1: :class:`pyspike.SpikeTrain` + :param spike_train2: Second spike train. + :type spike_train2: :class:`pyspike.SpikeTrain` + :param max_tau: Maximum coincidence window size. If 0 or `None`, the + coincidence window has no upper bound. + :returns: The spike-distance profile :math:`S_{sync}(t)`. 
+ :rtype: :class:`pyspike.function.DiscreteFunction` + + """ + # check whether the spike trains are defined for the same interval + assert spike_train1.t_start == spike_train2.t_start, \ + "Given spike trains are not defined on the same interval!" + assert spike_train1.t_end == spike_train2.t_end, \ + "Given spike trains are not defined on the same interval!" + + # cython implementation + try: + from cython.cython_directionality import \ + spike_train_order_profile_cython as \ + spike_train_order_profile_impl + except ImportError: + # raise NotImplementedError() + if not(pyspike.disable_backend_warning): + print("Warning: spike_distance_cython not found. Make sure that \ +PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \ +Falling back to slow python backend.") + # use python backend + from cython.directionality_python_backend import \ + spike_train_order_python as spike_train_order_profile_impl + + if max_tau is None: + max_tau = 0.0 + + times, coincidences, multiplicity \ + = spike_train_order_profile_impl(spike_train1.spikes, + spike_train2.spikes, + spike_train1.t_start, + spike_train1.t_end, + max_tau) + + return DiscreteFunc(times, coincidences, multiplicity) diff --git a/setup.py b/setup.py index 5b9e677..9ba1da6 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,8 @@ class numpy_include(object): if os.path.isfile("pyspike/cython/cython_add.c") and \ os.path.isfile("pyspike/cython/cython_profiles.c") and \ - os.path.isfile("pyspike/cython/cython_distances.c"): + os.path.isfile("pyspike/cython/cython_distances.c") and \ + os.path.isfile("pyspike/cython/cython_directionality.c"): use_c = True else: use_c = False @@ -45,7 +46,9 @@ if use_cython: # Cython is available, compile .pyx -> .c Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.pyx"]), Extension("pyspike.cython.cython_distances", - ["pyspike/cython/cython_distances.pyx"]) + ["pyspike/cython/cython_distances.pyx"]), + Extension("pyspike.cython.cython_directionality", + ["pyspike/cython/cython_directionality.pyx"]) ] cmdclass.update({'build_ext': build_ext}) elif use_c: # c files are there, compile to binaries @@ -55,7 +58,9 @@ elif use_c: # c files are there, compile to binaries Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.c"]), Extension("pyspike.cython.cython_distances", - ["pyspike/cython/cython_distances.c"]) + ["pyspike/cython/cython_distances.c"]), + Extension("pyspike.cython.cython_directionality", + ["pyspike/cython/cython_directionality.c"]) ] # neither cython nor c files available -> automatic fall-back to python backend @@ -96,5 +101,11 @@ train similarity', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6' - ] + ], + package_data={ + 'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c', + 'cython/cython_distances.c', + 'cython/cython_directionality.c'], + 'test': ['Spike_testdata.txt'] + } ) diff --git a/test/test_directionality.py b/test/test_directionality.py new file mode 100644 index 0000000..5c3da00 --- /dev/null +++ b/test/test_directionality.py @@ -0,0 +1,41 @@ +""" test_spike_delay_asymmetry.py + +Tests the asymmetry functions + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +import numpy as np +from numpy.testing import assert_equal, assert_almost_equal, \ + assert_array_equal + +import pyspike as spk +from pyspike import SpikeTrain, DiscreteFunc +from pyspike.spike_directionality import 
_spike_directionality_profile
+
+
+def test_profile():
+    st1 = SpikeTrain([100, 200, 300], [0, 1000])
+    st2 = SpikeTrain([105, 205, 300], [0, 1000])
+    expected_x = np.array([0, 100, 105, 200, 205, 300, 1000])
+    expected_y = np.array([1, 1, 1, 1, 1, 0, 0])
+    expected_mp = np.array([1, 1, 1, 1, 1, 2, 2])
+
+    f = _spike_directionality_profile(st1, st2)
+
+    assert f.almost_equal(DiscreteFunc(expected_x, expected_y, expected_mp))
+    assert_almost_equal(f.avrg(), 2.0/3.0)
+    assert_almost_equal(spk.spike_directionality(st1, st2), 2.0/3.0)
+    assert_almost_equal(spk.spike_directionality(st1, st2, normalize=False),
+                        4.0)
+
+    st3 = SpikeTrain([105, 195, 500], [0, 1000])
+    expected_x = np.array([0, 100, 105, 195, 200, 300, 500, 1000])
+    expected_y = np.array([1, 1, 1, -1, -1, 0, 0, 0])
+    expected_mp = np.array([1, 1, 1, 1, 1, 1, 1, 1])
+
+    f = _spike_directionality_profile(st1, st3)
+    assert f.almost_equal(DiscreteFunc(expected_x, expected_y, expected_mp))
-- 
cgit v1.2.3

From 18ea80e2d01e9eb4ceee17219f91098efbcdf67c Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sat, 10 Oct 2015 20:45:09 +0200
Subject: spike sync filtering, cython sim ann

Added function for filtering out events based on a threshold for the
spike sync values. Useful for focusing on synchronous events during
directionality analysis.

Also added cython version of simulated annealing for performance.
---
 pyspike/__init__.py                           |   7 +-
 pyspike/cython/cython_distances.pyx           | 200 ++++++++++++++++++++++
 pyspike/cython/cython_profiles.pyx            |  14 +-
 pyspike/cython/cython_simulated_annealing.pyx |  82 +++++++++++
 pyspike/spike_directionality.py               |  54 ++++---
 pyspike/spike_sync.py                         |  19 ++-
 setup.py                                      |  14 +-
 test/test_sync_filter.py                      |  61 +++++++-
 8 files changed, 408 insertions(+), 43 deletions(-)
 create mode 100644 pyspike/cython/cython_simulated_annealing.pyx

(limited to 'setup.py')

diff --git a/pyspike/__init__.py b/pyspike/__init__.py
index 10d2936..61c5c4f 100644
--- a/pyspike/__init__.py
+++ b/pyspike/__init__.py
@@ -19,9 +19,10 @@ from .isi_distance import isi_profile, isi_distance, isi_profile_multi,\
     isi_distance_multi, isi_distance_matrix
 from .spike_distance import spike_profile, spike_distance, spike_profile_multi,\
     spike_distance_multi, spike_distance_matrix
-from .spike_sync import spike_sync_profile, spike_sync,\
-    spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix
-from .psth import psth
+from spike_sync import spike_sync_profile, spike_sync,\
+    spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix,\
+    filter_by_spike_sync
+from psth import psth

 from .spikes import load_spike_trains_from_txt, save_spike_trains_to_txt, \
     spike_train_from_string, import_spike_trains_from_time_series, \
diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx
index ac5f226..d4070ae 100644
--- a/pyspike/cython/cython_distances.pyx
+++ b/pyspike/cython/cython_distances.pyx
@@ -178,6 +178,8 @@ cdef inline double isi_avrg_cython(double isi1, double isi2) nogil:
     return 0.5*(isi1+isi2)*(isi1+isi2)
     # alternative definition to obtain ~ 0.5 for Poisson spikes
     # return 0.5*(isi1*isi1+isi2*isi2)
+    # another alternative definition without second normalization
+    # return 0.5*(isi1+isi2)


 ############################################################
@@ -248,6 +250,8 @@ def spike_distance_cython(double[:] t1, double[:] t2,
         index2 = 0

     y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2)
+    # alternative definition without second normalization
+    # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2)
     index = 1

     while
index1+index2 < N1+N2-2: @@ -267,6 +271,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p1 s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -286,6 +292,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_p1 # s2 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): index2 += 1 # first calculate the previous interval end value @@ -301,6 +309,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p2 s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -320,6 +330,9 @@ def spike_distance_cython(double[:] t1, double[:] t2, s2 = dt_p2 # s1 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) + else: # t_f1 == t_f2 - generate only one event index1 += 1 index2 += 1 @@ -358,6 +371,193 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_f1 # *(t_end-t1[N1-1])/isi1 s2 = dt_f2 # *(t_end-t2[N2-1])/isi2 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) + # end nogil + + # use only the data added above + # could be less than original length due to equal spike times + return spike_value / (t_end-t_start) + + +############################################################ +# isi_avrg_rf_cython +############################################################ +cdef inline double isi_avrg_rf_cython(double isi1, double isi2) nogil: + # rate free version + return (isi1+isi2) + + +############################################################ +# spike_distance_rf_cython +############################################################ +def spike_distance_rf_cython(double[:] t1, double[:] t2, + double t_start, double t_end): + + cdef int N1, N2, index1, index2, index + cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 + cdef double isi1, isi2, s1, s2 + cdef double y_start, y_end, t_last, t_current, spike_value + + spike_value = 0.0 + + N1 = len(t1) + N2 = len(t2) + + with nogil: # release the interpreter to allow multithreading + t_last = t_start + t_p1 = t_start + t_p2 = t_start + if t1[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f1 = t1[0] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) + dt_p1 = dt_f1 + s1 = dt_p1*(t_f1-t_start)/isi1 + index1 = -1 + else: + t_f1 = t1[1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + dt_p1 = 0.0 + isi1 = t1[1]-t1[0] + s1 = dt_p1 + index1 = 0 + if t2[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f2 = t2[0] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = dt_f2 + isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) + s2 = 
dt_p2*(t_f2-t_start)/isi2 + index2 = -1 + else: + t_f2 = t2[1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = 0.0 + isi2 = t2[1]-t2[0] + s2 = dt_p2 + index2 = 0 + + # y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + index = 1 + + while index1+index2 < N1+N2-2: + # print(index, index1, index2) + if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): + index1 += 1 + # first calculate the previous interval end value + s1 = dt_f1*(t_f1-t_p1) / isi1 + # the previous time now was the following time before: + dt_p1 = dt_f1 + t_p1 = t_f1 # t_p1 contains the current time point + # get the next time + if index1 < N1-1: + t_f1 = t1[index1+1] + else: + t_f1 = t_end + t_curr = t_p1 + s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 + # y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index1 < N1-1: + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1-t_p1 + s1 = dt_p1 + else: + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # s1 needs adjustment due to change of isi1 + s1 = dt_p1*(t_end-t1[N1-1])/isi1 + # s2 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): + index2 += 1 + # first calculate the previous interval end value + s2 = dt_f2*(t_f2-t_p2) / isi2 + # the previous time now was the following time before: + dt_p2 = dt_f2 + t_p2 = t_f2 # t_p2 contains the current time point + # get the next time + if index2 < N2-1: + t_f2 = t2[index2+1] + else: + t_f2 = t_end + t_curr = t_p2 + s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index2 < N2-1: + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2-t_p2 + s2 = dt_p2 + else: + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + # s2 needs adjustment due to change of isi2 + s2 = dt_p2*(t_end-t2[N2-1])/isi2 + # s1 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + else: # t_f1 == t_f2 - generate only one event + index1 += 1 + index2 += 1 + t_p1 = t_f1 + t_p2 = t_f2 + dt_p1 = 0.0 + dt_p2 = 0.0 + t_curr = t_f1 + y_end = 0.0 + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + y_start = 0.0 + if index1 < N1-1: + t_f1 = t1[index1+1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1 - t_p1 + else: + t_f1 = t_end + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + if index2 < N2-1: + t_f2 = t2[index2+1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2 - t_p2 + else: + t_f2 = t_end + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], 
t2[N2-1]-t2[N2-2]) + index += 1 + t_last = t_curr + # isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + s1 = dt_f1*(t_end-t1[N1-1])/isi1 + s2 = dt_f2*(t_end-t2[N2-1])/isi2 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) # end nogil diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index fe08cb7..eb4d157 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -466,18 +466,20 @@ def coincidence_single_profile_cython(double[:] spikes1, double[:] spikes2, cdef double tau for i in xrange(N1): while j < N2-1 and spikes2[j+1] < spikes1[i]: + # move forward until spikes2[j] is the last spike before spikes1[i] + # note that if spikes2[j] is after spikes1[i] we dont do anything j += 1 tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) - print i, j, spikes1[i], spikes2[j], tau - if j > -1 and spikes1[i]-spikes2[j] < tau: + if j > -1 and fabs(spikes1[i]-spikes2[j]) < tau: # current spike in st1 is coincident c[i] = 1 - if j < N2-1: + if j < N2-1 and spikes2[j] < spikes1[i]: + # in case spikes2[j] is before spikes1[i] it has to be the one + # right before (see above), hence we move one forward and also + # check the next spike j += 1 tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) - print i, j, spikes1[i], spikes2[j], tau - if spikes2[j]-spikes1[i] < tau: + if fabs(spikes2[j]-spikes1[i]) < tau: # current spike in st1 is coincident c[i] = 1 - return c diff --git a/pyspike/cython/cython_simulated_annealing.pyx b/pyspike/cython/cython_simulated_annealing.pyx new file mode 100644 index 0000000..be9423c --- /dev/null +++ b/pyspike/cython/cython_simulated_annealing.pyx @@ -0,0 +1,82 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_simulated_annealing.pyx + +cython implementation of a simulated annealing algorithm to find the optimal +spike train order + +Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects +improves the performance of spike_distance by a factor of 10! + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_simulated_annealing.pyx + +which gives: + + cython_simulated_annealing.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport exp +from libc.math cimport fmod +from libc.stdlib cimport rand +from libc.stdlib cimport RAND_MAX + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +def sim_ann_cython(double[:, :] D, double T_start, double T_end, double alpha): + + cdef long N = len(D) + cdef double A = np.sum(np.triu(D, 0)) + cdef long[:] p = np.arange(N) + cdef double T = T_start + cdef long iterations + cdef long succ_iter + cdef long total_iter = 0 + cdef double delta_A + cdef long ind1 + cdef long ind2 + + while T > T_end: + iterations = 0 + succ_iter = 0 + # equilibrate for 100*N steps or 10*N successful steps + while iterations < 100*N and succ_iter < 10*N: + # exchange two rows and cols + # ind1 = np.random.randint(N-1) + ind1 = rand() % (N-1) + if ind1 < N-1: + ind2 = ind1+1 + else: # this can never happen! 
diff --git a/pyspike/cython/cython_simulated_annealing.pyx b/pyspike/cython/cython_simulated_annealing.pyx
new file mode 100644
index 0000000..be9423c
--- /dev/null
+++ b/pyspike/cython/cython_simulated_annealing.pyx
@@ -0,0 +1,82 @@
+#cython: boundscheck=False
+#cython: wraparound=False
+#cython: cdivision=True
+
+"""
+cython_simulated_annealing.pyx
+
+cython implementation of a simulated annealing algorithm to find the optimal
+spike train order
+
+Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects
+improves the performance of spike_distance by a factor of 10!
+
+Copyright 2015, Mario Mulansky
+
+Distributed under the BSD License
+
+"""
+
+"""
+To test whether things can be optimized: remove all yellow stuff
+in the html output::
+
+  cython -a cython_simulated_annealing.pyx
+
+which gives:
+
+  cython_simulated_annealing.html
+
+"""
+
+import numpy as np
+cimport numpy as np
+
+from libc.math cimport exp
+from libc.math cimport fmod
+from libc.stdlib cimport rand
+from libc.stdlib cimport RAND_MAX
+
+DTYPE = np.float
+ctypedef np.float_t DTYPE_t
+
+
+def sim_ann_cython(double[:, :] D, double T_start, double T_end, double alpha):
+
+    cdef long N = len(D)
+    cdef double A = np.sum(np.triu(D, 0))
+    cdef long[:] p = np.arange(N)
+    cdef double T = T_start
+    cdef long iterations
+    cdef long succ_iter
+    cdef long total_iter = 0
+    cdef double delta_A
+    cdef long ind1
+    cdef long ind2
+
+    while T > T_end:
+        iterations = 0
+        succ_iter = 0
+        # equilibrate for 100*N steps or 10*N successful steps
+        while iterations < 100*N and succ_iter < 10*N:
+            # exchange two rows and cols
+            # ind1 = np.random.randint(N-1)
+            ind1 = rand() % (N-1)
+            if ind1 < N-1:
+                ind2 = ind1+1
+            else:  # this can never happen!
+                ind2 = 0
+            delta_A = -2*D[p[ind1], p[ind2]]
+            if delta_A > 0.0 or exp(delta_A/T) > ((1.0*rand()) / RAND_MAX):
+                # swap indices
+                p[ind1], p[ind2] = p[ind2], p[ind1]
+                A += delta_A
+                succ_iter += 1
+            iterations += 1
+        total_iter += iterations
+        T *= alpha   # cool down
+        if succ_iter == 0:
+            # no successful step -> we believe we have converged
+            break
+
+    return p, A, total_iter
diff --git a/pyspike/spike_directionality.py b/pyspike/spike_directionality.py
index cda7fe3..e1f5f16 100644
--- a/pyspike/spike_directionality.py
+++ b/pyspike/spike_directionality.py
@@ -242,27 +242,39 @@ def optimal_spike_train_order_from_matrix(D, full_output=False):
     p = np.arange(N)
 
-    T = 2*np.max(D)    # starting temperature
-    T_end = 1E-5 * T   # final temperature
-    alpha = 0.9        # cooling factor
-    total_iter = 0
-    while T > T_end:
-        iterations = 0
-        succ_iter = 0
-        while iterations < 100*N and succ_iter < 10*N:
-            # exchange two rows and cols
-            ind1 = np.random.randint(N-1)
-            delta_A = -2*D[p[ind1], p[ind1+1]]
-            if delta_A > 0.0 or exp(delta_A/T) > np.random.random():
-                # swap indices
-                p[ind1], p[ind1+1] = p[ind1+1], p[ind1]
-                A += delta_A
-                succ_iter += 1
-            iterations += 1
-        total_iter += iterations
-        T *= alpha   # cool down
-        if succ_iter == 0:
-            break
+    T_start = 2*np.max(D)     # starting temperature
+    T_end = 1E-5 * T_start    # final temperature
+    alpha = 0.9               # cooling factor
+
+    from cython.cython_simulated_annealing import sim_ann_cython as sim_ann
+
+    p, A, total_iter = sim_ann(D, T_start, T_end, alpha)
+
+    # T = T_start
+    # total_iter = 0
+    # while T > T_end:
+    #     iterations = 0
+    #     succ_iter = 0
+    #     # equilibrate for 100*N steps or 10*N successful steps
+    #     while iterations < 100*N and succ_iter < 10*N:
+    #         # exchange two rows and cols
+    #         ind1 = np.random.randint(N-1)
+    #         if ind1 < N-1:
+    #             ind2 = ind1+1
+    #         else:  # this can never happen
+    #             ind2 = 0
+    #         delta_A = -2*D[p[ind1], p[ind2]]
+    #         if delta_A > 0.0 or exp(delta_A/T) > np.random.random():
+    #             # swap indices
+    #             p[ind1], p[ind2] = p[ind2], p[ind1]
+    #             A += delta_A
+    #             succ_iter += 1
+    #         iterations += 1
+    #     total_iter += iterations
+    #     T *= alpha   # cool down
+    #     if succ_iter == 0:
+    #         break
+
     if full_output:
         return p, A, total_iter
     else:
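With the Python loop replaced by the cython backend, optimizing a spike train order reduces to a single call. A hedged usage sketch: the matrix values below are invented, the interpretation that a positive entry above the diagonal means the row train leads the column train is an assumption, and the import path presumes an in-place build ('python setup.py build_ext --inplace')::

    import numpy as np
    from pyspike.cython.cython_simulated_annealing import sim_ann_cython

    # hypothetical directionality matrix for 3 spike trains
    D = np.array([[0.0, -0.5, -0.8],
                  [0.5,  0.0, -0.3],
                  [0.8,  0.3,  0.0]])

    T_start = 2*np.max(D)     # starting temperature, as in the wrapper
    T_end = 1e-5 * T_start    # final temperature
    alpha = 0.9               # cooling factor

    p, A, total_iter = sim_ann_cython(D, T_start, T_end, alpha)
    # permutation, upper-triangle sum, number of annealing steps
    print(np.asarray(p), A, total_iter)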
diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py
index d37731f..1d2ecdb 100644
--- a/pyspike/spike_sync.py
+++ b/pyspike/spike_sync.py
@@ -295,12 +295,14 @@ def spike_sync_matrix(spike_trains, indices=None, interval=None, max_tau=None):
 ############################################################
 # filter_by_spike_sync
 ############################################################
-def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None):
+def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None,
+                         return_removed_spikes=False):
     """ Removes the spikes with a multi-variate spike_sync value below
     threshold.
     """
     N = len(spike_trains)
     filtered_spike_trains = []
+    removed_spike_trains = []
 
     # cython implementation
     try:
@@ -308,7 +310,7 @@ def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None):
                                                     as coincidence_impl
     except ImportError:
         if not(pyspike.disable_backend_warning):
-            print("Warning: coincidence_single_profile_cytho not found. Make \
+            print("Warning: coincidence_single_profile_cython not found. Make \
 sure that PySpike is installed by running\n \
 'python setup.py build_ext --inplace'!\n \
 Falling back to slow python backend.")
@@ -321,10 +323,19 @@ Falling back to slow python backend.")
 
     for i, st in enumerate(spike_trains):
         coincidences = np.zeros_like(st)
-        for j in range(N).remove(i):
+        for j in xrange(N):
+            if i == j:
+                continue
             coincidences += coincidence_impl(st.spikes, spike_trains[j].spikes,
                                              st.t_start, st.t_end, max_tau)
         filtered_spikes = st[coincidences > threshold*(N-1)]
         filtered_spike_trains.append(SpikeTrain(filtered_spikes,
                                                 [st.t_start, st.t_end]))
-    return filtered_spike_trains
+        if return_removed_spikes:
+            removed_spikes = st[coincidences <= threshold*(N-1)]
+            removed_spike_trains.append(SpikeTrain(removed_spikes,
+                                                   [st.t_start, st.t_end]))
+    if return_removed_spikes:
+        return [filtered_spike_trains, removed_spike_trains]
+    else:
+        return filtered_spike_trains
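Usage of the new filter, with spike trains mirroring the test below; with return_removed_spikes=True the function returns the kept and the removed spikes as a pair of spike train lists instead of a single list::

    import numpy as np
    import pyspike as spk
    from pyspike import SpikeTrain

    st1 = SpikeTrain(np.array([1.0, 2.0, 3.0, 4.0]), 5.0)
    st2 = SpikeTrain(np.array([1.1, 2.1, 3.8]), 5.0)
    st3 = SpikeTrain(np.array([0.9, 3.1, 4.1]), 5.0)

    # keep only spikes with a multivariate spike-sync value above 0.75
    filtered = spk.filter_by_spike_sync([st1, st2, st3], 0.75)

    # additionally collect the spikes that were filtered out
    filtered, removed = spk.filter_by_spike_sync([st1, st2, st3], 0.75,
                                                 return_removed_spikes=True)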
diff --git a/setup.py b/setup.py
index 9ba1da6..808a122 100644
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,8 @@ class numpy_include(object):
 if os.path.isfile("pyspike/cython/cython_add.c") and \
         os.path.isfile("pyspike/cython/cython_profiles.c") and \
         os.path.isfile("pyspike/cython/cython_distances.c") and \
-        os.path.isfile("pyspike/cython/cython_directionality.c"):
+        os.path.isfile("pyspike/cython/cython_directionality.c") and \
+        os.path.isfile("pyspike/cython/cython_simulated_annealing.c"):
     use_c = True
 else:
     use_c = False
@@ -48,7 +49,9 @@ if use_cython:  # Cython is available, compile .pyx -> .c
         Extension("pyspike.cython.cython_distances",
                   ["pyspike/cython/cython_distances.pyx"]),
         Extension("pyspike.cython.cython_directionality",
-                  ["pyspike/cython/cython_directionality.pyx"])
+                  ["pyspike/cython/cython_directionality.pyx"]),
+        Extension("pyspike.cython.cython_simulated_annealing",
+                  ["pyspike/cython/cython_simulated_annealing.pyx"])
     ]
     cmdclass.update({'build_ext': build_ext})
 elif use_c:  # c files are there, compile to binaries
@@ -60,7 +63,9 @@ elif use_c:  # c files are there, compile to binaries
         Extension("pyspike.cython.cython_distances",
                   ["pyspike/cython/cython_distances.c"]),
         Extension("pyspike.cython.cython_directionality",
-                  ["pyspike/cython/cython_directionality.c"])
+                  ["pyspike/cython/cython_directionality.c"]),
+        Extension("pyspike.cython.cython_simulated_annealing",
+                  ["pyspike/cython/cython_simulated_annealing.c"])
     ]
 # neither cython nor c files available -> automatic fall-back to python backend
@@ -105,7 +110,8 @@ train similarity',
     package_data={
         'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c',
                     'cython/cython_distances.c',
-                    'cython/cython_directionality.c'],
+                    'cython/cython_directionality.c',
+                    'cython/cython_simulated_annealing.c'],
         'test': ['Spike_testdata.txt']
     }
 )
diff --git a/test/test_sync_filter.py b/test/test_sync_filter.py
index ce03b23..66ffcb6 100644
--- a/test/test_sync_filter.py
+++ b/test/test_sync_filter.py
@@ -17,17 +17,18 @@ import pyspike as spk
 from pyspike import SpikeTrain
 
 
-def test_cython():
+def test_single_prof():
     st1 = np.array([1.0, 2.0, 3.0, 4.0])
     st2 = np.array([1.1, 2.1, 3.8])
+    st3 = np.array([0.9, 3.1, 4.1])
 
     # cython implementation
     try:
-        from pyspike.cython.cython_profiles import coincidence_single_profile_cython \
-            as coincidence_impl
+        from pyspike.cython.cython_profiles import \
+            coincidence_single_profile_cython as coincidence_impl
     except ImportError:
-        from pyspike.cython.python_backend import coincidence_single_profile_python \
-            as coincidence_impl
+        from pyspike.cython.python_backend import \
+            coincidence_single_profile_python as coincidence_impl
 
     sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
                                        SpikeTrain(st2, 5.0))
@@ -41,3 +42,53 @@ def test_cython():
     for i, t in enumerate(st2):
         assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t],
                      "At index %d" % i)
+
+    sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
+                                       SpikeTrain(st3, 5.0))
+
+    coincidences = np.array(coincidence_impl(st1, st3, 0, 5.0, 0.0))
+    for i, t in enumerate(st1):
+        assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t],
+                     "At index %d" % i)
+
+    st1 = np.array([1.0, 2.0, 3.0, 4.0])
+    st2 = np.array([1.0, 2.0, 4.0])
+
+    sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
+                                       SpikeTrain(st2, 5.0))
+
+    coincidences = np.array(coincidence_impl(st1, st2, 0, 5.0, 0.0))
+    for i, t in enumerate(st1):
+        expected = sync_prof.y[sync_prof.x == t]/sync_prof.mp[sync_prof.x == t]
+        assert_equal(coincidences[i], expected,
+                     "At index %d" % i)
+
+
+def test_filter():
+    st1 = SpikeTrain(np.array([1.0, 2.0, 3.0, 4.0]), 5.0)
+    st2 = SpikeTrain(np.array([1.1, 2.1, 3.8]), 5.0)
+    st3 = SpikeTrain(np.array([0.9, 3.1, 4.1]), 5.0)
+
+    # filtered_spike_trains = spk.filter_by_spike_sync([st1, st2], 0.5)
+
+    # assert_equal(filtered_spike_trains[0].spikes, [1.0, 2.0, 4.0])
+    # assert_equal(filtered_spike_trains[1].spikes, [1.1, 2.1, 3.8])
+
+    # filtered_spike_trains = spk.filter_by_spike_sync([st2, st1], 0.5)
+
+    # assert_equal(filtered_spike_trains[0].spikes, [1.1, 2.1, 3.8])
+    # assert_equal(filtered_spike_trains[1].spikes, [1.0, 2.0, 4.0])
+
+    filtered_spike_trains = spk.filter_by_spike_sync([st1, st2, st3], 0.75)
+
+    for st in filtered_spike_trains:
+        print(st.spikes)
+
+    assert_equal(filtered_spike_trains[0].spikes, [1.0, 4.0])
+    assert_equal(filtered_spike_trains[1].spikes, [1.1, 3.8])
+    assert_equal(filtered_spike_trains[2].spikes, [0.9, 4.1])
+
+
+if __name__ == "__main__":
+    test_single_prof()
+    test_filter()
-- cgit v1.2.3