From 03440863ae92c14abae25f54bad9349119b0ec64 Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sat, 27 Sep 2014 14:34:46 +0200
Subject: added test for functions, bug fixes in add

---
 test/test_function.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 test/test_function.py

(limited to 'test/test_function.py')

diff --git a/test/test_function.py b/test/test_function.py
new file mode 100644
index 0000000..386b999
--- /dev/null
+++ b/test/test_function.py
@@ -0,0 +1,53 @@
+""" test_function.py
+
+Tests the PieceWiseConst and PieceWiseLinear functions
+
+Copyright 2014, Mario Mulansky
+"""
+
+from __future__ import print_function
+import numpy as np
+from copy import copy
+from numpy.testing import assert_equal, assert_almost_equal, \
+    assert_array_almost_equal
+
+import pyspike as spk
+
+def test_pwc():
+    # some random data
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y = [1.0, -0.5, 1.5, 0.75]
+    f = spk.PieceWiseConstFunc(x, y)
+    xp, yp = f.get_plottable_data()
+
+    xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]
+    yp_expected = [1.0, 1.0, -0.5, -0.5, 1.5, 1.5, 0.75, 0.75]
+    assert_array_almost_equal(xp, xp_expected)
+    assert_array_almost_equal(yp, yp_expected)
+
+    assert_almost_equal(f.avrg(), (1.0-0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16)
+    assert_almost_equal(f.abs_avrg(), (1.0+0.5+0.5*1.5+1.5*0.75)/4.0,
+                        decimal=16)
+
+    f1 = copy(f)
+    x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0]
+    y = [0.5, 1.0, -0.25, 0.0, 1.5]
+    f2 = spk.PieceWiseConstFunc(x, y)
+    f1.add(f2)
+    x_expected = [0.0, 0.75, 1.0, 2.0, 2.5, 2.7, 4.0]
+    y_expected = [1.5, 2.0, 0.5, 1.25, 0.75, 2.25]
+    assert_array_almost_equal(f1.x, x_expected, decimal=16)
+    assert_array_almost_equal(f1.y, y_expected, decimal=16)
+
+    f2.add(f)
+    assert_array_almost_equal(f2.x, x_expected, decimal=16)
+    assert_array_almost_equal(f2.y, y_expected, decimal=16)
+
+    f1.add(f2)
+    # same x, but y doubled
+    assert_array_almost_equal(f1.x, f2.x, decimal=16)
+    assert_array_almost_equal(f1.y, 2*f2.y, decimal=16)
+
+
+if __name__ == "__main__":
+    test_pwc()
-- 
cgit v1.2.3

From 604c7cfe89ebaa37bc51fbfabb656b8aa7829554 Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sat, 27 Sep 2014 18:37:42 +0200
Subject: + addition for piece wise linear function with tests

---
 pyspike/function.py   | 106 ++++++++++++++++++++++++++++++++++++++++++++++++--
 test/test_function.py |  49 ++++++++++++++++++++++-
 2 files changed, 150 insertions(+), 5 deletions(-)

(limited to 'test/test_function.py')

diff --git a/pyspike/function.py b/pyspike/function.py
index ef6b0f1..b705293 100644
--- a/pyspike/function.py
+++ b/pyspike/function.py
@@ -10,6 +10,9 @@ from __future__ import print_function
 
 import numpy as np
 
 
+##############################################################
+# PieceWiseConstFunc
+##############################################################
 class PieceWiseConstFunc:
     """ A class representing a piece-wise constant function. """
@@ -106,6 +109,10 @@ class PieceWiseConstFunc:
         self.x = x_new[:index+2]
         self.y = y_new[:index+1]
 
+
+##############################################################
+# PieceWiseLinFunc
+##############################################################
 class PieceWiseLinFunc:
     """ A class representing a piece-wise linear function. """
 
@@ -119,9 +126,9 @@ class PieceWiseLinFunc:
       - y2: array of length N defining the function values at the right of the
         intervals.
""" - self.x = x - self.y1 = y1 - self.y2 = y2 + self.x = np.array(x) + self.y1 = np.array(y1) + self.y2 = np.array(y2) def get_plottable_data(self): """ Returns two arrays containing x- and y-coordinates for immeditate @@ -136,3 +143,96 @@ class PieceWiseLinFunc: y_plot[1::2] = self.y2 return x_plot, y_plot + def avrg(self): + """ Computes the average of the piece-wise linear function: + a = 1/T int f(x) dx where T is the length of the interval. + Returns: + - the average a. + """ + return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ + (self.x[-1]-self.x[0]) + + def abs_avrg(self): + """ Computes the absolute average of the piece-wise linear function: + a = 1/T int |f(x)| dx where T is the length of the interval. + Returns: + - the average a. + """ + return np.sum((self.x[1:]-self.x[:-1]) * 0.5 * + (np.abs(self.y1)+np.abs(self.y2)))/(self.x[-1]-self.x[0]) + + def add(self, f): + """ Adds another PieceWiseLin function to this function. + Note: only functions defined on the same interval can be summed. + Params: + - f: PieceWiseLin function to be added. + """ + assert self.x[0] == f.x[0], "The functions have different intervals" + assert self.x[-1] == f.x[-1], "The functions have different intervals" + x_new = np.empty(len(self.x) + len(f.x)) + y1_new = np.empty(len(x_new)-1) + y2_new = np.empty_like(y1_new) + x_new[0] = self.x[0] + y1_new[0] = self.y1[0] + f.y1[0] + index1 = 0 # index for self + index2 = 0 # index for f + index = 0 # index for new + while (index1+1 < len(self.y1)) and (index2+1 < len(f.y1)): + # print(index1+1, self.x[index1+1], self.y[index1+1], x_new[index]) + if self.x[index1+1] < f.x[index2+1]: + # first compute the end value of the previous interval + # linear interpolation of the interval + y = f.y1[index2] + (f.y2[index2]-f.y1[index2]) * \ + (self.x[index1+1]-f.x[index2]) / (f.x[index2+1]-f.x[index2]) + y2_new[index] = self.y2[index1] + y + index1 += 1 + index += 1 + x_new[index] = self.x[index1] + # and the starting value for the next interval + y1_new[index] = self.y1[index1] + y + elif self.x[index1+1] > f.x[index2+1]: + # first compute the end value of the previous interval + # linear interpolation of the interval + y = self.y1[index1] + (self.y2[index1]-self.y1[index1]) * \ + (f.x[index2+1]-self.x[index1]) / \ + (self.x[index1+1]-self.x[index1]) + y2_new[index] = f.y2[index2] + y + index2 += 1 + index += 1 + x_new[index] = f.x[index2] + # and the starting value for the next interval + y1_new[index] = f.y1[index2] + y + else: # self.x[index1+1] == f.x[index2+1]: + y2_new[index] = self.y2[index1] + f.y2[index2] + index1 += 1 + index2 += 1 + index += 1 + x_new[index] = self.x[index1] + y1_new[index] = self.y1[index1] + f.y1[index2] + # one array reached the end -> copy the contents of the other to the end + if index1+1 < len(self.y1): + # compute the linear interpolations values + y = f.y1[index2] + (f.y2[index2]-f.y1[index2]) * \ + (self.x[index1+1:-1]-f.x[index2]) / (f.x[index2+1]-f.x[index2]) + x_new[index+1:index+1+len(self.x)-index1-1] = self.x[index1+1:] + y1_new[index+1:index+1+len(self.y1)-index1-1] = self.y1[index1+1:]+y + y2_new[index:index+len(self.y2)-index1-1] = self.y2[index1:-1] + y + index += len(self.x)-index1-2 + elif index2+1 < len(f.y1): + # compute the linear interpolations values + y = self.y1[index1] + (self.y2[index1]-self.y1[index1]) * \ + (f.x[index2+1:-1]-self.x[index1]) / \ + (self.x[index1+1]-self.x[index1]) + x_new[index+1:index+1+len(f.x)-index2-1] = f.x[index2+1:] + y1_new[index+1:index+1+len(f.y1)-index2-1] = 
+                f.y1[index2+1:] + y
+            y2_new[index:index+len(f.y2)-index2-1] = f.y2[index2:-1] + y
+            index += len(f.x)-index2-2
+        else: # both arrays reached the end simultaneously
+            # only the last x-value missing
+            x_new[index+1] = self.x[-1]
+        # finally, the end value for the last interval
+        y2_new[index] = self.y2[-1]+f.y2[-1]
+        # only use the data that was actually filled
+        self.x = x_new[:index+2]
+        self.y1 = y1_new[:index+1]
+        self.y2 = y2_new[:index+1]
diff --git a/test/test_function.py b/test/test_function.py
index 386b999..014ecac 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -22,8 +22,8 @@ def test_pwc():
 
     xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]
     yp_expected = [1.0, 1.0, -0.5, -0.5, 1.5, 1.5, 0.75, 0.75]
-    assert_array_almost_equal(xp, xp_expected)
-    assert_array_almost_equal(yp, yp_expected)
+    assert_array_almost_equal(xp, xp_expected, decimal=16)
+    assert_array_almost_equal(yp, yp_expected, decimal=16)
 
     assert_almost_equal(f.avrg(), (1.0-0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16)
     assert_almost_equal(f.abs_avrg(), (1.0+0.5+0.5*1.5+1.5*0.75)/4.0,
@@ -49,5 +49,50 @@ def test_pwc():
     assert_array_almost_equal(f1.y, 2*f2.y, decimal=16)
 
+
+def test_pwl():
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y1 = [1.0, -0.5, 1.5, 0.75]
+    y2 = [1.5, -0.4, 1.5, 0.25]
+    f = spk.PieceWiseLinFunc(x, y1, y2)
+    xp, yp = f.get_plottable_data()
+
+    xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]
+    yp_expected = [1.0, 1.5, -0.5, -0.4, 1.5, 1.5, 0.75, 0.25]
+    assert_array_almost_equal(xp, xp_expected, decimal=16)
+    assert_array_almost_equal(yp, yp_expected, decimal=16)
+
+    avrg_expected = (1.25 - 0.45 + 0.75 + 1.5*0.5) / 4.0
+    assert_almost_equal(f.avrg(), avrg_expected, decimal=16)
+
+    abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0
+    assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16)
+
+    f1 = copy(f)
+    x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0]
+    y1 = [0.5, 1.0, -0.25, 0.0, 1.5]
+    y2 = [0.8, 0.2, -1.0, 0.0, 2.0]
+    f2 = spk.PieceWiseLinFunc(x, y1, y2)
+    f1.add(f2)
+    x_expected = [0.0, 0.75, 1.0, 2.0, 2.5, 2.7, 4.0]
+    y1_expected = [1.5, 1.0+1.0+0.5*0.75, -0.5+1.0-0.8*0.25/1.25, 1.5-0.25,
+                   0.75, 1.5+0.75-0.5*0.2/1.5]
+    y2_expected = [0.8+1.0+0.5*0.75, 1.5+1.0-0.8*0.25/1.25, -0.4+0.2, 1.5-1.0,
+                   0.75-0.5*0.2/1.5, 2.25]
+    assert_array_almost_equal(f1.x, x_expected, decimal=16)
+    assert_array_almost_equal(f1.y1, y1_expected, decimal=16)
+    assert_array_almost_equal(f1.y2, y2_expected, decimal=16)
+
+    f2.add(f)
+    assert_array_almost_equal(f2.x, x_expected, decimal=16)
+    assert_array_almost_equal(f2.y1, y1_expected, decimal=16)
+    assert_array_almost_equal(f2.y2, y2_expected, decimal=16)
+
+    f1.add(f2)
+    # same x, but y doubled
+    assert_array_almost_equal(f1.x, f2.x, decimal=16)
+    assert_array_almost_equal(f1.y1, 2*f2.y1, decimal=16)
+    assert_array_almost_equal(f1.y2, 2*f2.y2, decimal=16)
+
 
 if __name__ == "__main__":
     test_pwc()
-- 
cgit v1.2.3

From e4f1c09672068e4778f7b5f3e27b47ff8986863c Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Mon, 29 Sep 2014 12:55:56 +0200
Subject: +mul_scalar, tests restructured and cosmetics

---
 pyspike/distances.py  | 14 ++++++++++++++
 pyspike/function.py   | 15 +++++++++++++++
 pyspike/spikes.py     |  6 ++++++
 test/test_function.py | 39 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 74 insertions(+)

(limited to 'test/test_function.py')

diff --git a/pyspike/distances.py b/pyspike/distances.py
index 10b1d3c..f4be625 100644
--- a/pyspike/distances.py
+++ b/pyspike/distances.py
@@ -9,6 +9,10 @@
 import numpy as np
 
 from pyspike import PieceWiseConstFunc, \
PieceWiseLinFunc + +############################################################ +# add_auxiliary_spikes +############################################################ def add_auxiliary_spikes( spike_train, T_end , T_start=0.0): """ Adds spikes at the beginning (T_start) and end (T_end) of the observation interval. @@ -29,6 +33,10 @@ def add_auxiliary_spikes( spike_train, T_end , T_start=0.0): spike_train = np.append(spike_train, T_end) return spike_train + +############################################################ +# isi_distance +############################################################ def isi_distance(spikes1, spikes2): """ Computes the instantaneous isi-distance S_isi (t) of the two given spike trains. The spike trains are expected to have auxiliary spikes at the @@ -95,6 +103,9 @@ def isi_distance(spikes1, spikes2): return PieceWiseConstFunc(spike_events[:index+1], isi_values[:index]) +############################################################ +# get_min_dist +############################################################ def get_min_dist(spike_time, spike_train, start_index=0): """ Returns the minimal distance |spike_time - spike_train[i]| with i>=start_index. @@ -111,6 +122,9 @@ def get_min_dist(spike_time, spike_train, start_index=0): return d +############################################################ +# spike_distance +############################################################ def spike_distance(spikes1, spikes2): """ Computes the instantaneous spike-distance S_spike (t) of the two given spike trains. The spike trains are expected to have auxiliary spikes at the diff --git a/pyspike/function.py b/pyspike/function.py index b705293..3a5a01c 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -109,6 +109,13 @@ class PieceWiseConstFunc: self.x = x_new[:index+2] self.y = y_new[:index+1] + def mul_scalar(self, fac): + """ Multiplies the function with a scalar value + Params: + - fac: Value to multiply + """ + self.y *= fac + ############################################################## # PieceWiseLinFunc @@ -236,3 +243,11 @@ class PieceWiseLinFunc: self.x = x_new[:index+2] self.y1 = y1_new[:index+1] self.y2 = y2_new[:index+1] + + def mul_scalar(self, fac): + """ Multiplies the function with a scalar value + Params: + - fac: Value to multiply + """ + self.y1 *= fac + self.y2 *= fac diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6b2eea3..70b48ff 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -7,6 +7,9 @@ Copyright 2014, Mario Mulansky import numpy as np +############################################################ +# spike_train_from_string +############################################################ def spike_train_from_string(s, sep=' '): """ Converts a string of times into an array of spike times. Params: @@ -18,6 +21,9 @@ def spike_train_from_string(s, sep=' '): return np.fromstring(s, sep=sep) +############################################################ +# merge_spike_trains +############################################################ def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. 
Params: diff --git a/test/test_function.py b/test/test_function.py index 014ecac..7420011 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -29,6 +29,13 @@ def test_pwc(): assert_almost_equal(f.abs_avrg(), (1.0+0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16) + +def test_pwc_add(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f = spk.PieceWiseConstFunc(x, y) + f1 = copy(f) x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] y = [0.5, 1.0, -0.25, 0.0, 1.5] @@ -48,6 +55,17 @@ def test_pwc(): assert_array_almost_equal(f1.x, f2.x, decimal=16) assert_array_almost_equal(f1.y, 2*f2.y, decimal=16) +def test_pwc_mul(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f = spk.PieceWiseConstFunc(x, y) + + f.mul_scalar(1.5) + assert_array_almost_equal(f.x, x, decimal=16) + assert_array_almost_equal(f.y, 1.5*np.array(y), decimal=16) + f.mul_scalar(1.0/5.0) + assert_array_almost_equal(f.y, 1.5/5.0*np.array(y), decimal=16) + def test_pwl(): x = [0.0, 1.0, 2.0, 2.5, 4.0] @@ -67,6 +85,13 @@ def test_pwl(): abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16) + +def test_pwl_add(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y1 = [1.0, -0.5, 1.5, 0.75] + y2 = [1.5, -0.4, 1.5, 0.25] + f = spk.PieceWiseLinFunc(x, y1, y2) + f1 = copy(f) x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] y1 = [0.5, 1.0, -0.25, 0.0, 1.5] @@ -94,5 +119,19 @@ def test_pwl(): assert_array_almost_equal(f1.y2, 2*f2.y2, decimal=16) +def test_pwc_mul(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y1 = [1.0, -0.5, 1.5, 0.75] + y2 = [1.5, -0.4, 1.5, 0.25] + f = spk.PieceWiseLinFunc(x, y1, y2) + + f.mul_scalar(1.5) + assert_array_almost_equal(f.x, x, decimal=16) + assert_array_almost_equal(f.y1, 1.5*np.array(y1), decimal=16) + assert_array_almost_equal(f.y2, 1.5*np.array(y2), decimal=16) + f.mul_scalar(1.0/5.0) + assert_array_almost_equal(f.y1, 1.5/5.0*np.array(y1), decimal=16) + assert_array_almost_equal(f.y2, 1.5/5.0*np.array(y2), decimal=16) + if __name__ == "__main__": test_pwc() -- cgit v1.2.3 From c1c5403b8274bd19aa1e71933cfaefe1ba622e59 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 10 Oct 2014 17:23:28 +0200 Subject: added License note in headers --- examples/isi_matrix.py | 11 +++++++++++ examples/merge.py | 28 ++++++++++++++++++++++++++++ examples/plot.py | 42 ++++++++++++++++++++++++++++++++++++++++++ examples/test_data.py | 34 ---------------------------------- examples/test_merge.py | 20 -------------------- pyspike/__init__.py | 6 ++++++ pyspike/cython_distance.pyx | 3 +++ pyspike/distances.py | 2 ++ pyspike/function.py | 2 ++ pyspike/python_backend.py | 4 +++- pyspike/spikes.py | 2 ++ test/test_distance.py | 3 +++ test/test_function.py | 2 ++ 13 files changed, 104 insertions(+), 55 deletions(-) create mode 100644 examples/merge.py create mode 100644 examples/plot.py delete mode 100644 examples/test_data.py delete mode 100644 examples/test_merge.py (limited to 'test/test_function.py') diff --git a/examples/isi_matrix.py b/examples/isi_matrix.py index 0d6e185..3297d3d 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -1,3 +1,14 @@ +""" isi_matrix.py + +Simple example showing how to compute the isi distance matrix of a set of spike +trains. 
+ +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + + from __future__ import print_function import numpy as np diff --git a/examples/merge.py b/examples/merge.py new file mode 100644 index 0000000..55c7f0a --- /dev/null +++ b/examples/merge.py @@ -0,0 +1,28 @@ +""" merge.py + +Simple example showing the merging of two spike trains. + +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +import pyspike as spk + +# first load the data, ending time = 4000 +spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", 4000) + +spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) + +print(spikes) + +plt.plot(spike_trains[0], np.ones_like(spike_trains[0]), 'o') +plt.plot(spike_trains[1], np.ones_like(spike_trains[1]), 'x') +plt.plot(spikes, 2*np.ones_like(spikes), 'o') + +plt.show() diff --git a/examples/plot.py b/examples/plot.py new file mode 100644 index 0000000..d7e2173 --- /dev/null +++ b/examples/plot.py @@ -0,0 +1,42 @@ +""" plot.py + +Simple example showing how to load and plot spike trains and their distances. + +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + + +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +import pyspike as spk + +spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=(0,4000)) + +# plot the spike time +for (i,spikes) in enumerate(spike_trains): + plt.plot(spikes, i*np.ones_like(spikes), 'o') + +f = spk.isi_distance(spike_trains[0], spike_trains[1]) +x, y = f.get_plottable_data() + +plt.figure() +plt.plot(x, np.abs(y), '--k') + +print("Average: %.8f" % f.avrg()) +print("Absolute average: %.8f" % f.abs_avrg()) + + +f = spk.spike_distance(spike_trains[0], spike_trains[1]) +x, y = f.get_plottable_data() +print(x) +print(y) +#plt.figure() +plt.plot(x, y, '-b') + +plt.show() diff --git a/examples/test_data.py b/examples/test_data.py deleted file mode 100644 index dcd0f20..0000000 --- a/examples/test_data.py +++ /dev/null @@ -1,34 +0,0 @@ -# compute the isi distance of some test data - -from __future__ import print_function - -import numpy as np -import matplotlib.pyplot as plt - -import pyspike as spk - -spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", - time_interval=(0,4000)) - -# plot the spike time -for (i,spikes) in enumerate(spike_trains): - plt.plot(spikes, i*np.ones_like(spikes), 'o') - -f = spk.isi_distance(spike_trains[0], spike_trains[1]) -x, y = f.get_plottable_data() - -plt.figure() -plt.plot(x, np.abs(y), '--k') - -print("Average: %.8f" % f.avrg()) -print("Absolute average: %.8f" % f.abs_avrg()) - - -f = spk.spike_distance(spike_trains[0], spike_trains[1]) -x, y = f.get_plottable_data() -print(x) -print(y) -#plt.figure() -plt.plot(x, y, '-b') - -plt.show() diff --git a/examples/test_merge.py b/examples/test_merge.py deleted file mode 100644 index 0c34608..0000000 --- a/examples/test_merge.py +++ /dev/null @@ -1,20 +0,0 @@ -# compute the isi distance of some test data -from __future__ import print_function - -import numpy as np -import matplotlib.pyplot as plt - -import pyspike as spk - -# first load the data, ending time = 4000 -spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", 4000) - -spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) - -print(spikes) - -plt.plot(spike_trains[0], np.ones_like(spike_trains[0]), 'o') 
-plt.plot(spike_trains[1], np.ones_like(spike_trains[1]), 'x') -plt.plot(spikes, 2*np.ones_like(spikes), 'o') - -plt.show() diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 2703f65..3867e6e 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,3 +1,9 @@ +""" +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + __all__ = ["function", "distances", "spikes"] from function import PieceWiseConstFunc, PieceWiseLinFunc diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index 2be8525..4ab4381 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -11,6 +11,9 @@ Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects improves the performance of spike_distance by a factor of 10! Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) + """ """ diff --git a/pyspike/distances.py b/pyspike/distances.py index da603ad..db04c4e 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -3,6 +3,8 @@ Module containing several functions to compute spike distances Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ import numpy as np diff --git a/pyspike/function.py b/pyspike/function.py index 5444c36..243ef67 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -5,6 +5,8 @@ functions. Copyright 2014, Mario Mulansky +Distributed under the MIT License (MIT) + """ from __future__ import print_function diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index 9134149..e5b74e9 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -3,7 +3,9 @@ Collection of python functions that can be used instead of the cython implementation. -Copyright 2014, Mario Mulanksy +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 502c460..9375e30 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -3,6 +3,8 @@ Module containing several function to load and transform spike trains Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ import numpy as np diff --git a/test/test_distance.py b/test/test_distance.py index 84d0af9..dafe693 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -3,6 +3,9 @@ Tests the isi- and spike-distance computation Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) + """ from __future__ import print_function diff --git a/test/test_function.py b/test/test_function.py index 7420011..c0fb3fd 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -3,6 +3,8 @@ Tests the PieceWiseConst and PieceWiseLinear functions Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ from __future__ import print_function -- cgit v1.2.3 From 4274c328a4927b392036d1c3b759b0787b05f300 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 13 Oct 2014 10:47:18 +0200 Subject: code formatting following PEP8 --- examples/isi_matrix.py | 2 -- examples/plot.py | 6 ++-- pyspike/distances.py | 77 ++++++++++++++++++++++++----------------------- pyspike/function.py | 35 +++++++++++---------- pyspike/python_backend.py | 72 ++++++++++++++++++++++---------------------- pyspike/spikes.py | 48 ++++++++++++++--------------- test/test_distance.py | 37 ++++++++++++----------- test/test_function.py | 28 ++++++++++------- test/test_spikes.py | 27 ++++++++--------- 9 files changed, 168 insertions(+), 164 deletions(-) (limited to 'test/test_function.py') diff --git 
a/examples/isi_matrix.py b/examples/isi_matrix.py index 2a4d075..db740dd 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -11,7 +11,6 @@ Distributed under the MIT License (MIT) from __future__ import print_function -import numpy as np import matplotlib.pyplot as plt import pyspike as spk @@ -25,4 +24,3 @@ m = spk.isi_distance_matrix(spike_trains) plt.imshow(m, interpolation='none') plt.show() - diff --git a/examples/plot.py b/examples/plot.py index 4ff75c4..5c3ad4a 100644 --- a/examples/plot.py +++ b/examples/plot.py @@ -15,11 +15,11 @@ import matplotlib.pyplot as plt import pyspike as spk -spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) +spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) # plot the spike time -for (i,spikes) in enumerate(spike_trains): +for (i, spikes) in enumerate(spike_trains): plt.plot(spikes, i*np.ones_like(spikes), 'o') f = spk.isi_distance(spike_trains[0], spike_trains[1]) diff --git a/pyspike/distances.py b/pyspike/distances.py index db04c4e..b2eec92 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -17,7 +17,7 @@ from pyspike import PieceWiseConstFunc, PieceWiseLinFunc # isi_distance ############################################################ def isi_distance(spikes1, spikes2): - """ Computes the instantaneous isi-distance S_isi (t) of the two given + """ Computes the instantaneous isi-distance S_isi (t) of the two given spike trains. The spike trains are expected to have auxiliary spikes at the beginning and end of the interval. Use the function add_auxiliary_spikes to add those spikes to the spike train. @@ -27,9 +27,9 @@ def isi_distance(spikes1, spikes2): - PieceWiseConstFunc describing the isi-distance. """ # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" # cython implementation @@ -53,9 +53,9 @@ def spike_distance(spikes1, spikes2): - PieceWiseLinFunc describing the spike-distance. """ # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" # cython implementation @@ -74,33 +74,33 @@ def multi_distance(spike_trains, pair_distance_func, indices=None): use isi_distance_multi or spike_distance_multi instead. Computes the multi-variate distance for a set of spike-trains using the - pair_dist_func to compute pair-wise distances. That is it computes the + pair_dist_func to compute pair-wise distances. That is it computes the average distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i,j}, where the sum goes over all pairs . 
Args: - spike_trains: list of spike trains - pair_distance_func: function computing the distance of two spike trains - - indices: list of indices defining which spike trains to use, + - indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) Returns: - The averaged multi-variate distance of all pairs """ - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." # generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] # start with first pair - (i,j) = pairs[0] + (i, j) = pairs[0] average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - for (i,j) in pairs[1:]: + for (i, j) in pairs[1:]: current_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - average_dist.add(current_dist) # add to the average - average_dist.mul_scalar(1.0/len(pairs)) # normalize + average_dist.add(current_dist) # add to the average + average_dist.mul_scalar(1.0/len(pairs)) # normalize return average_dist @@ -113,45 +113,46 @@ def multi_distance_par(spike_trains, pair_distance_func, indices=None): """ num_threads = 2 - lock = threading.Lock() + def run(spike_trains, index_pairs, average_dist): - (i,j) = index_pairs[0] + (i, j) = index_pairs[0] # print(i,j) this_avrg = pair_distance_func(spike_trains[i], spike_trains[j]) - for (i,j) in index_pairs[1:]: + for (i, j) in index_pairs[1:]: # print(i,j) current_dist = pair_distance_func(spike_trains[i], spike_trains[j]) this_avrg.add(current_dist) with lock: - average_dist.add(this_avrg) + average_dist.add(this_avrg) - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." # generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] num_pairs = len(pairs) # start with first pair - (i,j) = pairs[0] + (i, j) = pairs[0] average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) # remove the one we already computed pairs = pairs[1:] # distribute the rest into num_threads pieces - clustered_pairs = [ pairs[i::num_threads] for i in xrange(num_threads) ] + clustered_pairs = [pairs[n::num_threads] for n in xrange(num_threads)] threads = [] for pairs in clustered_pairs: - t = threading.Thread(target=run, args=(spike_trains, pairs, average_dist)) + t = threading.Thread(target=run, args=(spike_trains, pairs, + average_dist)) threads.append(t) t.start() for t in threads: t.join() - average_dist.mul_scalar(1.0/num_pairs) # normalize + average_dist.mul_scalar(1.0/num_pairs) # normalize return average_dist @@ -161,11 +162,11 @@ def multi_distance_par(spike_trains, pair_distance_func, indices=None): def isi_distance_multi(spike_trains, indices=None): """ computes the multi-variate isi-distance for a set of spike-trains. 
That is the average isi-distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i,j}, where the sum goes over all pairs Args: - spike_trains: list of spike trains - - indices: list of indices defining which spike trains to use, + - indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) Returns: - A PieceWiseConstFunc representing the averaged isi distance S @@ -177,13 +178,13 @@ def isi_distance_multi(spike_trains, indices=None): # spike_distance_multi ############################################################ def spike_distance_multi(spike_trains, indices=None): - """ computes the multi-variate spike-distance for a set of spike-trains. + """ computes the multi-variate spike-distance for a set of spike-trains. That is the average spike-distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i, j}, where the sum goes over all pairs Args: - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use, + - indices: list of indices defining which spike-trains to use, if None all given spike trains are used (default=None) Returns: - A PieceWiseLinFunc representing the averaged spike distance S @@ -198,21 +199,21 @@ def isi_distance_matrix(spike_trains, indices=None): - indices: list of indices defining which spike-trains to use if None all given spike-trains are used (default=None) Return: - - a 2D array of size len(indices)*len(indices) containing the average + - a 2D array of size len(indices)*len(indices) containing the average pair-wise isi-distance """ - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." # generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] distance_matrix = np.zeros((len(indices), len(indices))) - for i,j in pairs: + for i, j in pairs: d = isi_distance(spike_trains[i], spike_trains[j]).abs_avrg() - distance_matrix[i,j] = d - distance_matrix[j,i] = d + distance_matrix[i, j] = d + distance_matrix[j, i] = d return distance_matrix diff --git a/pyspike/function.py b/pyspike/function.py index 243ef67..8107538 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -1,7 +1,7 @@ """ function.py -Module containing classes representing piece-wise constant and piece-wise linear -functions. +Module containing classes representing piece-wise constant and piece-wise +linear functions. Copyright 2014, Mario Mulansky @@ -35,7 +35,7 @@ class PieceWiseConstFunc: Args: - other: another PieceWiseConstFunc object Returns: - True if the two functions are equal up to `decimal` decimals, + True if the two functions are equal up to `decimal` decimals, False otherwise """ eps = 10.0**(-decimal) @@ -61,23 +61,23 @@ class PieceWiseConstFunc: """ Computes the average of the piece-wise const function: a = 1/T int f(x) dx where T is the length of the interval. Returns: - - the average a. + - the average a. 
""" return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ (self.x[-1]-self.x[0]) def abs_avrg(self): - """ Computes the average of the abs value of the piece-wise const + """ Computes the average of the abs value of the piece-wise const function: a = 1/T int |f(x)| dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * np.abs(self.y)) / \ (self.x[-1]-self.x[0]) def add(self, f): - """ Adds another PieceWiseConst function to this function. + """ Adds another PieceWiseConst function to this function. Note: only functions defined on the same interval can be summed. Args: - f: PieceWiseConst function to be added. @@ -87,13 +87,13 @@ class PieceWiseConstFunc: # python implementation # from python_backend import add_piece_wise_const_python - # self.x, self.y = add_piece_wise_const_python(self.x, self.y, f.x, f.y) + # self.x, self.y = add_piece_wise_const_python(self.x, self.y, + # f.x, f.y) # cython version from cython_add import add_piece_wise_const_cython self.x, self.y = add_piece_wise_const_cython(self.x, self.y, f.x, f.y) - def mul_scalar(self, fac): """ Multiplies the function with a scalar value Args: @@ -113,10 +113,10 @@ class PieceWiseLinFunc: Args: - x: array of length N+1 defining the edges of the intervals of the pwc function. - - y1: array of length N defining the function values at the left of the - intervals. - - y2: array of length N defining the function values at the right of the + - y1: array of length N defining the function values at the left of the intervals. + - y2: array of length N defining the function values at the right of + the intervals. """ self.x = np.array(x) self.y1 = np.array(y1) @@ -128,7 +128,7 @@ class PieceWiseLinFunc: Args: - other: another PieceWiseLinFunc object Returns: - True if the two functions are equal up to `decimal` decimals, + True if the two functions are equal up to `decimal` decimals, False otherwise """ eps = 10.0**(-decimal) @@ -153,7 +153,7 @@ class PieceWiseLinFunc: """ Computes the average of the piece-wise linear function: a = 1/T int f(x) dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ (self.x[-1]-self.x[0]) @@ -162,13 +162,13 @@ class PieceWiseLinFunc: """ Computes the absolute average of the piece-wise linear function: a = 1/T int |f(x)| dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * 0.5 * (np.abs(self.y1)+np.abs(self.y2)))/(self.x[-1]-self.x[0]) def add(self, f): - """ Adds another PieceWiseLin function to this function. + """ Adds another PieceWiseLin function to this function. Note: only functions defined on the same interval can be summed. Args: - f: PieceWiseLin function to be added. 
@@ -178,7 +178,7 @@ class PieceWiseLinFunc: # python implementation # from python_backend import add_piece_wise_lin_python - # self.x, self.y1, self.y2 = add_piece_wise_lin_python( + # self.x, self.y1, self.y2 = add_piece_wise_lin_python( # self.x, self.y1, self.y2, f.x, f.y1, f.y2) # cython version @@ -186,7 +186,6 @@ class PieceWiseLinFunc: self.x, self.y1, self.y2 = add_piece_wise_lin_cython( self.x, self.y1, self.y2, f.x, f.y1, f.y2) - def mul_scalar(self, fac): """ Multiplies the function with a scalar value Args: diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index e5b74e9..cf1a92f 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -1,6 +1,6 @@ """ python_backend.py -Collection of python functions that can be used instead of the cython +Collection of python functions that can be used instead of the cython implementation. Copyright 2014, Mario Mulansky @@ -21,18 +21,18 @@ def isi_distance_python(s1, s2): """ Plain Python implementation of the isi distance. """ # compute the interspike interval - nu1 = s1[1:]-s1[:-1] - nu2 = s2[1:]-s2[:-1] - + nu1 = s1[1:] - s1[:-1] + nu2 = s2[1:] - s2[:-1] + # compute the isi-distance - spike_events = np.empty(len(nu1)+len(nu2)) + spike_events = np.empty(len(nu1) + len(nu2)) spike_events[0] = s1[0] # the values have one entry less - the number of intervals between events - isi_values = np.empty(len(spike_events)-1) + isi_values = np.empty(len(spike_events) - 1) # add the distance of the first events # isi_values[0] = nu1[0]/nu2[0] - 1.0 if nu1[0] <= nu2[0] \ # else 1.0 - nu2[0]/nu1[0] - isi_values[0] = (nu1[0]-nu2[0])/max(nu1[0],nu2[0]) + isi_values[0] = (nu1[0] - nu2[0]) / max(nu1[0], nu2[0]) index1 = 0 index2 = 0 index = 1 @@ -49,28 +49,28 @@ def isi_distance_python(s1, s2): if index2 >= len(nu2): break spike_events[index] = s2[index2] - else: # s1[index1+1] == s2[index2+1] + else: # s1[index1 + 1] == s2[index2 + 1] index1 += 1 index2 += 1 if (index1 >= len(nu1)) or (index2 >= len(nu2)): break spike_events[index] = s1[index1] # compute the corresponding isi-distance - isi_values[index] = (nu1[index1]-nu2[index2]) / \ - max(nu1[index1], nu2[index2]) + isi_values[index] = (nu1[index1] - nu2[index2]) / \ + max(nu1[index1], nu2[index2]) index += 1 # the last event is the interval end spike_events[index] = s1[-1] - # use only the data added above + # use only the data added above # could be less than original length due to equal spike times - return PieceWiseConstFunc(spike_events[:index+1], isi_values[:index]) + return PieceWiseConstFunc(spike_events[:index + 1], isi_values[:index]) ############################################################ # get_min_dist ############################################################ def get_min_dist(spike_time, spike_train, start_index=0): - """ Returns the minimal distance |spike_time - spike_train[i]| + """ Returns the minimal distance |spike_time - spike_train[i]| with i>=start_index. """ d = abs(spike_time - spike_train[start_index]) @@ -99,18 +99,18 @@ def spike_distance_python(spikes1, spikes2): - PieceWiseLinFunc describing the spike-distance. """ # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" 
# shorter variables t1 = spikes1 t2 = spikes2 - spike_events = np.empty(len(t1)+len(t2)-2) + spike_events = np.empty(len(t1) + len(t2) - 2) spike_events[0] = t1[0] - y_starts = np.empty(len(spike_events)-1) - y_ends = np.empty(len(spike_events)-1) + y_starts = np.empty(len(spike_events) - 1) + y_ends = np.empty(len(spike_events) - 1) index1 = 0 index2 = 0 @@ -133,9 +133,10 @@ def spike_distance_python(spikes1, spikes2): break spike_events[index] = t1[index1] # first calculate the previous interval end value - dt_p1 = dt_f1 # the previous time now was the following time before + dt_p1 = dt_f1 # the previous time was the following time before s1 = dt_p1 - s2 = (dt_p2*(t2[index2+1]-t1[index1]) + dt_f2*(t1[index1]-t2[index2])) / isi2 + s2 = (dt_p2*(t2[index2+1]-t1[index1]) + + dt_f2*(t1[index1]-t2[index2])) / isi2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) # now the next interval start value dt_f1 = get_min_dist(t1[index1+1], t2, index2) @@ -148,8 +149,9 @@ def spike_distance_python(spikes1, spikes2): break spike_events[index] = t2[index2] # first calculate the previous interval end value - dt_p2 = dt_f2 # the previous time now was the following time before - s1 = (dt_p1*(t1[index1+1]-t2[index2]) + dt_f1*(t2[index2]-t1[index1])) / isi1 + dt_p2 = dt_f2 # the previous time was the following time before + s1 = (dt_p1*(t1[index1+1]-t2[index2]) + + dt_f1*(t2[index2]-t1[index1])) / isi1 s2 = dt_p2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) # now the next interval start value @@ -158,7 +160,7 @@ def spike_distance_python(spikes1, spikes2): isi2 = t2[index2+1]-t2[index2] # s2 is the same as above, thus we can compute y2 immediately y_starts[index] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) - else: # t1[index1+1] == t2[index2+1] - generate only one event + else: # t1[index1+1] == t2[index2+1] - generate only one event index1 += 1 index2 += 1 if (index1+1 >= len(t1)) or (index2+1 >= len(t2)): @@ -183,9 +185,9 @@ def spike_distance_python(spikes1, spikes2): s1 = dt_p1*(t1[-1]-t1[-2])/isi1 s2 = dt_p2*(t2[-1]-t2[-2])/isi2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) - # use only the data added above + # use only the data added above # could be less than original length due to equal spike times - return PieceWiseLinFunc(spike_events[:index+1], + return PieceWiseLinFunc(spike_events[:index+1], y_starts[:index], y_ends[:index]) @@ -209,7 +211,7 @@ def add_piece_wise_const_python(x1, y1, x2, y2): elif x1[index1+1] > x2[index2+1]: index2 += 1 x_new[index] = x2[index2] - else: # x1[index1+1] == x2[index2+1]: + else: # x1[index1+1] == x2[index2+1]: index1 += 1 index2 += 1 x_new[index] = x1[index1] @@ -217,15 +219,13 @@ def add_piece_wise_const_python(x1, y1, x2, y2): # one array reached the end -> copy the contents of the other to the end if index1+1 < len(y1): x_new[index+1:index+1+len(x1)-index1-1] = x1[index1+1:] - y_new[index+1:index+1+len(y1)-index1-1] = y1[index1+1:] + \ - y2[-1] + y_new[index+1:index+1+len(y1)-index1-1] = y1[index1+1:] + y2[-1] index += len(x1)-index1-2 elif index2+1 < len(y2): x_new[index+1:index+1+len(x2)-index2-1] = x2[index2+1:] - y_new[index+1:index+1+len(y2)-index2-1] = y2[index2+1:] + \ - y1[-1] + y_new[index+1:index+1+len(y2)-index2-1] = y2[index2+1:] + y1[-1] index += len(x2)-index2-2 - else: # both arrays reached the end simultaneously + else: # both arrays reached the end simultaneously # only the last x-value missing x_new[index+1] = x1[-1] # the last value is again the end of the interval @@ -244,9 +244,9 @@ def 
add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): y2_new = np.empty_like(y1_new) x_new[0] = x1[0] y1_new[0] = y11[0] + y21[0] - index1 = 0 # index for self - index2 = 0 # index for f - index = 0 # index for new + index1 = 0 # index for self + index2 = 0 # index for f + index = 0 # index for new while (index1+1 < len(y11)) and (index2+1 < len(y21)): # print(index1+1, x1[index1+1], self.y[index1+1], x_new[index]) if x1[index1+1] < x2[index2+1]: @@ -272,7 +272,7 @@ def add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): x_new[index] = x2[index2] # and the starting value for the next interval y1_new[index] = y21[index2] + y - else: # x1[index1+1] == x2[index2+1]: + else: # x1[index1+1] == x2[index2+1]: y2_new[index] = y12[index1] + y22[index2] index1 += 1 index2 += 1 @@ -297,7 +297,7 @@ def add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): y1_new[index+1:index+1+len(y21)-index2-1] = y21[index2+1:] + y y2_new[index:index+len(y22)-index2-1] = y22[index2:-1] + y index += len(x2)-index2-2 - else: # both arrays reached the end simultaneously + else: # both arrays reached the end simultaneously # only the last x-value missing x_new[index+1] = x1[-1] # finally, the end value for the last interval diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6ea94de..c496ab8 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -31,11 +31,11 @@ def add_auxiliary_spikes(spike_train, time_interval): except: T_start = 0 T_end = time_interval - + assert spike_train[0] >= T_start, \ - "Spike train has events before the given start time" + "Spike train has events before the given start time" assert spike_train[-1] <= T_end, \ - "Spike train has events after the given end time" + "Spike train has events after the given end time" if spike_train[0] != T_start: spike_train = np.insert(spike_train, 0, T_start) if spike_train[-1] != T_end: @@ -64,16 +64,16 @@ def spike_train_from_string(s, sep=' ', sort=True): ############################################################ # load_spike_trains_txt ############################################################ -def load_spike_trains_from_txt(file_name, time_interval=None, +def load_spike_trains_from_txt(file_name, time_interval=None, separator=' ', comment='#', sort=True): - """ Loads a number of spike trains from a text file. Each line of the text - file should contain one spike train as a sequence of spike times separated - by `separator`. Empty lines as well as lines starting with `comment` are - neglected. The `time_interval` represents the start and the end of the spike - trains and it is used to add auxiliary spikes at the beginning and end of - each spike train. However, if `time_interval == None`, no auxiliary spikes - are added, but note that the Spike and ISI distance both require auxiliary - spikes. + """ Loads a number of spike trains from a text file. Each line of the text + file should contain one spike train as a sequence of spike times separated + by `separator`. Empty lines as well as lines starting with `comment` are + neglected. The `time_interval` represents the start and the end of the + spike trains and it is used to add auxiliary spikes at the beginning and + end of each spike train. However, if `time_interval == None`, no auxiliary + spikes are added, but note that the Spike and ISI distance both require + auxiliary spikes. Args: - file_name: The name of the text file. 
- time_interval: A pair (T_start, T_end) of values representing the start @@ -87,10 +87,10 @@ def load_spike_trains_from_txt(file_name, time_interval=None, spike_trains = [] spike_file = open(file_name, 'r') for line in spike_file: - if len(line) > 1 and not line.startswith(comment): + if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented spike_train = spike_train_from_string(line, separator, sort) - if not time_interval == None: # add auxiliary spikes if times given + if time_interval is not None: # add auxil. spikes if times given spike_train = add_auxiliary_spikes(spike_train, time_interval) spike_trains.append(spike_train) return spike_trains @@ -109,19 +109,19 @@ def merge_spike_trains(spike_trains): # get the lengths of the spike trains lens = np.array([len(st) for st in spike_trains]) merged_spikes = np.empty(np.sum(lens)) - index = 0 # the index for merged_spikes - indices = np.zeros_like(lens) # indices of the spike trains - index_list = np.arange(len(indices)) # indices of indices of spike trains - # that have not yet reached the end + index = 0 # the index for merged_spikes + indices = np.zeros_like(lens) # indices of the spike trains + index_list = np.arange(len(indices)) # indices of indices of spike trains + # that have not yet reached the end # list of the possible events in the spike trains vals = [spike_trains[i][indices[i]] for i in index_list] while len(index_list) > 0: - i = np.argmin(vals) # the next spike is the minimum - merged_spikes[index] = vals[i] # put it to the merged spike train + i = np.argmin(vals) # the next spike is the minimum + merged_spikes[index] = vals[i] # put it to the merged spike train i = index_list[i] - index += 1 # next index of merged spike train - indices[i] += 1 # next index for the chosen spike train - if indices[i] >= lens[i]: # remove spike train index if ended + index += 1 # next index of merged spike train + indices[i] += 1 # next index for the chosen spike train + if indices[i] >= lens[i]: # remove spike train index if ended index_list = index_list[index_list != i] - vals = [spike_trains[i][indices[i]] for i in index_list] + vals = [spike_trains[n][indices[n]] for n in index_list] return merged_spikes diff --git a/test/test_distance.py b/test/test_distance.py index dafe693..3371cbd 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -22,8 +22,8 @@ def test_isi(): t2 = np.array([0.3, 0.45, 0.8, 0.9, 0.95]) # pen&paper calculation of the isi distance - expected_times = [0.0,0.2,0.3,0.4,0.45,0.6,0.7,0.8,0.9,0.95,1.0] - expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, + expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] + expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, -0.25/0.35, -0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3] t1 = spk.add_auxiliary_spikes(t1, 1.0) @@ -36,10 +36,10 @@ def test_isi(): assert_array_almost_equal(f.y, expected_isi, decimal=14) # check with some equal spike times - t1 = np.array([0.2,0.4,0.6]) - t2 = np.array([0.1,0.4,0.5,0.6]) + t1 = np.array([0.2, 0.4, 0.6]) + t2 = np.array([0.1, 0.4, 0.5, 0.6]) - expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0] + expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0] expected_isi = [0.1/0.2, -0.1/0.3, -0.1/0.3, 0.1/0.2, 0.1/0.2, -0.0/0.5] t1 = spk.add_auxiliary_spikes(t1, 1.0) @@ -56,11 +56,11 @@ def test_spike(): t2 = np.array([0.3, 0.45, 0.8, 0.9, 0.95]) # pen&paper calculation of the spike distance - expected_times = 
[0.0,0.2,0.3,0.4,0.45,0.6,0.7,0.8,0.9,0.95,1.0] + expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] s1 = np.array([0.1, 0.1, (0.1*0.1+0.05*0.1)/0.2, 0.05, (0.05*0.15 * 2)/0.2, 0.15, 0.1, 0.1*0.2/0.3, 0.1**2/0.3, 0.1*0.05/0.3, 0.1]) - s2 = np.array([0.1, 0.1*0.2/0.3, 0.1, (0.1*0.05 * 2)/.15, 0.05, - (0.05*0.2+0.1*0.15)/0.35, (0.05*0.1+0.1*0.25)/0.35, + s2 = np.array([0.1, 0.1*0.2/0.3, 0.1, (0.1*0.05 * 2)/.15, 0.05, + (0.05*0.2+0.1*0.15)/0.35, (0.05*0.1+0.1*0.25)/0.35, 0.1, 0.1, 0.05, 0.05]) isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3]) isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05]) @@ -76,17 +76,17 @@ def test_spike(): assert_array_almost_equal(f.y2, expected_y2, decimal=14) # check with some equal spike times - t1 = np.array([0.2,0.4,0.6]) - t2 = np.array([0.1,0.4,0.5,0.6]) + t1 = np.array([0.2, 0.4, 0.6]) + t2 = np.array([0.1, 0.4, 0.5, 0.6]) - expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0] + expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0] s1 = np.array([0.1, 0.1*0.1/0.2, 0.1, 0.0, 0.0, 0.0, 0.0]) s2 = np.array([0.1*0.1/0.3, 0.1, 0.1*0.2/0.3, 0.0, 0.1, 0.0, 0.0]) isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4]) isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4]) expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2) expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2) - + t1 = spk.add_auxiliary_spikes(t1, 1.0) t2 = spk.add_auxiliary_spikes(t2, 1.0) f = spk.spike_distance(t1, t2) @@ -100,8 +100,8 @@ def check_multi_distance(dist_func, dist_func_multi): # generate spike trains: t1 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6, 0.7]), 1.0) t2 = spk.add_auxiliary_spikes(np.array([0.3, 0.45, 0.8, 0.9, 0.95]), 1.0) - t3 = spk.add_auxiliary_spikes(np.array([0.2,0.4,0.6]), 1.0) - t4 = spk.add_auxiliary_spikes(np.array([0.1,0.4,0.5,0.6]), 1.0) + t3 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6]), 1.0) + t4 = spk.add_auxiliary_spikes(np.array([0.1, 0.4, 0.5, 0.6]), 1.0) spike_trains = [t1, t2, t3, t4] f12 = dist_func(t1, t2) @@ -111,17 +111,17 @@ def check_multi_distance(dist_func, dist_func_multi): f24 = dist_func(t2, t4) f34 = dist_func(t3, t4) - f_multi = dist_func_multi(spike_trains, [0,1]) + f_multi = dist_func_multi(spike_trains, [0, 1]) assert f_multi.almost_equal(f12, decimal=14) f = copy(f12) f.add(f13) f.add(f23) f.mul_scalar(1.0/3) - f_multi = dist_func_multi(spike_trains, [0,1,2]) + f_multi = dist_func_multi(spike_trains, [0, 1, 2]) assert f_multi.almost_equal(f, decimal=14) - f.mul_scalar(3) # revert above normalization + f.mul_scalar(3) # revert above normalization f.add(f14) f.add(f24) f.add(f34) @@ -139,6 +139,7 @@ def test_multi_spike(): if __name__ == "__main__": - test_auxiliary_spikes() test_isi() test_spike() + test_multi_isi() + test_multi_spike() diff --git a/test/test_function.py b/test/test_function.py index c0fb3fd..ed7d6bc 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -10,18 +10,18 @@ Distributed under the MIT License (MIT) from __future__ import print_function import numpy as np from copy import copy -from numpy.testing import assert_equal, assert_almost_equal, \ - assert_array_almost_equal +from numpy.testing import assert_almost_equal, assert_array_almost_equal import pyspike as spk + def test_pwc(): # some random data x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) xp, yp = f.get_plottable_data() - + xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] yp_expected = [1.0, 1.0, -0.5, -0.5, 1.5, 1.5, 
0.75, 0.75] assert_array_almost_equal(xp, xp_expected, decimal=16) @@ -51,17 +51,18 @@ def test_pwc_add(): f2.add(f) assert_array_almost_equal(f2.x, x_expected, decimal=16) assert_array_almost_equal(f2.y, y_expected, decimal=16) - + f1.add(f2) # same x, but y doubled assert_array_almost_equal(f1.x, f2.x, decimal=16) assert_array_almost_equal(f1.y, 2*f2.y, decimal=16) + def test_pwc_mul(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) - + f.mul_scalar(1.5) assert_array_almost_equal(f.x, x, decimal=16) assert_array_almost_equal(f.y, 1.5*np.array(y), decimal=16) @@ -75,15 +76,15 @@ def test_pwl(): y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) xp, yp = f.get_plottable_data() - + xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] yp_expected = [1.0, 1.5, -0.5, -0.4, 1.5, 1.5, 0.75, 0.25] assert_array_almost_equal(xp, xp_expected, decimal=16) assert_array_almost_equal(yp, yp_expected, decimal=16) - + avrg_expected = (1.25 - 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.avrg(), avrg_expected, decimal=16) - + abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16) @@ -113,7 +114,7 @@ def test_pwl_add(): assert_array_almost_equal(f2.x, x_expected, decimal=16) assert_array_almost_equal(f2.y1, y1_expected, decimal=16) assert_array_almost_equal(f2.y2, y2_expected, decimal=16) - + f1.add(f2) # same x, but y doubled assert_array_almost_equal(f1.x, f2.x, decimal=16) @@ -121,12 +122,12 @@ def test_pwl_add(): assert_array_almost_equal(f1.y2, 2*f2.y2, decimal=16) -def test_pwc_mul(): +def test_pwl_mul(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y1 = [1.0, -0.5, 1.5, 0.75] y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) - + f.mul_scalar(1.5) assert_array_almost_equal(f.x, x, decimal=16) assert_array_almost_equal(f.y1, 1.5*np.array(y1), decimal=16) @@ -137,3 +138,8 @@ def test_pwc_mul(): if __name__ == "__main__": test_pwc() + test_pwc_add() + test_pwc_mul() + test_pwl() + test_pwl_add() + test_pwl_mul() diff --git a/test/test_spikes.py b/test/test_spikes.py index e008207..349e0bf 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -23,13 +23,13 @@ def test_auxiliary_spikes(): def test_load_from_txt(): - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) assert len(spike_trains) == 40 # check the first spike train - spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1, - 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7, + spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1, + 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7, 3644.3, 3936.3, 4000] assert_equal(spike_times, spike_trains[0]) @@ -39,15 +39,15 @@ def test_load_from_txt(): assert spike_train[-1] == 4000 # load without adding auxiliary spikes - spike_trains2 = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=None) + spike_trains2 = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=None) assert len(spike_trains2) == 40 # check auxiliary spikes for i in xrange(len(spike_trains)): - assert len(spike_trains[i]) == len(spike_trains2[i])+2 # two spikes less + assert len(spike_trains[i]) == len(spike_trains2[i])+2 # 2 spikes less -def check_merged_spikes( merged_spikes, spike_trains ): +def check_merged_spikes(merged_spikes, spike_trains): # create a flat array with all spike 
events all_spikes = np.array([]) for spike_train in spike_trains: @@ -55,7 +55,7 @@ def check_merged_spikes( merged_spikes, spike_trains ): indices = np.zeros_like(all_spikes, dtype='bool') # check if we find all the spike events in the original spike trains for x in merged_spikes: - i = np.where(all_spikes == x)[0][0] # the first axis and the first entry + i = np.where(all_spikes == x)[0][0] # first axis and first entry # change to something impossible so we dont find this event again all_spikes[i] = -1.0 indices[i] = True @@ -64,23 +64,22 @@ def check_merged_spikes( merged_spikes, spike_trains ): def test_merge_spike_trains(): # first load the data - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) # test if result is sorted assert((spikes == np.sort(spikes)).all()) # check merging - check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] ) + check_merged_spikes(spikes, [spike_trains[0], spike_trains[1]]) spikes = spk.merge_spike_trains(spike_trains) # test if result is sorted assert((spikes == np.sort(spikes)).all()) # check merging - check_merged_spikes( spikes, spike_trains ) + check_merged_spikes(spikes, spike_trains) if __name__ == "main": test_auxiliary_spikes() test_load_from_txt() test_merge_spike_trains() - -- cgit v1.2.3 From 5ce807943fab2ba233cff661e34e4d6a83397b99 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 13 Oct 2014 11:03:42 +0200 Subject: changed to BSD license --- License | 25 ++++++++----------------- examples/isi_matrix.py | 2 +- examples/merge.py | 2 +- examples/plot.py | 2 +- pyspike/__init__.py | 2 +- pyspike/cython_distance.pyx | 2 +- pyspike/distances.py | 2 +- pyspike/function.py | 2 +- pyspike/python_backend.py | 2 +- pyspike/spikes.py | 2 +- test/test_distance.py | 2 +- test/test_function.py | 2 +- test/test_spikes.py | 2 +- 13 files changed, 20 insertions(+), 29 deletions(-) (limited to 'test/test_function.py') diff --git a/License b/License index 95d0405..472deac 100644 --- a/License +++ b/License @@ -1,21 +1,12 @@ -The MIT License (MIT) +BSD License -Copyright (c) 2014 Mario Mulansky, +Copyright (c) 2014, Mario Mulansky +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/examples/isi_matrix.py b/examples/isi_matrix.py index db740dd..7bf1cf9 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -5,7 +5,7 @@ trains. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/examples/merge.py b/examples/merge.py index 726d32b..2550cdb 100644 --- a/examples/merge.py +++ b/examples/merge.py @@ -4,7 +4,7 @@ Simple example showing the merging of two spike trains. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/examples/plot.py b/examples/plot.py index 5c3ad4a..da53670 100644 --- a/examples/plot.py +++ b/examples/plot.py @@ -4,7 +4,7 @@ Simple example showing how to load and plot spike trains and their distances. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 3867e6e..c58a6b1 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,7 +1,7 @@ """ Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ __all__ = ["function", "distances", "spikes"] diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index 4ab4381..ccf8060 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -12,7 +12,7 @@ improves the performance of spike_distance by a factor of 10! Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/distances.py b/pyspike/distances.py index b2eec92..3b9fe1f 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -4,7 +4,7 @@ Module containing several functions to compute spike distances Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ import numpy as np diff --git a/pyspike/function.py b/pyspike/function.py index 8107538..7722cc3 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -5,7 +5,7 @@ linear functions. 
Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index cf1a92f..a1f5ea2 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -5,7 +5,7 @@ implementation. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index c496ab8..d390222 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -4,7 +4,7 @@ Module containing several function to load and transform spike trains Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ import numpy as np diff --git a/test/test_distance.py b/test/test_distance.py index 3371cbd..b500b2c 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -4,7 +4,7 @@ Tests the isi- and spike-distance computation Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/test/test_function.py b/test/test_function.py index ed7d6bc..a579796 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -4,7 +4,7 @@ Tests the PieceWiseConst and PieceWiseLinear functions Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/test/test_spikes.py b/test/test_spikes.py index 349e0bf..bf914c0 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -4,7 +4,7 @@ Test loading of spike trains from text files Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function -- cgit v1.2.3 From 65801901e6d3325c8d1c82ab92334ca19ebd92d7 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 14 Oct 2014 17:49:23 +0200 Subject: changed isi distance profile to abs values --- examples/plot.py | 2 -- pyspike/cython_distance.pyx | 4 ++-- pyspike/distances.py | 2 +- pyspike/function.py | 16 +++------------- test/test_distance.py | 6 +++--- test/test_function.py | 2 -- 6 files changed, 9 insertions(+), 23 deletions(-) (limited to 'test/test_function.py') diff --git a/examples/plot.py b/examples/plot.py index da53670..4ac8f5d 100644 --- a/examples/plot.py +++ b/examples/plot.py @@ -29,8 +29,6 @@ plt.figure() plt.plot(x, np.abs(y), '--k') print("Average: %.8f" % f.avrg()) -print("Absolute average: %.8f" % f.abs_avrg()) - f = spk.spike_distance(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index ccf8060..178fcba 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -60,7 +60,7 @@ def isi_distance_cython(double[:] s1, isi_values = np.empty(N1+N2-1) with nogil: # release the interpreter to allow multithreading - isi_values[0] = (nu1-nu2)/fmax(nu1,nu2) + isi_values[0] = fabs(nu1-nu2)/fmax(nu1, nu2) index1 = 0 index2 = 0 index = 1 @@ -88,7 +88,7 @@ def isi_distance_cython(double[:] s1, nu1 = s1[index1+1]-s1[index1] nu2 = s2[index2+1]-s2[index2] # compute the corresponding isi-distance - isi_values[index] = (nu1 - nu2) / fmax(nu1, nu2) + isi_values[index] = fabs(nu1 - nu2) / fmax(nu1, nu2) index += 1 # the last event is the interval end spike_events[index] = s1[N1] diff --git a/pyspike/distances.py b/pyspike/distances.py index 3b9fe1f..08d0ed8 100644 --- 
a/pyspike/distances.py +++ b/pyspike/distances.py @@ -213,7 +213,7 @@ def isi_distance_matrix(spike_trains, indices=None): distance_matrix = np.zeros((len(indices), len(indices))) for i, j in pairs: - d = isi_distance(spike_trains[i], spike_trains[j]).abs_avrg() + d = isi_distance(spike_trains[i], spike_trains[j]).avrg() distance_matrix[i, j] = d distance_matrix[j, i] = d return distance_matrix diff --git a/pyspike/function.py b/pyspike/function.py index 7722cc3..bd3e2d5 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -18,7 +18,7 @@ import numpy as np ############################################################## class PieceWiseConstFunc: """ A class representing a piece-wise constant function. """ - + def __init__(self, x, y): """ Constructs the piece-wise const function. Args: @@ -66,16 +66,6 @@ class PieceWiseConstFunc: return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ (self.x[-1]-self.x[0]) - def abs_avrg(self): - """ Computes the average of the abs value of the piece-wise const - function: - a = 1/T int |f(x)| dx where T is the length of the interval. - Returns: - - the average a. - """ - return np.sum((self.x[1:]-self.x[:-1]) * np.abs(self.y)) / \ - (self.x[-1]-self.x[0]) - def add(self, f): """ Adds another PieceWiseConst function to this function. Note: only functions defined on the same interval can be summed. @@ -84,7 +74,7 @@ class PieceWiseConstFunc: """ assert self.x[0] == f.x[0], "The functions have different intervals" assert self.x[-1] == f.x[-1], "The functions have different intervals" - + # python implementation # from python_backend import add_piece_wise_const_python # self.x, self.y = add_piece_wise_const_python(self.x, self.y, @@ -107,7 +97,7 @@ class PieceWiseConstFunc: ############################################################## class PieceWiseLinFunc: """ A class representing a piece-wise linear function. """ - + def __init__(self, x, y1, y2): """ Constructs the piece-wise linear function. 
Args: diff --git a/test/test_distance.py b/test/test_distance.py index b500b2c..6e50467 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -23,8 +23,8 @@ def test_isi(): # pen&paper calculation of the isi distance expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] - expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, - -0.25/0.35, -0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3] + expected_isi = [0.1/0.3, 0.1/0.3, 0.05/0.2, 0.05/0.2, 0.15/0.35, + 0.25/0.35, 0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3] t1 = spk.add_auxiliary_spikes(t1, 1.0) t2 = spk.add_auxiliary_spikes(t2, 1.0) @@ -40,7 +40,7 @@ def test_isi(): t2 = np.array([0.1, 0.4, 0.5, 0.6]) expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0] - expected_isi = [0.1/0.2, -0.1/0.3, -0.1/0.3, 0.1/0.2, 0.1/0.2, -0.0/0.5] + expected_isi = [0.1/0.2, 0.1/0.3, 0.1/0.3, 0.1/0.2, 0.1/0.2, 0.0/0.5] t1 = spk.add_auxiliary_spikes(t1, 1.0) t2 = spk.add_auxiliary_spikes(t2, 1.0) diff --git a/test/test_function.py b/test/test_function.py index a579796..c5caa5a 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -28,8 +28,6 @@ def test_pwc(): assert_array_almost_equal(yp, yp_expected, decimal=16) assert_almost_equal(f.avrg(), (1.0-0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16) - assert_almost_equal(f.abs_avrg(), (1.0+0.5+0.5*1.5+1.5*0.75)/4.0, - decimal=16) def test_pwc_add(): -- cgit v1.2.3 From 2a99e3bf2c575efc9abbc1cf262810d223f2cad0 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 15 Oct 2014 12:32:09 +0200 Subject: +average_profile function --- pyspike/__init__.py | 2 +- pyspike/function.py | 35 +++++++++++++++++++++++++++++++++++ test/test_function.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 1 deletion(-) (limited to 'test/test_function.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index c58a6b1..1bfa7fc 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -6,7 +6,7 @@ Distributed under the BSD License __all__ = ["function", "distances", "spikes"] -from function import PieceWiseConstFunc, PieceWiseLinFunc +from function import PieceWiseConstFunc, PieceWiseLinFunc, average_profile from distances import isi_distance, spike_distance, \ isi_distance_multi, spike_distance_multi, isi_distance_matrix from spikes import add_auxiliary_spikes, load_spike_trains_from_txt, \ diff --git a/pyspike/function.py b/pyspike/function.py index bd3e2d5..46fdea2 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -26,9 +26,17 @@ class PieceWiseConstFunc: function. - y: array of length N defining the function values at the intervals. """ + # convert parameters to arrays, also ensures copying self.x = np.array(x) self.y = np.array(y) + def copy(self): + """ Returns a copy of itself + Returns: + - PieceWiseConstFunc copy + """ + return PieceWiseConstFunc(self.x, self.y) + def almost_equal(self, other, decimal=14): """ Checks if the function is equal to another function up to `decimal` precision. @@ -108,10 +116,18 @@ class PieceWiseLinFunc: - y2: array of length N defining the function values at the right of the intervals. """ + # convert to array, which also ensures copying self.x = np.array(x) self.y1 = np.array(y1) self.y2 = np.array(y2) + def copy(self): + """ Returns a copy of itself + Returns: + - PieceWiseLinFunc copy + """ + return PieceWiseLinFunc(self.x, self.y1, self.y2) + def almost_equal(self, other, decimal=14): """ Checks if the function is equal to another function up to `decimal` precision. 
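A note on the copy() methods introduced by this commit: they route the arrays back through the constructor, and since np.array() copies its input, the returned profile shares no state with the original. The average_profile helper added in the next hunk relies on exactly this when it seeds its accumulator with profiles[0].copy(). A minimal self-contained sketch of the idea -- the class below is a hypothetical stand-in, not the PySpike implementation::

    import numpy as np

    class PwcSketch(object):
        """ Stripped-down stand-in for PieceWiseConstFunc (illustration only).
        """
        def __init__(self, x, y):
            # np.array copies its input, so each instance owns its data
            self.x = np.array(x)
            self.y = np.array(y)

        def copy(self):
            # passing the arrays through the constructor copies them again
            return PwcSketch(self.x, self.y)

        def almost_equal(self, other, decimal=14):
            eps = 10.0**(-decimal)
            return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \
                np.allclose(self.y, other.y, atol=eps, rtol=0.0)

    f = PwcSketch([0.0, 1.0, 2.0], [1.0, -0.5])
    g = f.copy()
    g.y *= 2.0                      # mutating the copy ...
    assert f.y[0] == 1.0            # ... leaves the original untouched
    assert not f.almost_equal(g)
    assert f.almost_equal(f.copy())
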
@@ -183,3 +199,22 @@ class PieceWiseLinFunc: """ self.y1 *= fac self.y2 *= fac + + +def average_profile(profiles): + """ Computes the average profile from the given ISI- or SPIKE-profiles. + Args: + - profiles: list of PieceWiseConstFunc or PieceWiseLinFunc representing + ISI- or SPIKE-profiles to be averaged + Returns: + - avrg_profile: PieceWiseConstFunc or PieceWiseLinFunc containing the + average profile. + """ + assert len(profiles) > 1 + + avrg_profile = profiles[0].copy() + for i in xrange(1, len(profiles)): + avrg_profile.add(profiles[i]) + avrg_profile.mul_scalar(1.0/len(profiles)) # normalize + + return avrg_profile diff --git a/test/test_function.py b/test/test_function.py index c5caa5a..ed70180 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -68,6 +68,23 @@ def test_pwc_mul(): assert_array_almost_equal(f.y, 1.5/5.0*np.array(y), decimal=16) +def test_pwc_avrg(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f1 = spk.PieceWiseConstFunc(x, y) + + x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] + y = [0.5, 1.0, -0.25, 0.0, 1.5] + f2 = spk.PieceWiseConstFunc(x, y) + + f_avrg = spk.average_profile([f1, f2]) + x_expected = [0.0, 0.75, 1.0, 2.0, 2.5, 2.7, 4.0] + y_expected = [0.75, 1.0, 0.25, 0.625, 0.375, 1.125] + assert_array_almost_equal(f_avrg.x, x_expected, decimal=16) + assert_array_almost_equal(f_avrg.y, y_expected, decimal=16) + + def test_pwl(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y1 = [1.0, -0.5, 1.5, 0.75] @@ -134,6 +151,31 @@ def test_pwl_mul(): assert_array_almost_equal(f.y1, 1.5/5.0*np.array(y1), decimal=16) assert_array_almost_equal(f.y2, 1.5/5.0*np.array(y2), decimal=16) + +def test_pwl_avrg(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y1 = [1.0, -0.5, 1.5, 0.75] + y2 = [1.5, -0.4, 1.5, 0.25] + f1 = spk.PieceWiseLinFunc(x, y1, y2) + + x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] + y1 = [0.5, 1.0, -0.25, 0.0, 1.5] + y2 = [0.8, 0.2, -1.0, 0.0, 2.0] + f2 = spk.PieceWiseLinFunc(x, y1, y2) + + x_expected = [0.0, 0.75, 1.0, 2.0, 2.5, 2.7, 4.0] + y1_expected = np.array([1.5, 1.0+1.0+0.5*0.75, -0.5+1.0-0.8*0.25/1.25, + 1.5-0.25, 0.75, 1.5+0.75-0.5*0.2/1.5]) / 2 + y2_expected = np.array([0.8+1.0+0.5*0.75, 1.5+1.0-0.8*0.25/1.25, -0.4+0.2, + 1.5-1.0, 0.75-0.5*0.2/1.5, 2.25]) / 2 + + f_avrg = spk.average_profile([f1, f2]) + + assert_array_almost_equal(f_avrg.x, x_expected, decimal=16) + assert_array_almost_equal(f_avrg.y1, y1_expected, decimal=16) + assert_array_almost_equal(f_avrg.y2, y2_expected, decimal=16) + + if __name__ == "__main__": test_pwc() test_pwc_add() -- cgit v1.2.3 From 0005a0f170f69573c8101c4d400131b0dc9d8ee5 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 16 Oct 2014 18:07:49 +0200 Subject: deleted abs_avrg test --- test/test_function.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'test/test_function.py') diff --git a/test/test_function.py b/test/test_function.py index ed70180..c856d76 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -100,9 +100,6 @@ def test_pwl(): avrg_expected = (1.25 - 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.avrg(), avrg_expected, decimal=16) - abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0 - assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16) - def test_pwl_add(): x = [0.0, 1.0, 2.0, 2.5, 4.0] -- cgit v1.2.3 From 7dbb5f87321fc89b6c84552cfd821f96fe364bbe Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 22 Oct 2014 18:30:17 +0200 Subject: pwc avrg now takes interval --- pyspike/function.py | 28 ++++++++++++++++++++++------ test/test_function.py | 5 
+++++ 2 files changed, 27 insertions(+), 6 deletions(-) (limited to 'test/test_function.py') diff --git a/pyspike/function.py b/pyspike/function.py index ed47f27..14ad7bd 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -74,15 +74,29 @@ class PieceWiseConstFunc(object): return x_plot, y_plot - def avrg(self): + def avrg(self, interval=None): """ Computes the average of the piece-wise const function: :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. :returns: the average a. :rtype: double """ - return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ - (self.x[-1]-self.x[0]) + if interval is None: + # no interval given, average over the whole spike train + return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ + (self.x[-1]-self.x[0]) + else: + start_ind = np.searchsorted(self.x, interval[0], side='right') + end_ind = np.searchsorted(self.x, interval[1], side='left')-1 + print(start_ind, end_ind) + assert start_ind > 0 and end_ind < len(self.x), \ + "Invalid averaging interval" + a = np.sum((self.x[start_ind+1:end_ind] - + self.x[start_ind:end_ind-1]) * + self.y[start_ind:end_ind]) + a += (self.x[start_ind]-interval[0]) * self.y[start_ind] + a += (interval[1]-self.x[end_ind]) * self.y[end_ind] + return a / (interval[1]-interval[0]) def add(self, f): """ Adds another PieceWiseConst function to this function. @@ -181,15 +195,17 @@ class PieceWiseLinFunc: y_plot[1::2] = self.y2 return x_plot, y_plot - def avrg(self): + def avrg(self, interval=None): """ Computes the average of the piece-wise linear function: :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. :returns: the average a. :rtype: double """ - return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ - (self.x[-1]-self.x[0]) + if interval is None: + # no interval given, average over the whole spike train + return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ + (self.x[-1]-self.x[0]) def add(self, f): """ Adds another PieceWiseLin function to this function. 
diff --git a/test/test_function.py b/test/test_function.py index c856d76..cabcf44 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -29,6 +29,11 @@ def test_pwc(): assert_almost_equal(f.avrg(), (1.0-0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16) + # interval averaging + a = f.avrg([0.5, 3.5]) + print(a) + assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) + def test_pwc_add(): # some random data -- cgit v1.2.3 From 11f05d9dac3711d89db37a043db3d9437958c6f3 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 22 Oct 2014 22:17:27 +0200 Subject: avrg functions now take intervals --- pyspike/function.py | 51 ++++++++++++++++++++++++++++++++++++++++++++------- test/test_function.py | 19 ++++++++++++++++++- 2 files changed, 62 insertions(+), 8 deletions(-) (limited to 'test/test_function.py') diff --git a/pyspike/function.py b/pyspike/function.py index 14ad7bd..e340096 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -83,20 +83,24 @@ class PieceWiseConstFunc(object): """ if interval is None: # no interval given, average over the whole spike train - return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ + a = np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ (self.x[-1]-self.x[0]) else: + # find the indices corresponding to the interval start_ind = np.searchsorted(self.x, interval[0], side='right') end_ind = np.searchsorted(self.x, interval[1], side='left')-1 - print(start_ind, end_ind) assert start_ind > 0 and end_ind < len(self.x), \ "Invalid averaging interval" - a = np.sum((self.x[start_ind+1:end_ind] - - self.x[start_ind:end_ind-1]) * + # first the contribution from between the indices + a = np.sum((self.x[start_ind+1:end_ind+1] - + self.x[start_ind:end_ind]) * self.y[start_ind:end_ind]) - a += (self.x[start_ind]-interval[0]) * self.y[start_ind] + # correction from start to first index + a += (self.x[start_ind]-interval[0]) * self.y[start_ind-1] + # correction from last index to end a += (interval[1]-self.x[end_ind]) * self.y[end_ind] - return a / (interval[1]-interval[0]) + a /= (interval[1]-interval[0]) + return a def add(self, f): """ Adds another PieceWiseConst function to this function. @@ -202,10 +206,43 @@ class PieceWiseLinFunc: :returns: the average a. 
:rtype: double """ + + def intermediate_value(x0, x1, y0, y1, x): + """ computes the intermediate value of a linear function """ + return y0 + (y1-y0)*(x-x0)/(x1-x0) + if interval is None: # no interval given, average over the whole spike train - return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ + a = np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ (self.x[-1]-self.x[0]) + else: + # find the indices corresponding to the interval + start_ind = np.searchsorted(self.x, interval[0], side='right') + end_ind = np.searchsorted(self.x, interval[1], side='left')-1 + assert start_ind > 0 and end_ind < len(self.x), \ + "Invalid averaging interval" + # first the contribution from between the indices + a = np.sum((self.x[start_ind+1:end_ind+1] - + self.x[start_ind:end_ind]) * + 0.5*(self.y1[start_ind:end_ind] + + self.y2[start_ind:end_ind])) + # correction from start to first index + a += (self.x[start_ind]-interval[0]) * 0.5 * \ + (self.y2[start_ind-1] + + intermediate_value(self.x[start_ind-1], self.x[start_ind], + self.y1[start_ind-1], + self.y2[start_ind-1], + interval[0] + )) + # correction from last index to end + a += (interval[1]-self.x[end_ind]) * 0.5 * \ + (self.y1[end_ind] + + intermediate_value(self.x[end_ind], self.x[end_ind+1], + self.y1[end_ind], self.y2[end_ind], + interval[1] + )) + a /= (interval[1]-interval[0]) + return a def add(self, f): """ Adds another PieceWiseLin function to this function. diff --git a/test/test_function.py b/test/test_function.py index cabcf44..ed0b2ed 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -31,8 +31,13 @@ def test_pwc(): # interval averaging a = f.avrg([0.5, 3.5]) - print(a) assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) + a = f.avrg([1.5, 3.5]) + assert_almost_equal(a, (-0.5*0.5+0.5*1.5+1.0*0.75)/2.0, decimal=16) + a = f.avrg([1.0, 3.5]) + assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16) + a = f.avrg([1.0, 4.0]) + assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.5*0.75)/3.0, decimal=16) def test_pwc_add(): @@ -105,6 +110,18 @@ def test_pwl(): avrg_expected = (1.25 - 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.avrg(), avrg_expected, decimal=16) + # interval averaging + a = f.avrg([0.5, 2.5]) + assert_almost_equal(a, (1.375*0.5 - 0.45 + 0.75)/2.0, decimal=16) + a = f.avrg([1.5, 3.5]) + assert_almost_equal(a, (-0.425*0.5 + 0.75 + (0.75+0.75-0.5/1.5)/2) / 2.0, + decimal=16) + a = f.avrg([1.0, 3.5]) + assert_almost_equal(a, (-0.45 + 0.75 + (0.75+0.75-0.5/1.5)/2) / 2.5, + decimal=16) + a = f.avrg([1.0, 4.0]) + assert_almost_equal(a, (-0.45 + 0.75 + 1.5*0.5) / 3.0, decimal=16) + def test_pwl_add(): x = [0.0, 1.0, 2.0, 2.5, 4.0] -- cgit v1.2.3 From 3a36f81d52137435910a4b7c656a478ae5b38ece Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 23 Oct 2014 15:33:06 +0200 Subject: support for multiple averaging intervals --- examples/averages.py | 26 ++++----- pyspike/function.py | 144 ++++++++++++++++++++++++++++++++++++-------------- test/test_function.py | 10 +++- 3 files changed, 126 insertions(+), 54 deletions(-) (limited to 'test/test_function.py') diff --git a/examples/averages.py b/examples/averages.py index a06871a..6413ab2 100644 --- a/examples/averages.py +++ b/examples/averages.py @@ -20,23 +20,25 @@ print("ISI-distance: %.8f" % f.avrg()) isi1 = f.avrg(interval=(0, 1000)) isi2 = f.avrg(interval=(1000, 2000)) -isi3 = f.avrg(interval=(2000, 3000)) -isi4 = f.avrg(interval=(3000, 4000)) +isi3 = f.avrg(interval=[(0, 1000), (2000, 3000)]) +isi4 = 
f.avrg(interval=[(1000, 2000), (3000, 4000)]) -print("ISI-distance (0-1000): %.8f" % isi1) -print("ISI-distance (1000-2000): %.8f" % isi2) -print("ISI-distance (2000-3000): %.8f" % isi3) -print("ISI-distance (3000-4000): %.8f" % isi4) +print("ISI-distance (0-1000): %.8f" % isi1) +print("ISI-distance (1000-2000): %.8f" % isi2) +print("ISI-distance (0-1000) and (2000-3000): %.8f" % isi3) +print("ISI-distance (1000-2000) and (3000-4000): %.8f" % isi4) print() +f = spk.spike_profile(spike_trains[0], spike_trains[1]) + print("SPIKE-distance: %.8f" % f.avrg()) spike1 = f.avrg(interval=(0, 1000)) spike2 = f.avrg(interval=(1000, 2000)) -spike3 = f.avrg(interval=(2000, 3000)) -spike4 = f.avrg(interval=(3000, 4000)) +spike3 = f.avrg(interval=[(0, 1000), (2000, 3000)]) +spike4 = f.avrg(interval=[(1000, 2000), (3000, 4000)]) -print("SPIKE-distance (0-1000): %.8f" % spike1) -print("SPIKE-distance (1000-2000): %.8f" % spike2) -print("SPIKE-distance (2000-3000): %.8f" % spike3) -print("SPIKE-distance (3000-4000): %.8f" % spike4) +print("SPIKE-distance (0-1000): %.8f" % spike1) +print("SPIKE-distance (1000-2000): %.8f" % spike2) +print("SPIKE-distance (0-1000) and (2000-3000): %.8f" % spike3) +print("SPIKE-distance (1000-2000) and (3000-4000): %.8f" % spike4) diff --git a/pyspike/function.py b/pyspike/function.py index d53042f..0c0a391 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -11,6 +11,7 @@ Distributed under the BSD License from __future__ import print_function import numpy as np +import collections ############################################################## @@ -74,21 +75,18 @@ class PieceWiseConstFunc(object): return x_plot, y_plot - def avrg(self, interval=None): - """ Computes the average of the piece-wise const function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + def integral(self, interval=None): + """ Returns the integral over the given interval. - :param interval: averaging interval given as a pair of floats, if None - the average over the whole function is computed. + :param interval: integration interval given as a pair of floats, if + None the integral over the whole function is computed. :type interval: Pair of floats or None. - :returns: the average a. - :rtype: double - + :returns: the integral + :rtype: float """ if interval is None: - # no interval given, average over the whole spike train - a = np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ - (self.x[-1]-self.x[0]) + # no interval given, integrate over the whole spike train + a = np.sum((self.x[1:]-self.x[:-1]) * self.y) else: # find the indices corresponding to the interval start_ind = np.searchsorted(self.x, interval[0], side='right') @@ -103,7 +101,39 @@ class PieceWiseConstFunc(object): a += (self.x[start_ind]-interval[0]) * self.y[start_ind-1] # correction from last index to end a += (interval[1]-self.x[end_ind]) * self.y[end_ind] - a /= (interval[1]-interval[0]) + return a + + def avrg(self, interval=None): + """ Computes the average of the piece-wise const function: + :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + + :param interval: averaging interval given as a pair of floats, a + sequence of pairs for averaging multiple intervals, or + None, if None the average over the whole function is + computed. + :type interval: Pair, sequence of pairs, or None. + :returns: the average a. 
+ :rtype: float + """ + if interval is None: + # no interval given, average over the whole spike train + return self.integral() / (self.x[-1]-self.x[0]) + + # check if interval is as sequence + assert isinstance(interval, collections.Sequence), \ + "Invalid value for `interval`. None, Sequence or Tuple expected." + # check if interval is a sequence of intervals + if not isinstance(interval[0], collections.Sequence): + # just one interval + a = self.integral(interval) / (interval[1]-interval[0]) + else: + # several intervals + a = 0.0 + int_length = 0.0 + for ival in interval: + a += self.integral(ival) + int_length += ival[1] - ival[0] + a /= int_length return a def add(self, f): @@ -203,16 +233,14 @@ class PieceWiseLinFunc: y_plot[1::2] = self.y2 return x_plot, y_plot - def avrg(self, interval=None): - """ Computes the average of the piece-wise linear function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + def integral(self, interval=None): + """ Returns the integral over the given interval. - :param interval: averaging interval given as a pair of floats, if None - the average over the whole function is computed. + :param interval: integration interval given as a pair of floats, if + None the integral over the whole function is computed. :type interval: Pair of floats or None. - :returns: the average a. - :rtype: double - + :returns: the integral + :rtype: float """ def intermediate_value(x0, x1, y0, y1, x): @@ -220,9 +248,8 @@ class PieceWiseLinFunc: return y0 + (y1-y0)*(x-x0)/(x1-x0) if interval is None: - # no interval given, average over the whole spike train - a = np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ - (self.x[-1]-self.x[0]) + # no interval given, integrate over the whole spike train + integral = np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) else: # find the indices corresponding to the interval start_ind = np.searchsorted(self.x, interval[0], side='right') @@ -230,26 +257,61 @@ class PieceWiseLinFunc: assert start_ind > 0 and end_ind < len(self.x), \ "Invalid averaging interval" # first the contribution from between the indices - a = np.sum((self.x[start_ind+1:end_ind+1] - - self.x[start_ind:end_ind]) * - 0.5*(self.y1[start_ind:end_ind] + - self.y2[start_ind:end_ind])) + integral = np.sum((self.x[start_ind+1:end_ind+1] - + self.x[start_ind:end_ind]) * + 0.5*(self.y1[start_ind:end_ind] + + self.y2[start_ind:end_ind])) # correction from start to first index - a += (self.x[start_ind]-interval[0]) * 0.5 * \ - (self.y2[start_ind-1] + - intermediate_value(self.x[start_ind-1], self.x[start_ind], - self.y1[start_ind-1], - self.y2[start_ind-1], - interval[0] - )) + integral += (self.x[start_ind]-interval[0]) * 0.5 * \ + (self.y2[start_ind-1] + + intermediate_value(self.x[start_ind-1], + self.x[start_ind], + self.y1[start_ind-1], + self.y2[start_ind-1], + interval[0] + )) # correction from last index to end - a += (interval[1]-self.x[end_ind]) * 0.5 * \ - (self.y1[end_ind] + - intermediate_value(self.x[end_ind], self.x[end_ind+1], - self.y1[end_ind], self.y2[end_ind], - interval[1] - )) - a /= (interval[1]-interval[0]) + integral += (interval[1]-self.x[end_ind]) * 0.5 * \ + (self.y1[end_ind] + + intermediate_value(self.x[end_ind], self.x[end_ind+1], + self.y1[end_ind], self.y2[end_ind], + interval[1] + )) + return integral + + def avrg(self, interval=None): + """ Computes the average of the piece-wise linear function: + :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. 
+ + :param interval: averaging interval given as a pair of floats, a + sequence of pairs for averaging multiple intervals, or + None, if None the average over the whole function is + computed. + :type interval: Pair, sequence of pairs, or None. + :returns: the average a. + :rtype: float + + """ + + if interval is None: + # no interval given, average over the whole spike train + return self.integral() / (self.x[-1]-self.x[0]) + + # check if interval is as sequence + assert isinstance(interval, collections.Sequence), \ + "Invalid value for `interval`. None, Sequence or Tuple expected." + # check if interval is a sequence of intervals + if not isinstance(interval[0], collections.Sequence): + # just one interval + a = self.integral(interval) / (interval[1]-interval[0]) + else: + # several intervals + a = 0.0 + int_length = 0.0 + for ival in interval: + a += self.integral(ival) + int_length += ival[1] - ival[0] + a /= int_length return a def add(self, f): diff --git a/test/test_function.py b/test/test_function.py index ed0b2ed..ba87db8 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -39,6 +39,10 @@ def test_pwc(): a = f.avrg([1.0, 4.0]) assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.5*0.75)/3.0, decimal=16) + # averaging over multiple intervals + a = f.avrg([(0.5, 1.5), (1.5, 3.5)]) + assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) + def test_pwc_add(): # some random data @@ -116,12 +120,16 @@ def test_pwl(): a = f.avrg([1.5, 3.5]) assert_almost_equal(a, (-0.425*0.5 + 0.75 + (0.75+0.75-0.5/1.5)/2) / 2.0, decimal=16) - a = f.avrg([1.0, 3.5]) + a = f.avrg((1.0, 3.5)) assert_almost_equal(a, (-0.45 + 0.75 + (0.75+0.75-0.5/1.5)/2) / 2.5, decimal=16) a = f.avrg([1.0, 4.0]) assert_almost_equal(a, (-0.45 + 0.75 + 1.5*0.5) / 3.0, decimal=16) + # averaging over multiple intervals + a = f.avrg([(0.5, 1.5), (1.5, 2.5)]) + assert_almost_equal(a, (1.375*0.5 - 0.45 + 0.75)/2.0, decimal=16) + def test_pwl_add(): x = [0.0, 1.0, 2.0, 2.5, 4.0] -- cgit v1.2.3 From 6c0f966649c8dedd4115d6809e569732ee5709c9 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 19 Jan 2015 22:32:42 +0100 Subject: interval averages for discrete functions --- pyspike/function.py | 34 ++++++++++++++++++++++++++-------- test/test_function.py | 27 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 8 deletions(-) (limited to 'test/test_function.py') diff --git a/pyspike/function.py b/pyspike/function.py index 6fb7537..ebf4189 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -416,9 +416,9 @@ class DiscreteFunction(object): return 1.0*self.x, 1.0*self.y/self.mp def integral(self, interval=None): - """ Returns the integral over the given interval. For the interval - sequence this amounts to the sum over all values divided by the count - of intervals. + """ Returns the integral over the given interval. For the discrete + function, this amounts to the sum over all values divided by the total + multiplicity. :param interval: integration interval given as a pair of floats, if None the integral over the whole function is computed. 
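Before the diff turns to the discrete case below, it is worth spelling out the semantics the two October commits above give to the piece-wise classes: a multi-interval average is time-weighted, i.e. avrg() pools the integrals over all intervals and divides once by the summed interval length, a = (sum_k int_{I_k} f dx) / (sum_k |I_k|), rather than averaging per-interval averages. A small sketch of that weighting, under the simplifying assumption that interval edges coincide with support points in x (the library code above additionally handles partial bins)::

    import numpy as np

    def pwc_integral(x, y, interval):
        """ Integral of a piece-wise constant function over [a, b].
        Simplification: a and b are assumed to lie exactly on edges in x. """
        x = np.asarray(x)
        y = np.asarray(y)
        i0 = np.searchsorted(x, interval[0])
        i1 = np.searchsorted(x, interval[1])
        return np.sum((x[i0+1:i1+1] - x[i0:i1]) * y[i0:i1])

    def pwc_avrg(x, y, intervals):
        """ Time-weighted average over several intervals. """
        total = sum(pwc_integral(x, y, ival) for ival in intervals)
        length = sum(b - a for (a, b) in intervals)
        return total / length

    # the piece-wise constant test data used throughout test_function.py
    x = [0.0, 1.0, 2.0, 2.5, 4.0]
    y = [1.0, -0.5, 1.5, 0.75]
    print(pwc_avrg(x, y, [(0.0, 2.0)]))              # (1.0 - 0.5)/2 = 0.25
    print(pwc_avrg(x, y, [(0.0, 1.0), (2.0, 4.0)]))  # (1.0 + 1.875)/3
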
@@ -429,9 +429,16 @@ class DiscreteFunction(object): if interval is None: # no interval given, integrate over the whole spike train # don't count the first value, which is zero by definition - a = 1.0*np.sum(self.y[1:-1]) + a = 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1]) else: - raise NotImplementedError() + # find the indices corresponding to the interval + start_ind = np.searchsorted(self.x, interval[0], side='right') + end_ind = np.searchsorted(self.x, interval[1], side='left') + assert start_ind > 0 and end_ind < len(self.x), \ + "Invalid averaging interval" + # first the contribution from between the indices + a = np.sum(self.y[start_ind:end_ind]) / \ + np.sum(self.mp[start_ind:end_ind]) return a def avrg(self, interval=None): @@ -448,10 +455,21 @@ class DiscreteFunction(object): """ if interval is None: # no interval given, average over the whole spike train - # don't count the first interval for normalization - return self.integral() / np.sum(self.mp[1:-1]) + return self.integral() + + # check if interval is as sequence + assert isinstance(interval, collections.Sequence), \ + "Invalid value for `interval`. None, Sequence or Tuple expected." + # check if interval is a sequence of intervals + if not isinstance(interval[0], collections.Sequence): + # just one interval + a = self.integral(interval) else: - raise NotImplementedError() + # several intervals + a = 0.0 + for ival in interval: + a += self.integral(ival) + return a def add(self, f): """ Adds another `DiscreteFunction` function to this function. diff --git a/test/test_function.py b/test/test_function.py index ba87db8..da3d851 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -203,6 +203,33 @@ def test_pwl_avrg(): assert_array_almost_equal(f_avrg.y2, y2_expected, decimal=16) +def test_df(): + # testing discrete function + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [0.0, 1.0, 1.0, 0.0, 1.0] + mp = [1.0, 2.0, 1.0, 2.0, 1.0] + f = spk.DiscreteFunction(x, y, mp) + xp, yp = f.get_plottable_data() + + xp_expected = [0.0, 1.0, 2.0, 2.5, 4.0] + yp_expected = [0.0, 0.5, 1.0, 0.0, 1.0] + assert_array_almost_equal(xp, xp_expected, decimal=16) + assert_array_almost_equal(yp, yp_expected, decimal=16) + + avrg_expected = 2.0 / 5.0 + assert_almost_equal(f.avrg(), avrg_expected, decimal=16) + + # interval averaging + a = f.avrg([0.5, 2.4]) + assert_almost_equal(a, 2.0/3.0, decimal=16) + a = f.avrg([1.5, 3.5]) + assert_almost_equal(a, 1.0/3.0, decimal=16) + a = f.avrg((0.9, 3.5)) + assert_almost_equal(a, 2.0/5.0, decimal=16) + a = f.avrg([1.1, 4.0]) + assert_almost_equal(a, 1.0/3.0, decimal=16) + + if __name__ == "__main__": test_pwc() test_pwc_add() -- cgit v1.2.3 From f2d742c06fd013a013c811593257b67502ea9486 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 19 Jan 2015 23:07:09 +0100 Subject: fixed bug for multiple intervals --- pyspike/function.py | 60 ++++++++++++++++++++++++++------------------------- test/test_function.py | 7 ++++-- 2 files changed, 36 insertions(+), 31 deletions(-) (limited to 'test/test_function.py') diff --git a/pyspike/function.py b/pyspike/function.py index ebf4189..62b0e2c 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -420,26 +420,44 @@ class DiscreteFunction(object): function, this amounts to the sum over all values divided by the total multiplicity. 
- :param interval: integration interval given as a pair of floats, if + :param interval: integration interval given as a pair of floats, or a + sequence of pairs in case of multiple intervals, if None the integral over the whole function is computed. - :type interval: Pair of floats or None. + :type interval: Pair, sequence of pairs, or None. :returns: the integral :rtype: float """ + + def get_indices(ival): + start_ind = np.searchsorted(self.x, ival[0], side='right') + end_ind = np.searchsorted(self.x, ival[1], side='left') + assert start_ind > 0 and end_ind < len(self.x), \ + "Invalid averaging interval" + return start_ind, end_ind + if interval is None: # no interval given, integrate over the whole spike train # don't count the first value, which is zero by definition - a = 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1]) - else: + return 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1]) + + # check if interval is as sequence + assert isinstance(interval, collections.Sequence), \ + "Invalid value for `interval`. None, Sequence or Tuple expected." + # check if interval is a sequence of intervals + if not isinstance(interval[0], collections.Sequence): # find the indices corresponding to the interval - start_ind = np.searchsorted(self.x, interval[0], side='right') - end_ind = np.searchsorted(self.x, interval[1], side='left') - assert start_ind > 0 and end_ind < len(self.x), \ - "Invalid averaging interval" - # first the contribution from between the indices - a = np.sum(self.y[start_ind:end_ind]) / \ - np.sum(self.mp[start_ind:end_ind]) - return a + start_ind, end_ind = get_indices(interval) + return (np.sum(self.y[start_ind:end_ind]) / + np.sum(self.mp[start_ind:end_ind])) + else: + value = 0.0 + multiplicity = 0.0 + for ival in interval: + # find the indices corresponding to the interval + start_ind, end_ind = get_indices(ival) + value += np.sum(self.y[start_ind:end_ind]) + multiplicity += np.sum(self.mp[start_ind:end_ind]) + return value/multiplicity def avrg(self, interval=None): """ Computes the average of the interval sequence: @@ -453,23 +471,7 @@ class DiscreteFunction(object): :returns: the average a. :rtype: float """ - if interval is None: - # no interval given, average over the whole spike train - return self.integral() - - # check if interval is as sequence - assert isinstance(interval, collections.Sequence), \ - "Invalid value for `interval`. None, Sequence or Tuple expected." - # check if interval is a sequence of intervals - if not isinstance(interval[0], collections.Sequence): - # just one interval - a = self.integral(interval) - else: - # several intervals - a = 0.0 - for ival in interval: - a += self.integral(ival) - return a + return self.integral(interval) def add(self, f): """ Adds another `DiscreteFunction` function to this function. 
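The fix above makes the multi-interval case consistent with the single-interval one: coincidence values and multiplicities are each summed across all intervals first, and the division happens only once at the end. The result therefore equals the average over the pooled set of events, which is in general not the same as combining per-interval ratios. A short sketch, reusing the data from the test_df change below::

    import numpy as np

    x = np.array([0.0, 1.0, 2.0, 2.5, 4.0])   # event times
    y = np.array([0.0, 1.0, 1.0, 0.0, 1.0])   # coincidence values
    mp = np.array([1.0, 2.0, 1.0, 2.0, 1.0])  # multiplicities

    def pooled_avrg(intervals):
        """ What the fixed integral() computes: one division at the end. """
        value, multiplicity = 0.0, 0.0
        for a, b in intervals:
            start = np.searchsorted(x, a, side='right')
            end = np.searchsorted(x, b, side='left')
            value += np.sum(y[start:end])
            multiplicity += np.sum(mp[start:end])
        return value / multiplicity

    ivals = [(0.5, 1.5), (1.5, 2.6)]
    print(pooled_avrg(ivals))             # 2.0/5.0, as asserted in test_df
    # combining per-interval ratios instead gives a different number:
    print(np.mean([pooled_avrg([iv]) for iv in ivals]))  # (1/2 + 1/3)/2
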
diff --git a/test/test_function.py b/test/test_function.py index da3d851..933fd2e 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -216,8 +216,7 @@ def test_df(): assert_array_almost_equal(xp, xp_expected, decimal=16) assert_array_almost_equal(yp, yp_expected, decimal=16) - avrg_expected = 2.0 / 5.0 - assert_almost_equal(f.avrg(), avrg_expected, decimal=16) + assert_almost_equal(f.avrg(), 2.0/5.0, decimal=16) # interval averaging a = f.avrg([0.5, 2.4]) @@ -229,6 +228,10 @@ def test_df(): a = f.avrg([1.1, 4.0]) assert_almost_equal(a, 1.0/3.0, decimal=16) + # averaging over multiple intervals + a = f.avrg([(0.5, 1.5), (1.5, 2.6)]) + assert_almost_equal(a, 2.0/5.0, decimal=16) + if __name__ == "__main__": test_pwc() -- cgit v1.2.3 From ae90eb6bdd6afd716cb90a4aeeec8e6e77635d10 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 28 Jan 2015 17:42:19 +0100 Subject: each function class in separate source file --- pyspike/__init__.py | 9 +- pyspike/distances.py | 13 +- pyspike/function.py | 585 -------------------------------------------------- test/test_function.py | 21 +- 4 files changed, 27 insertions(+), 601 deletions(-) delete mode 100644 pyspike/function.py (limited to 'test/test_function.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 55687e6..f480964 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -4,10 +4,13 @@ Copyright 2014, Mario Mulansky Distributed under the BSD License """ -__all__ = ["function", "distances", "spikes"] +__all__ = ["distances", "spikes", "PieceWiseConstFunc", "PieceWiseLinFunc", + "DiscreteFunc"] + +from PieceWiseConstFunc import PieceWiseConstFunc +from PieceWiseLinFunc import PieceWiseLinFunc +from DiscreteFunc import DiscreteFunc -from function import PieceWiseConstFunc, PieceWiseLinFunc, \ - DiscreteFunction, average_profile from distances import isi_profile, isi_distance, \ spike_profile, spike_distance, \ spike_sync_profile, spike_sync, \ diff --git a/pyspike/distances.py b/pyspike/distances.py index 5476b6f..9077871 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -11,7 +11,7 @@ import numpy as np import threading from functools import partial -from pyspike import PieceWiseConstFunc, PieceWiseLinFunc, DiscreteFunction +from pyspike import PieceWiseConstFunc, PieceWiseLinFunc, DiscreteFunc ############################################################ @@ -161,7 +161,7 @@ Falling back to slow python backend.") times, coincidences, multiplicity = coincidence_impl(spikes1, spikes2) - return DiscreteFunction(times, coincidences, multiplicity) + return DiscreteFunc(times, coincidences, multiplicity) ############################################################ @@ -212,7 +212,8 @@ def _generic_profile_multi(spike_trains, pair_distance_func, indices=None): assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ "Invalid index list." # generate a list of possible index pairs - pairs = [(indices[i], j) for i in range(len(indices)) for j in indices[i+1:]] + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] # start with first pair (i, j) = pairs[0] average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) @@ -251,7 +252,8 @@ def _multi_distance_par(spike_trains, pair_distance_func, indices=None): assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ "Invalid index list." 
# generate a list of possible index pairs - pairs = [(indices[i], j) for i in range(len(indices)) for j in indices[i+1:]] + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] num_pairs = len(pairs) # start with first pair @@ -430,7 +432,8 @@ def _generic_distance_matrix(spike_trains, dist_function, assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ "Invalid index list." # generate a list of possible index pairs - pairs = [(indices[i], j) for i in range(len(indices)) for j in indices[i+1:]] + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] distance_matrix = np.zeros((len(indices), len(indices))) for i, j in pairs: diff --git a/pyspike/function.py b/pyspike/function.py deleted file mode 100644 index 047c88a..0000000 --- a/pyspike/function.py +++ /dev/null @@ -1,585 +0,0 @@ -""" function.py - -Module containing classes representing piece-wise constant and piece-wise -linear functions. - -Copyright 2014, Mario Mulansky - -Distributed under the BSD License - -""" -from __future__ import print_function - -import numpy as np -import collections - - -############################################################## -# PieceWiseConstFunc -############################################################## -class PieceWiseConstFunc(object): - """ A class representing a piece-wise constant function. """ - - def __init__(self, x, y): - """ Constructs the piece-wise const function. - - :param x: array of length N+1 defining the edges of the intervals of - the pwc function. - :param y: array of length N defining the function values at the - intervals. - """ - # convert parameters to arrays, also ensures copying - self.x = np.array(x) - self.y = np.array(y) - - def copy(self): - """ Returns a copy of itself - - :rtype: :class:`PieceWiseConstFunc` - """ - return PieceWiseConstFunc(self.x, self.y) - - def almost_equal(self, other, decimal=14): - """ Checks if the function is equal to another function up to `decimal` - precision. - - :param other: another :class:`PieceWiseConstFunc` - :returns: True if the two functions are equal up to `decimal` decimals, - False otherwise - :rtype: bool - """ - eps = 10.0**(-decimal) - return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ - np.allclose(self.y, other.y, atol=eps, rtol=0.0) - - def get_plottable_data(self): - """ Returns two arrays containing x- and y-coordinates for immeditate - plotting of the piece-wise function. - - :returns: (x_plot, y_plot) containing plottable data - :rtype: pair of np.array - - Example:: - - x, y = f.get_plottable_data() - plt.plot(x, y, '-o', label="Piece-wise const function") - """ - - x_plot = np.empty(2*len(self.x)-2) - x_plot[0] = self.x[0] - x_plot[1::2] = self.x[1:] - x_plot[2::2] = self.x[1:-1] - y_plot = np.empty(2*len(self.y)) - y_plot[::2] = self.y - y_plot[1::2] = self.y - - return x_plot, y_plot - - def integral(self, interval=None): - """ Returns the integral over the given interval. - - :param interval: integration interval given as a pair of floats, if - None the integral over the whole function is computed. - :type interval: Pair of floats or None. 
- :returns: the integral - :rtype: float - """ - if interval is None: - # no interval given, integrate over the whole spike train - a = np.sum((self.x[1:]-self.x[:-1]) * self.y) - else: - # find the indices corresponding to the interval - start_ind = np.searchsorted(self.x, interval[0], side='right') - end_ind = np.searchsorted(self.x, interval[1], side='left')-1 - assert start_ind > 0 and end_ind < len(self.x), \ - "Invalid averaging interval" - # first the contribution from between the indices - a = np.sum((self.x[start_ind+1:end_ind+1] - - self.x[start_ind:end_ind]) * - self.y[start_ind:end_ind]) - # correction from start to first index - a += (self.x[start_ind]-interval[0]) * self.y[start_ind-1] - # correction from last index to end - a += (interval[1]-self.x[end_ind]) * self.y[end_ind] - return a - - def avrg(self, interval=None): - """ Computes the average of the piece-wise const function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. - - :param interval: averaging interval given as a pair of floats, a - sequence of pairs for averaging multiple intervals, or - None, if None the average over the whole function is - computed. - :type interval: Pair, sequence of pairs, or None. - :returns: the average a. - :rtype: float - """ - if interval is None: - # no interval given, average over the whole spike train - return self.integral() / (self.x[-1]-self.x[0]) - - # check if interval is as sequence - assert isinstance(interval, collections.Sequence), \ - "Invalid value for `interval`. None, Sequence or Tuple expected." - # check if interval is a sequence of intervals - if not isinstance(interval[0], collections.Sequence): - # just one interval - a = self.integral(interval) / (interval[1]-interval[0]) - else: - # several intervals - a = 0.0 - int_length = 0.0 - for ival in interval: - a += self.integral(ival) - int_length += ival[1] - ival[0] - a /= int_length - return a - - def add(self, f): - """ Adds another PieceWiseConst function to this function. - Note: only functions defined on the same interval can be summed. - - :param f: :class:`PieceWiseConstFunc` function to be added. - :rtype: None - """ - assert self.x[0] == f.x[0], "The functions have different intervals" - assert self.x[-1] == f.x[-1], "The functions have different intervals" - - # cython version - try: - from cython_add import add_piece_wise_const_cython as \ - add_piece_wise_const_impl - except ImportError: - print("Warning: add_piece_wise_const_cython not found. Make sure \ -that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ -\n Falling back to slow python backend.") - # use python backend - from python_backend import add_piece_wise_const_python as \ - add_piece_wise_const_impl - - self.x, self.y = add_piece_wise_const_impl(self.x, self.y, f.x, f.y) - - def mul_scalar(self, fac): - """ Multiplies the function with a scalar value - - :param fac: Value to multiply - :type fac: double - :rtype: None - """ - self.y *= fac - - -############################################################## -# PieceWiseLinFunc -############################################################## -class PieceWiseLinFunc: - """ A class representing a piece-wise linear function. """ - - def __init__(self, x, y1, y2): - """ Constructs the piece-wise linear function. - - :param x: array of length N+1 defining the edges of the intervals of - the pwc function. - :param y1: array of length N defining the function values at the left - of the intervals. 
- :param y2: array of length N defining the function values at the right - of the intervals. - """ - # convert to array, which also ensures copying - self.x = np.array(x) - self.y1 = np.array(y1) - self.y2 = np.array(y2) - - def copy(self): - """ Returns a copy of itself - - :rtype: :class:`PieceWiseLinFunc` - """ - return PieceWiseLinFunc(self.x, self.y1, self.y2) - - def almost_equal(self, other, decimal=14): - """ Checks if the function is equal to another function up to `decimal` - precision. - - :param other: another :class:`PieceWiseLinFunc` - :returns: True if the two functions are equal up to `decimal` decimals, - False otherwise - :rtype: bool - """ - eps = 10.0**(-decimal) - return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ - np.allclose(self.y1, other.y1, atol=eps, rtol=0.0) and \ - np.allclose(self.y2, other.y2, atol=eps, rtol=0.0) - - def get_plottable_data(self): - """ Returns two arrays containing x- and y-coordinates for immeditate - plotting of the piece-wise function. - - :returns: (x_plot, y_plot) containing plottable data - :rtype: pair of np.array - - Example:: - - x, y = f.get_plottable_data() - plt.plot(x, y, '-o', label="Piece-wise const function") - """ - x_plot = np.empty(2*len(self.x)-2) - x_plot[0] = self.x[0] - x_plot[1::2] = self.x[1:] - x_plot[2::2] = self.x[1:-1] - y_plot = np.empty_like(x_plot) - y_plot[0::2] = self.y1 - y_plot[1::2] = self.y2 - return x_plot, y_plot - - def integral(self, interval=None): - """ Returns the integral over the given interval. - - :param interval: integration interval given as a pair of floats, if - None the integral over the whole function is computed. - :type interval: Pair of floats or None. - :returns: the integral - :rtype: float - """ - - def intermediate_value(x0, x1, y0, y1, x): - """ computes the intermediate value of a linear function """ - return y0 + (y1-y0)*(x-x0)/(x1-x0) - - if interval is None: - # no interval given, integrate over the whole spike train - integral = np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) - else: - # find the indices corresponding to the interval - start_ind = np.searchsorted(self.x, interval[0], side='right') - end_ind = np.searchsorted(self.x, interval[1], side='left')-1 - assert start_ind > 0 and end_ind < len(self.x), \ - "Invalid averaging interval" - # first the contribution from between the indices - integral = np.sum((self.x[start_ind+1:end_ind+1] - - self.x[start_ind:end_ind]) * - 0.5*(self.y1[start_ind:end_ind] + - self.y2[start_ind:end_ind])) - # correction from start to first index - integral += (self.x[start_ind]-interval[0]) * 0.5 * \ - (self.y2[start_ind-1] + - intermediate_value(self.x[start_ind-1], - self.x[start_ind], - self.y1[start_ind-1], - self.y2[start_ind-1], - interval[0] - )) - # correction from last index to end - integral += (interval[1]-self.x[end_ind]) * 0.5 * \ - (self.y1[end_ind] + - intermediate_value(self.x[end_ind], self.x[end_ind+1], - self.y1[end_ind], self.y2[end_ind], - interval[1] - )) - return integral - - def avrg(self, interval=None): - """ Computes the average of the piece-wise linear function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. - - :param interval: averaging interval given as a pair of floats, a - sequence of pairs for averaging multiple intervals, or - None, if None the average over the whole function is - computed. - :type interval: Pair, sequence of pairs, or None. - :returns: the average a. 
- :rtype: float - - """ - - if interval is None: - # no interval given, average over the whole spike train - return self.integral() / (self.x[-1]-self.x[0]) - - # check if interval is as sequence - assert isinstance(interval, collections.Sequence), \ - "Invalid value for `interval`. None, Sequence or Tuple expected." - # check if interval is a sequence of intervals - if not isinstance(interval[0], collections.Sequence): - # just one interval - a = self.integral(interval) / (interval[1]-interval[0]) - else: - # several intervals - a = 0.0 - int_length = 0.0 - for ival in interval: - a += self.integral(ival) - int_length += ival[1] - ival[0] - a /= int_length - return a - - def add(self, f): - """ Adds another PieceWiseLin function to this function. - Note: only functions defined on the same interval can be summed. - - :param f: :class:`PieceWiseLinFunc` function to be added. - :rtype: None - """ - assert self.x[0] == f.x[0], "The functions have different intervals" - assert self.x[-1] == f.x[-1], "The functions have different intervals" - - # python implementation - # from python_backend import add_piece_wise_lin_python - # self.x, self.y1, self.y2 = add_piece_wise_lin_python( - # self.x, self.y1, self.y2, f.x, f.y1, f.y2) - - # cython version - try: - from cython_add import add_piece_wise_lin_cython as \ - add_piece_wise_lin_impl - except ImportError: - print("Warning: add_piece_wise_lin_cython not found. Make sure \ -that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ -\n Falling back to slow python backend.") - # use python backend - from python_backend import add_piece_wise_lin_python as \ - add_piece_wise_lin_impl - - self.x, self.y1, self.y2 = add_piece_wise_lin_impl( - self.x, self.y1, self.y2, f.x, f.y1, f.y2) - - def mul_scalar(self, fac): - """ Multiplies the function with a scalar value - - :param fac: Value to multiply - :type fac: double - :rtype: None - """ - self.y1 *= fac - self.y2 *= fac - - -############################################################## -# DiscreteFunction -############################################################## -class DiscreteFunction(object): - """ A class representing values defined on a discrete set of points. - """ - - def __init__(self, x, y, multiplicity): - """ Constructs the discrete function. - - :param x: array of length N defining the points at which the values are - defined. - :param y: array of length N degining the values at the points x. - :param multiplicity: array of length N defining the multiplicity of the - values. - """ - # convert parameters to arrays, also ensures copying - self.x = np.array(x) - self.y = np.array(y) - self.mp = np.array(multiplicity) - - def copy(self): - """ Returns a copy of itself - - :rtype: :class:`DiscreteFunction` - """ - return DiscreteFunction(self.x, self.y, self.mp) - - def almost_equal(self, other, decimal=14): - """ Checks if the function is equal to another function up to `decimal` - precision. - - :param other: another :class:`DiscreteFunction` - :returns: True if the two functions are equal up to `decimal` decimals, - False otherwise - :rtype: bool - """ - eps = 10.0**(-decimal) - return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ - np.allclose(self.y, other.y, atol=eps, rtol=0.0) and \ - np.allclose(self.mp, other.mp, atol=eps, rtol=0.0) - - def get_plottable_data(self, averaging_window_size=0): - """ Returns two arrays containing x- and y-coordinates for plotting - the interval sequence. 
-    def add(self, f):
-        """ Adds another PieceWiseLin function to this function.
-        Note: only functions defined on the same interval can be summed.
-
-        :param f: :class:`PieceWiseLinFunc` function to be added.
-        :rtype: None
-        """
-        assert self.x[0] == f.x[0], "The functions have different intervals"
-        assert self.x[-1] == f.x[-1], "The functions have different intervals"
-
-        # python implementation
-        # from python_backend import add_piece_wise_lin_python
-        # self.x, self.y1, self.y2 = add_piece_wise_lin_python(
-        #     self.x, self.y1, self.y2, f.x, f.y1, f.y2)
-
-        # cython version
-        try:
-            from cython_add import add_piece_wise_lin_cython as \
-                add_piece_wise_lin_impl
-        except ImportError:
-            print("Warning: add_piece_wise_lin_cython not found. Make sure \
-that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \
-\n Falling back to slow python backend.")
-            # use python backend
-            from python_backend import add_piece_wise_lin_python as \
-                add_piece_wise_lin_impl
-
-        self.x, self.y1, self.y2 = add_piece_wise_lin_impl(
-            self.x, self.y1, self.y2, f.x, f.y1, f.y2)
-
-    def mul_scalar(self, fac):
-        """ Multiplies the function with a scalar value
-
-        :param fac: Value to multiply
-        :type fac: double
-        :rtype: None
-        """
-        self.y1 *= fac
-        self.y2 *= fac
-
-
-##############################################################
-# DiscreteFunction
-##############################################################
-class DiscreteFunction(object):
-    """ A class representing values defined on a discrete set of points.
-    """
-
-    def __init__(self, x, y, multiplicity):
-        """ Constructs the discrete function.
-
-        :param x: array of length N defining the points at which the values are
-                  defined.
-        :param y: array of length N defining the values at the points x.
-        :param multiplicity: array of length N defining the multiplicity of the
-                             values.
-        """
-        # convert parameters to arrays, also ensures copying
-        self.x = np.array(x)
-        self.y = np.array(y)
-        self.mp = np.array(multiplicity)
-
-    def copy(self):
-        """ Returns a copy of itself
-
-        :rtype: :class:`DiscreteFunction`
-        """
-        return DiscreteFunction(self.x, self.y, self.mp)
-
-    def almost_equal(self, other, decimal=14):
-        """ Checks if the function is equal to another function up to `decimal`
-        precision.
-
-        :param other: another :class:`DiscreteFunction`
-        :returns: True if the two functions are equal up to `decimal` decimals,
-                  False otherwise
-        :rtype: bool
-        """
-        eps = 10.0**(-decimal)
-        return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \
-            np.allclose(self.y, other.y, atol=eps, rtol=0.0) and \
-            np.allclose(self.mp, other.mp, atol=eps, rtol=0.0)
-
-    def get_plottable_data(self, averaging_window_size=0):
-        """ Returns two arrays containing x- and y-coordinates for plotting
-        the interval sequence. The optional parameter `averaging_window_size`
-        determines the size of an averaging window to smooth the profile. If
-        this value is 0, no averaging is performed.
-
-        :param averaging_window_size: size of the averaging window, default=0.
-        :returns: (x_plot, y_plot) containing plottable data
-        :rtype: pair of np.array
-
-        Example::
-
-            x, y = f.get_plottable_data()
-            plt.plot(x, y, '-o', label="Discrete function")
-        """
-
-        if averaging_window_size > 0:
-            # for the averaged profile we have to take the multiplicity into
-            # account. values with higher multiplicity should be considered as
-            # if they appeared several times. Hence we cannot know how many
-            # entries we have to consider to the left and right. Rather, we
-            # will iterate until some wanted multiplicity is reached.
-
-            # the first value in self.mp contains the number of averaged
-            # profiles without any possible extra multiplicities
-            # (by implementation)
-            expected_mp = (averaging_window_size+1) * int(self.mp[0])
-            y_plot = np.zeros_like(self.y)
-            # compute the values in a loop, could be done in cython if required
-            for i in xrange(len(y_plot)):
-
-                if self.mp[i] >= expected_mp:
-                    # the current value contains already all the wanted
-                    # multiplicity
-                    y_plot[i] = self.y[i]/self.mp[i]
-                    continue
-
-                # first look to the right
-                y = self.y[i]
-                mp_r = self.mp[i]
-                j = i+1
-                while j < len(y_plot):
-                    if mp_r+self.mp[j] < expected_mp:
-                        # if we still don't reach the required multiplicity
-                        # we take the whole value
-                        y += self.y[j]
-                        mp_r += self.mp[j]
-                    else:
-                        # otherwise, just some fraction
-                        y += self.y[j] * (expected_mp - mp_r)/self.mp[j]
-                        mp_r += (expected_mp - mp_r)
-                        break
-                    j += 1
-
-                # same story to the left
-                mp_l = self.mp[i]
-                j = i-1
-                while j >= 0:
-                    if mp_l+self.mp[j] < expected_mp:
-                        y += self.y[j]
-                        mp_l += self.mp[j]
-                    else:
-                        y += self.y[j] * (expected_mp - mp_l)/self.mp[j]
-                        mp_l += (expected_mp - mp_l)
-                        break
-                    j -= 1
-                y_plot[i] = y/(mp_l+mp_r-self.mp[i])
-            return 1.0*self.x, y_plot
-
-        else:  # k = 0
-
-            return 1.0*self.x, 1.0*self.y/self.mp
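[Editor's note: a quick illustration of the two plotting modes (the data are the ones used in `test_df` later in this document; a later commit renames the class to `DiscreteFunc`)::

    import pyspike as spk
    f = spk.DiscreteFunction([0.0, 1.0, 2.0, 2.5, 4.0],
                             [0.0, 1.0, 1.0, 0.0, 1.0],
                             [1.0, 2.0, 1.0, 2.0, 1.0])
    x, y = f.get_plottable_data()     # raw profile: y[i] = self.y[i]/self.mp[i]
    x, y = f.get_plottable_data(averaging_window_size=1)  # multiplicity-weighted smoothing
]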
-    def integral(self, interval=None):
-        """ Returns the integral over the given interval. For the discrete
-        function, this amounts to the sum over all values divided by the total
-        multiplicity.
-
-        :param interval: integration interval given as a pair of floats, or a
-                         sequence of pairs in case of multiple intervals, if
-                         None the integral over the whole function is computed.
-        :type interval: Pair, sequence of pairs, or None.
-        :returns: the integral
-        :rtype: float
-        """
-
-        def get_indices(ival):
-            """ Returns the indices surrounding the given interval"""
-            start_ind = np.searchsorted(self.x, ival[0], side='right')
-            end_ind = np.searchsorted(self.x, ival[1], side='left')
-            assert start_ind > 0 and end_ind < len(self.x), \
-                "Invalid averaging interval"
-            return start_ind, end_ind
-
-        if interval is None:
-            # no interval given, integrate over the whole spike train
-            # don't count the first value, which is zero by definition
-            return 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1])
-
-        # check if interval is a sequence
-        assert isinstance(interval, collections.Sequence), \
-            "Invalid value for `interval`. None, Sequence or Tuple expected."
-        # check if interval is a sequence of intervals
-        if not isinstance(interval[0], collections.Sequence):
-            # find the indices corresponding to the interval
-            start_ind, end_ind = get_indices(interval)
-            return (np.sum(self.y[start_ind:end_ind]) /
-                    np.sum(self.mp[start_ind:end_ind]))
-        else:
-            value = 0.0
-            multiplicity = 0.0
-            for ival in interval:
-                # find the indices corresponding to the interval
-                start_ind, end_ind = get_indices(ival)
-                value += np.sum(self.y[start_ind:end_ind])
-                multiplicity += np.sum(self.mp[start_ind:end_ind])
-            return value/multiplicity
-
-    def avrg(self, interval=None):
-        """ Computes the average of the interval sequence:
-        :math:`a = 1/N \sum f_n` where N is the number of intervals.
-
-        :param interval: averaging interval given as a pair of floats, a
-                         sequence of pairs for averaging multiple intervals, or
-                         None, if None the average over the whole function is
-                         computed.
-        :type interval: Pair, sequence of pairs, or None.
-        :returns: the average a.
-        :rtype: float
-        """
-        return self.integral(interval)
-
-    def add(self, f):
-        """ Adds another `DiscreteFunction` function to this function.
-        Note: only functions defined on the same interval can be summed.
-
-        :param f: :class:`DiscreteFunction` function to be added.
-        :rtype: None
-        """
-        assert self.x[0] == f.x[0], "The functions have different intervals"
-        assert self.x[-1] == f.x[-1], "The functions have different intervals"
-
-        # cython version
-        try:
-            from cython_add import add_discrete_function_cython as \
-                add_discrete_function_impl
-        except ImportError:
-            print("Warning: add_discrete_function_cython not found. Make \
-sure that PySpike is installed by running\n\
-'python setup.py build_ext --inplace'! \
-\n Falling back to slow python backend.")
-            # use python backend
-            from python_backend import add_discrete_function_python as \
-                add_discrete_function_impl
-
-        self.x, self.y, self.mp = \
-            add_discrete_function_impl(self.x, self.y, self.mp,
-                                       f.x, f.y, f.mp)
-
-    def mul_scalar(self, fac):
-        """ Multiplies the function with a scalar value
-
-        :param fac: Value to multiply
-        :type fac: double
-        :rtype: None
-        """
-        self.y *= fac
-
-
-def average_profile(profiles):
-    """ Computes the average profile from the given ISI- or SPIKE-profiles.
-
-    :param profiles: list of :class:`PieceWiseConstFunc` or
-                     :class:`PieceWiseLinFunc` representing ISI- or
-                     SPIKE-profiles to be averaged.
-    :returns: the average profile :math:`<S_{isi}>` or :math:`<S_{spike}>`.
- :rtype: :class:`PieceWiseConstFunc` or :class:`PieceWiseLinFunc` - """ - assert len(profiles) > 1 - - avrg_profile = profiles[0].copy() - for i in xrange(1, len(profiles)): - avrg_profile.add(profiles[i]) - avrg_profile.mul_scalar(1.0/len(profiles)) # normalize - - return avrg_profile diff --git a/test/test_function.py b/test/test_function.py index 933fd2e..d81b03a 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -92,11 +92,12 @@ def test_pwc_avrg(): y = [0.5, 1.0, -0.25, 0.0, 1.5] f2 = spk.PieceWiseConstFunc(x, y) - f_avrg = spk.average_profile([f1, f2]) + f1.add(f2) + f1.mul_scalar(0.5) x_expected = [0.0, 0.75, 1.0, 2.0, 2.5, 2.7, 4.0] y_expected = [0.75, 1.0, 0.25, 0.625, 0.375, 1.125] - assert_array_almost_equal(f_avrg.x, x_expected, decimal=16) - assert_array_almost_equal(f_avrg.y, y_expected, decimal=16) + assert_array_almost_equal(f1.x, x_expected, decimal=16) + assert_array_almost_equal(f1.y, y_expected, decimal=16) def test_pwl(): @@ -196,11 +197,12 @@ def test_pwl_avrg(): y2_expected = np.array([0.8+1.0+0.5*0.75, 1.5+1.0-0.8*0.25/1.25, -0.4+0.2, 1.5-1.0, 0.75-0.5*0.2/1.5, 2.25]) / 2 - f_avrg = spk.average_profile([f1, f2]) + f1.add(f2) + f1.mul_scalar(0.5) - assert_array_almost_equal(f_avrg.x, x_expected, decimal=16) - assert_array_almost_equal(f_avrg.y1, y1_expected, decimal=16) - assert_array_almost_equal(f_avrg.y2, y2_expected, decimal=16) + assert_array_almost_equal(f1.x, x_expected, decimal=16) + assert_array_almost_equal(f1.y1, y1_expected, decimal=16) + assert_array_almost_equal(f1.y2, y2_expected, decimal=16) def test_df(): @@ -208,7 +210,7 @@ def test_df(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [0.0, 1.0, 1.0, 0.0, 1.0] mp = [1.0, 2.0, 1.0, 2.0, 1.0] - f = spk.DiscreteFunction(x, y, mp) + f = spk.DiscreteFunc(x, y, mp) xp, yp = f.get_plottable_data() xp_expected = [0.0, 1.0, 2.0, 2.5, 4.0] @@ -237,6 +239,9 @@ if __name__ == "__main__": test_pwc() test_pwc_add() test_pwc_mul() + test_pwc_avrg() test_pwl() test_pwl_add() test_pwl_mul() + test_pwl_avrg() + test_df() -- cgit v1.2.3 From 6f418a5a837939b132967bcdb3ff0ede6d899bd2 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 12 May 2015 18:45:03 +0200 Subject: +functions to obtain values of the pwc/pwl profile Added __call__ operators to PieceWiseConst and PieceWiseLin class for obtaining function values at certain points in time. --- pyspike/PieceWiseConstFunc.py | 22 ++++++++++++++++++++++ pyspike/PieceWiseLinFunc.py | 30 ++++++++++++++++++++++++++++++ test/test_function.py | 32 +++++++++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 1 deletion(-) (limited to 'test/test_function.py') diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py index 41998ef..cf64e58 100644 --- a/pyspike/PieceWiseConstFunc.py +++ b/pyspike/PieceWiseConstFunc.py @@ -26,6 +26,28 @@ class PieceWiseConstFunc(object): self.x = np.array(x) self.y = np.array(y) + def __call__(self, t): + """ Returns the function value for the given time t. If t is a list of + times, the corresponding list of values is returned. + + :param: time t, or list of times + :returns: function value(s) at that time(s). 
+ """ + assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \ + "Invalid time: " + str(t) + + ind = np.searchsorted(self.x, t, side='right') + # correct the cases t == x[0], t == x[-1] + try: + ind[ind == 0] = 1 + ind[ind == len(self.x)] = len(self.x)-1 + except TypeError: + if ind == 0: + ind = 1 + if ind == len(self.x): + ind = len(self.x)-1 + return self.y[ind-1] + def copy(self): """ Returns a copy of itself diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py index f2442be..b9787eb 100644 --- a/pyspike/PieceWiseLinFunc.py +++ b/pyspike/PieceWiseLinFunc.py @@ -29,6 +29,36 @@ class PieceWiseLinFunc: self.y1 = np.array(y1) self.y2 = np.array(y2) + def __call__(self, t): + """ Returns the function value for the given time t. If t is a list of + times, the corresponding list of values is returned. + + :param: time t, or list of times + :returns: function value(s) at that time(s). + """ + def intermediate_value(x0, x1, y0, y1, x): + """ computes the intermediate value of a linear function """ + return y0 + (y1-y0)*(x-x0)/(x1-x0) + + assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \ + "Invalid time: " + str(t) + + ind = np.searchsorted(self.x, t, side='right') + # correct the cases t == x[0], t == x[-1] + try: + ind[ind == 0] = 1 + ind[ind == len(self.x)] = len(self.x)-1 + except TypeError: + if ind == 0: + ind = 1 + if ind == len(self.x): + ind = len(self.x)-1 + return intermediate_value(self.x[ind-1], + self.x[ind], + self.y1[ind-1], + self.y2[ind-1], + t) + def copy(self): """ Returns a copy of itself diff --git a/test/test_function.py b/test/test_function.py index d81b03a..c56a295 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -10,7 +10,8 @@ Distributed under the BSD License from __future__ import print_function import numpy as np from copy import copy -from numpy.testing import assert_almost_equal, assert_array_almost_equal +from numpy.testing import assert_equal, assert_almost_equal, \ + assert_array_equal, assert_array_almost_equal import pyspike as spk @@ -20,6 +21,17 @@ def test_pwc(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) + + # function values + assert_equal(f(0.0), 1.0) + assert_equal(f(0.5), 1.0) + assert_equal(f(2.25), 1.5) + assert_equal(f(3.5), 0.75) + assert_equal(f(4.0), 0.75) + + assert_array_equal(f([0.0, 0.5, 2.25, 3.5, 4.0]), + [1.0, 1.0, 1.5, 0.75, 0.75]) + xp, yp = f.get_plottable_data() xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] @@ -38,11 +50,17 @@ def test_pwc(): assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16) a = f.avrg([1.0, 4.0]) assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.5*0.75)/3.0, decimal=16) + a = f.avrg([0.0, 2.2]) + assert_almost_equal(a, (1.0*1.0-0.5*1.0+0.2*1.5)/2.2, decimal=15) # averaging over multiple intervals a = f.avrg([(0.5, 1.5), (1.5, 3.5)]) assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) + # averaging over multiple intervals + a = f.avrg([(0.5, 1.5), (2.2, 3.5)]) + assert_almost_equal(a, (0.5*1.0-0.5*0.5+0.3*1.5+1.0*0.75)/2.3, decimal=15) + def test_pwc_add(): # some random data @@ -105,6 +123,18 @@ def test_pwl(): y1 = [1.0, -0.5, 1.5, 0.75] y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) + + # function values + assert_equal(f(0.0), 1.0) + assert_equal(f(0.5), 1.25) + assert_equal(f(2.25), 1.5) + assert_equal(f(2.5), 0.75) + assert_equal(f(3.5), 0.75-0.5*1.0/1.5) + assert_equal(f(4.0), 0.25) + + assert_array_equal(f([0.0, 0.5, 2.25, 2.5, 3.5, 4.0]), + [1.0, 1.25, 1.5, 0.75, 
                        0.75-0.5*1.0/1.5, 0.25])
+
     xp, yp = f.get_plottable_data()
 
     xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]
-- 
cgit v1.2.3


From 8841138b74242ed9eb77c972c76e9a617778a79a Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Wed, 13 May 2015 18:19:02 +0200
Subject: pwc function now returns intermediate value at exact spike times

---
 pyspike/PieceWiseConstFunc.py | 35 +++++++++++++++++++++++++++--------
 test/test_function.py         |  6 ++++--
 2 files changed, 31 insertions(+), 10 deletions(-)

(limited to 'test/test_function.py')

diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index cf64e58..dea1a56 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -37,16 +37,35 @@ class PieceWiseConstFunc(object):
             "Invalid time: " + str(t)
 
         ind = np.searchsorted(self.x, t, side='right')
-        # correct the cases t == x[0], t == x[-1]
-        try:
+        if isinstance(t, collections.Sequence):
+            # t is a sequence of values
+            # correct the cases t == x[0], t == x[-1]
             ind[ind == 0] = 1
             ind[ind == len(self.x)] = len(self.x)-1
-        except TypeError:
-            if ind == 0:
-                ind = 1
-            if ind == len(self.x):
-                ind = len(self.x)-1
-        return self.y[ind-1]
+            value = self.y[ind-1]
+            # correct the values at exact spike times: there the value should
+            # be at half of the step
+            # obtain the 'left' side indices for t
+            ind_l = np.searchsorted(self.x, t, side='left')
+            # if left and right side indices differ, the time t has to appear
+            # in self.x
+            ind_at_spike = ind[np.logical_and(np.logical_and(ind != ind_l,
+                                                             ind > 1),
+                                              ind < len(self.x))]
+            value[ind_at_spike] = 0.5 * (self.y[ind_at_spike-1] +
+                                         self.y[ind_at_spike-2])
+            return value
+        else:
+            # specific check for interval edges
+            if t == self.x[0]:
+                return self.y[0]
+            if t == self.x[-1]:
+                return self.y[-1]
+            # check if we are on any other exact spike time
+            if sum(self.x == t) > 0:
+                # use the middle of the left and right ISI value
+                return 0.5 * (self.y[ind-1] + self.y[ind-2])
+            return self.y[ind-1]
 
     def copy(self):
         """ Returns a copy of itself

diff --git a/test/test_function.py b/test/test_function.py
index c56a295..8ad4b17 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -25,12 +25,14 @@ def test_pwc():
     # function values
     assert_equal(f(0.0), 1.0)
     assert_equal(f(0.5), 1.0)
+    assert_equal(f(1.0), 0.25)
     assert_equal(f(2.25), 1.5)
+    assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75)
     assert_equal(f(4.0), 0.75)
 
-    assert_array_equal(f([0.0, 0.5, 2.25, 3.5, 4.0]),
-                       [1.0, 1.0, 1.5, 0.75, 0.75])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.0, 0.25, 1.5, 2.25/2, 0.75, 0.75])
 
     xp, yp = f.get_plottable_data()
-- 
cgit v1.2.3


From a61a14295e28e6e95fa510693a11ae8c78a552ab Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sun, 17 May 2015 17:50:39 +0200
Subject: return correct values at exact spike times

pwc and pwl function objects return the average of the left and right limit
as function value at the exact spike times.
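[Editor's note: a minimal sketch of the behavior described in this commit message, using the values from `test_pwc` in this patch series::

    import pyspike as spk
    f = spk.PieceWiseConstFunc([0.0, 1.0, 2.0, 2.5, 4.0], [1.0, -0.5, 1.5, 0.75])
    f(0.5)   # inside a piece: the plateau value, 1.0
    f(1.0)   # at an exact spike time: average of left and right limit,
             # 0.5*(1.0 + (-0.5)) = 0.25
]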
--- pyspike/PieceWiseConstFunc.py | 15 ++++++++----- pyspike/PieceWiseLinFunc.py | 51 +++++++++++++++++++++++++++++++++---------- test/test_function.py | 13 ++++++----- 3 files changed, 56 insertions(+), 23 deletions(-) (limited to 'test/test_function.py') diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py index dea1a56..6d7a845 100644 --- a/pyspike/PieceWiseConstFunc.py +++ b/pyspike/PieceWiseConstFunc.py @@ -49,13 +49,16 @@ class PieceWiseConstFunc(object): ind_l = np.searchsorted(self.x, t, side='left') # if left and right side indices differ, the time t has to appear # in self.x - ind_at_spike = ind[np.logical_and(np.logical_and(ind != ind_l, - ind > 1), - ind < len(self.x))] - value[ind_at_spike] = 0.5 * (self.y[ind_at_spike-1] + - self.y[ind_at_spike-2]) + ind_at_spike = np.logical_and(np.logical_and(ind != ind_l, + ind > 1), + ind < len(self.x)) + # get the corresponding indices for the resulting value array + val_ind = np.arange(len(ind))[ind_at_spike] + # and for the arrays self.x, y1, y2 + xy_ind = ind[ind_at_spike] + value[val_ind] = 0.5 * (self.y[xy_ind-1] + self.y[xy_ind-2]) return value - else: + else: # t is a single value # specific check for interval edges if t == self.x[0]: return self.y[0] diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py index b9787eb..03c2da2 100644 --- a/pyspike/PieceWiseLinFunc.py +++ b/pyspike/PieceWiseLinFunc.py @@ -44,20 +44,47 @@ class PieceWiseLinFunc: "Invalid time: " + str(t) ind = np.searchsorted(self.x, t, side='right') - # correct the cases t == x[0], t == x[-1] - try: + if isinstance(t, collections.Sequence): + # t is a sequence of values + # correct the cases t == x[0], t == x[-1] ind[ind == 0] = 1 ind[ind == len(self.x)] = len(self.x)-1 - except TypeError: - if ind == 0: - ind = 1 - if ind == len(self.x): - ind = len(self.x)-1 - return intermediate_value(self.x[ind-1], - self.x[ind], - self.y1[ind-1], - self.y2[ind-1], - t) + value = intermediate_value(self.x[ind-1], + self.x[ind], + self.y1[ind-1], + self.y2[ind-1], + t) + # correct the values at exact spike times: there the value should + # be the at half of the step + # obtain the 'left' side indices for t + ind_l = np.searchsorted(self.x, t, side='left') + # if left and right side indices differ, the time t has to appear + # in self.x + ind_at_spike = np.logical_and(np.logical_and(ind != ind_l, + ind > 1), + ind < len(self.x)) + # get the corresponding indices for the resulting value array + val_ind = np.arange(len(ind))[ind_at_spike] + # and for the values in self.x, y1, y2 + xy_ind = ind[ind_at_spike] + # the values are defined as the average of the left and right limit + value[val_ind] = 0.5 * (self.y1[xy_ind-1] + self.y2[xy_ind-2]) + return value + else: # t is a single value + # specific check for interval edges + if t == self.x[0]: + return self.y1[0] + if t == self.x[-1]: + return self.y2[-1] + # check if we are on any other exact spike time + if sum(self.x == t) > 0: + # use the middle of the left and right Spike value + return 0.5 * (self.y1[ind-1] + self.y2[ind-2]) + return intermediate_value(self.x[ind-1], + self.x[ind], + self.y1[ind-1], + self.y2[ind-1], + t) def copy(self): """ Returns a copy of itself diff --git a/test/test_function.py b/test/test_function.py index 8ad4b17..92d378d 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -26,13 +26,14 @@ def test_pwc(): assert_equal(f(0.0), 1.0) assert_equal(f(0.5), 1.0) assert_equal(f(1.0), 0.25) + assert_equal(f(2.0), 0.5) assert_equal(f(2.25), 1.5) 
    assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75)
     assert_equal(f(4.0), 0.75)
 
-    assert_array_equal(f([0.0, 0.5, 1.0, 2.25, 2.5, 3.5, 4.0]),
-                       [1.0, 1.0, 0.25, 1.5, 2.25/2, 0.75, 0.75])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.0, 0.25, 0.5, 1.5, 2.25/2, 0.75, 0.75])
 
     xp, yp = f.get_plottable_data()
 
@@ -129,13 +130,15 @@ def test_pwl():
     # function values
     assert_equal(f(0.0), 1.0)
     assert_equal(f(0.5), 1.25)
+    assert_equal(f(1.0), 0.5)
+    assert_equal(f(2.0), 1.1/2)
     assert_equal(f(2.25), 1.5)
-    assert_equal(f(2.5), 0.75)
+    assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75-0.5*1.0/1.5)
     assert_equal(f(4.0), 0.25)
 
-    assert_array_equal(f([0.0, 0.5, 2.25, 2.5, 3.5, 4.0]),
-                       [1.0, 1.25, 1.5, 0.75, 0.75-0.5*1.0/1.5, 0.25])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.25, 0.5, 0.55, 1.5, 2.25/2, 0.75-0.5/1.5, 0.25])
 
     xp, yp = f.get_plottable_data()
-- 
cgit v1.2.3


From 34bd30415dd93a2425ce566627e24ee9483ada3e Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Thu, 20 Sep 2018 10:49:42 -0700
Subject: Spike Order support (#39)

* reorganized directionality module
* further refactoring of directionality
* completed python directionality backend
* added SPIKE-Sync based filtering
  new function filter_by_spike_sync removes spikes that have a multi-variate
  Spike Sync value below some threshold
  not yet fully tested, python backend missing.
* spike sync filtering, cython sim ann
  Added function for filtering out events based on a threshold for the
  spike sync values. Useful for focusing on synchronous events during
  directionality analysis. Also added cython version of simulated annealing
  for performance.
* added coincidence single profile to python backend
  missing function in python backend added, identified and fixed a bug in
  the implementation as well
* updated test case to new spike sync behavior
* python3 fixes
* another python3 fix
* reorganized directionality module
* further refactoring of directionality
* completed python directionality backend
* added SPIKE-Sync based filtering
  new function filter_by_spike_sync removes spikes that have a multi-variate
  Spike Sync value below some threshold
  not yet fully tested, python backend missing (see the usage sketch after
  these notes).
* spike sync filtering, cython sim ann
  Added function for filtering out events based on a threshold for the
  spike sync values. Useful for focusing on synchronous events during
  directionality analysis. Also added cython version of simulated annealing
  for performance.
* added coincidence single profile to python backend
  missing function in python backend added, identified and fixed a bug in
  the implementation as well
* updated test case to new spike sync behavior
* python3 fixes
* another python3 fix
* Fix absolute imports in directionality measures
* remove commented code
* Add directionality to docs, bump version
* Clean up directionality module, add doxy.
* Remove debug print from tests
* Fix bug in calling Python backend
* Fix incorrect integrals in PieceWiseConstFunc (#36)
  * Add (some currently failing) tests for PieceWiseConstFunc.integral
  * Fix implementation of PieceWiseConstFunc.integral
    Just by adding a special condition for when we are only taking an integral
    "between" two edges of a PieceWiseConstFunc
    All tests now pass. Fixes #33.
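[Editor's note: the SPIKE-Sync based filtering mentioned above has no example in this patch. A minimal sketch follows; only the name and purpose of `filter_by_spike_sync` are taken from the patch, its exact signature and the threshold semantics are assumptions, and the data file name is purely illustrative::

    import pyspike as spk
    spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                                  edges=(0, 4000))
    # hypothetical call: keep only spikes whose multivariate SPIKE-Sync
    # value reaches at least 0.5 (signature assumed, see note above)
    filtered = spk.filter_by_spike_sync(spike_trains, 0.5)
]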
  * Add PieceWiseConstFunc.integral tests for ValueError
  * Add testing bounds of integral
  * Raise ValueError in function implementation
* Fix incorrect integrals in PieceWiseLinFunc (#38)
  Integrals of piece-wise linear functions were incorrect if the requested
  interval lies completely between two support points. This has been fixed,
  and a unit test exercising this behavior was added. Fixes #38
* Add Spike Order example and Tutorial section
  Adds an example computing spike order profile and the optimal spike train
  order. Also adds a section on spike train order to the tutorial.
---
 Changelog                                       |   3 +
 Readme.rst                                      |   9 +-
 doc/pyspike.rst                                 |   6 +
 doc/tutorial.rst                                |  66 +++
 examples/spike_train_order.py                   |  52 +++
 pyspike/PieceWiseConstFunc.py                   |  32 +-
 pyspike/PieceWiseLinFunc.py                     |  42 +-
 pyspike/__init__.py                             |  16 +-
 pyspike/cython/cython_directionality.pyx        | 262 ++++++++++++
 pyspike/cython/cython_distances.pyx             | 200 +++++++++
 pyspike/cython/cython_profiles.pyx              |  33 ++
 pyspike/cython/cython_simulated_annealing.pyx   |  82 ++++
 pyspike/cython/directionality_python_backend.py | 144 +++++++
 pyspike/cython/python_backend.py                |  67 ++-
 pyspike/spike_directionality.py                 | 522 ++++++++++++++++++++++++
 pyspike/spike_sync.py                           |  55 ++-
 setup.py                                        |  28 +-
 test/test_directionality.py                     |  97 +++++
 test/test_function.py                           |  62 +++
 test/test_sync_filter.py                        |  95 +++++
 20 files changed, 1812 insertions(+), 61 deletions(-)
 create mode 100644 examples/spike_train_order.py
 create mode 100644 pyspike/cython/cython_directionality.pyx
 create mode 100644 pyspike/cython/cython_simulated_annealing.pyx
 create mode 100644 pyspike/cython/directionality_python_backend.py
 create mode 100644 pyspike/spike_directionality.py
 create mode 100644 test/test_directionality.py
 create mode 100644 test/test_sync_filter.py

(limited to 'test/test_function.py')

diff --git a/Changelog b/Changelog
index 21b7cb0..88e16cc 100644
--- a/Changelog
+++ b/Changelog
@@ -1,3 +1,6 @@
+PySpike v0.6:
+ * Support for computing spike directionality and spike train order
+
 PySpike v0.5:
  * First beta release
  * Python 2.6 support removed

diff --git a/Readme.rst b/Readme.rst
index 0422dad..74b014b 100644
--- a/Readme.rst
+++ b/Readme.rst
@@ -31,19 +31,14 @@ Additionally, depending on the used methods: ISI-distance [1], SPIKE-distance [2
 Important Changelog
 -----------------------------
 
+With version 0.6.0, the spike directionality and spike train order functions have been added.
+
 With version 0.5.0, the interfaces have been unified and the specific
 functions for multivariate computations have become deprecated.
 
 With version 0.2.0, the :code:`SpikeTrain` class has been introduced to represent
 spike trains. This is a breaking change in the function interfaces.
 Hence, programs written for older versions of PySpike (0.1.x) will not run with
 newer versions.
-
-Upcoming Functionality
--------------------------
-
-In an upcoming release, new functionality for analyzing Synfire patterns based on the new measures SPIKE-Order and Spike-Train-Order method will become part of the PySpike library.
-The new measures and algorithms are described in `this preprint <https://arxiv.org/abs/1610.07986>`_.
-
 Requirements and Installation
 -----------------------------

diff --git a/doc/pyspike.rst b/doc/pyspike.rst
index 74ab439..3b10d2a 100644
--- a/doc/pyspike.rst
+++ b/doc/pyspike.rst
@@ -64,6 +64,12 @@ PSTH
     :undoc-members:
     :show-inheritance:
 
+Directionality
+........................................
+.. automodule:: pyspike.spike_directionality
+    :members:
+    :undoc-members:
+    :show-inheritance:
 
 Helper functions
 ........................................

diff --git a/doc/tutorial.rst b/doc/tutorial.rst
index aff03a8..377c0a2 100644
--- a/doc/tutorial.rst
+++ b/doc/tutorial.rst
@@ -231,3 +231,69 @@ The following example computes and plots the ISI- and SPIKE-distance matrix as w
     plt.title("SPIKE-Sync")
 
     plt.show()
+
+
+Quantifying Leaders and Followers: Spike Train Order
+----------------------------------------------------
+
+PySpike provides functionality to quantify how much a set of spike trains
+resembles a synfire pattern (i.e. a perfect leader-follower pattern). For details
+on the algorithms please see
+`our article in NJP <https://doi.org/10.1088/1367-2630/aa68c3>`_.
+
+The following example computes the Spike Order profile and Synfire Indicator
+of two Poissonian spike trains.
+
+.. code:: python
+
+    import numpy as np
+    from matplotlib import pyplot as plt
+    import pyspike as spk
+
+    st1 = spk.generate_poisson_spikes(1.0, [0, 20])
+    st2 = spk.generate_poisson_spikes(1.0, [0, 20])
+
+    d = spk.spike_directionality(st1, st2)
+
+    print "Spike Directionality of two Poissonian spike trains:", d
+
+    E = spk.spike_train_order_profile(st1, st2)
+
+    plt.figure()
+    x, y = E.get_plottable_data()
+    plt.plot(x, y, '-ob')
+    plt.ylim(-1.1, 1.1)
+    plt.xlabel("t")
+    plt.ylabel("E")
+    plt.title("Spike Train Order Profile")
+
+    plt.show()
+
+Additionally, PySpike can also compute the optimal ordering of the spike trains,
+i.e. the ordering that most resembles a synfire pattern. The following example
+computes the optimal order of a set of 20 Poissonian spike trains:
+
+.. code:: python
+
+    M = 20
+    spike_trains = [spk.generate_poisson_spikes(1.0, [0, 100]) for m in xrange(M)]
+
+    F_init = spk.spike_train_order(spike_trains)
+    print "Initial Synfire Indicator for 20 Poissonian spike trains:", F_init
+
+    D_init = spk.spike_directionality_matrix(spike_trains)
+    phi, _ = spk.optimal_spike_train_sorting(spike_trains)
+    F_opt = spk.spike_train_order(spike_trains, indices=phi)
+    print "Synfire Indicator of optimized spike train sorting:", F_opt
+
+    D_opt = spk.permutate_matrix(D_init, phi)
+
+    plt.figure()
+    plt.imshow(D_init)
+    plt.title("Initial Directionality Matrix")
+
+    plt.figure()
+    plt.imshow(D_opt)
+    plt.title("Optimized Directionality Matrix")
+
+    plt.show()
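[Editor's note: a quick way to sanity-check the Synfire Indicator described in the tutorial section above is to feed it a hand-built synfire pattern instead of Poissonian spike trains. A sketch, using `SpikeTrain` and `spike_train_order` as in the tutorial; the spike times are arbitrary::

    import numpy as np
    import pyspike as spk

    # four spike trains firing in a fixed order:
    # train m fires 0.1*m after train 0
    trains = [spk.SpikeTrain(np.arange(1.0, 20.0, 2.0) + 0.1 * m, [0, 20])
              for m in range(4)]
    F = spk.spike_train_order(trains)
    # for such a perfect leader-follower pattern F should be close to +1
]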
diff --git a/examples/spike_train_order.py b/examples/spike_train_order.py
new file mode 100644
index 0000000..3a42472
--- /dev/null
+++ b/examples/spike_train_order.py
@@ -0,0 +1,52 @@
+import numpy as np
+from matplotlib import pyplot as plt
+import pyspike as spk
+
+
+st1 = spk.generate_poisson_spikes(1.0, [0, 20])
+st2 = spk.generate_poisson_spikes(1.0, [0, 20])
+
+d = spk.spike_directionality(st1, st2)
+
+print "Spike Directionality of two Poissonian spike trains:", d
+
+E = spk.spike_train_order_profile(st1, st2)
+
+plt.figure()
+x, y = E.get_plottable_data()
+plt.plot(x, y, '-ob')
+plt.ylim(-1.1, 1.1)
+plt.xlabel("t")
+plt.ylabel("E")
+plt.title("Spike Train Order Profile")
+
+
+###### Optimize spike train order of 20 Random spike trains #######
+
+M = 20
+
+spike_trains = [spk.generate_poisson_spikes(1.0, [0, 100]) for m in xrange(M)]
+
+F_init = spk.spike_train_order(spike_trains)
+
+print "Initial Synfire Indicator for 20 Poissonian spike trains:", F_init
+
+D_init = spk.spike_directionality_matrix(spike_trains)
+
+phi, _ = spk.optimal_spike_train_sorting(spike_trains)
+
+F_opt = spk.spike_train_order(spike_trains, indices=phi)
+
+print "Synfire Indicator of optimized spike train sorting:", F_opt
+
+D_opt = spk.permutate_matrix(D_init, phi)
+
+plt.figure()
+plt.imshow(D_init)
+plt.title("Initial Directionality Matrix")
+
+plt.figure()
+plt.imshow(D_opt)
+plt.title("Optimized Directionality Matrix")
+
+plt.show()

diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index 5ce5f27..17fdd3f 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -129,19 +129,31 @@ class PieceWiseConstFunc(object):
             # no interval given, integrate over the whole spike train
             a = np.sum((self.x[1:]-self.x[:-1]) * self.y)
         else:
+            if interval[0]>interval[1]:
+                raise ValueError("Invalid averaging interval: interval[0]>=interval[1]")
+            if interval[0]<self.x[0]:
+                raise ValueError("Invalid averaging interval: interval[0]<self.x[0]")
+            if interval[1]>self.x[-1]:
+                raise ValueError("Invalid averaging interval: interval[1]>self.x[-1]")
             # find the indices corresponding to the interval
             start_ind = np.searchsorted(self.x, interval[0], side='right')
             end_ind = np.searchsorted(self.x, interval[1], side='left')-1
-            assert start_ind > 0 and end_ind < len(self.x), \
-                "Invalid averaging interval"
-            # first the contribution from between the indices
-            a = np.sum((self.x[start_ind+1:end_ind+1] -
-                        self.x[start_ind:end_ind]) *
-                       self.y[start_ind:end_ind])
-            # correction from start to first index
-            a += (self.x[start_ind]-interval[0]) * self.y[start_ind-1]
-            # correction from last index to end
-            a += (interval[1]-self.x[end_ind]) * self.y[end_ind]
+            if start_ind > end_ind:
+                # contribution from between two closest edges
+                a = (self.x[start_ind]-self.x[end_ind]) * self.y[end_ind]
+                # minus the part that is not within the interval
+                a -= ((interval[0]-self.x[end_ind])+(self.x[start_ind]-interval[1])) * self.y[end_ind]
+            else:
+                assert start_ind > 0 and end_ind < len(self.x), \
+                    "Invalid averaging interval"
+                # first the contribution from between the indices
+                a = np.sum((self.x[start_ind+1:end_ind+1] -
+                            self.x[start_ind:end_ind]) *
+                           self.y[start_ind:end_ind])
+                # correction from start to first index
+                a += (self.x[start_ind]-interval[0]) * self.y[start_ind-1]
+                # correction from last index to end
+                a += (interval[1]-self.x[end_ind]) * self.y[end_ind]
         return a
 
     def avrg(self, interval=None):

diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py
index 8145e63..8faaec4 100644
--- a/pyspike/PieceWiseLinFunc.py
+++ b/pyspike/PieceWiseLinFunc.py
@@ -146,31 +146,47 @@ class PieceWiseLinFunc:
 
         if interval is None:
             # no interval given, integrate over the whole spike train
-            integral = np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2))
+            return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2))
+
+        # find the indices corresponding to the interval
+        start_ind = np.searchsorted(self.x, interval[0], side='right')
+        end_ind = np.searchsorted(self.x, interval[1], side='left')-1
+        assert start_ind > 0 and end_ind < len(self.x), \
+            "Invalid averaging interval"
+        if start_ind > end_ind:
+            print(start_ind, end_ind, self.x[start_ind])
+            # contribution from between two closest edges
+            y_x0 = intermediate_value(self.x[start_ind-1],
+                                      self.x[start_ind],
+                                      self.y1[start_ind-1],
+                                      self.y2[start_ind-1],
+                                      interval[0])
+            y_x1 = intermediate_value(self.x[start_ind-1],
+                                      self.x[start_ind],
+                                      self.y1[start_ind-1],
+                                      self.y2[start_ind-1],
+                                      interval[1])
+            print(y_x0, y_x1, interval[1] - interval[0])
+            integral = (y_x0 + y_x1) * 0.5 * (interval[1] - interval[0])
+            print(integral)
         else:
-            # find the indices corresponding to the interval
-            start_ind = np.searchsorted(self.x, interval[0], side='right')
-            end_ind = np.searchsorted(self.x, interval[1], side='left')-1
-            assert start_ind > 0 and end_ind < len(self.x), \
-                "Invalid averaging interval"
             # first the contribution from between the indices
             integral = np.sum((self.x[start_ind+1:end_ind+1] -
-                               self.x[start_ind:end_ind]) *
-
0.5*(self.y1[start_ind:end_ind] + - self.y2[start_ind:end_ind])) + self.x[start_ind:end_ind]) * + 0.5*(self.y1[start_ind:end_ind] + + self.y2[start_ind:end_ind])) # correction from start to first index integral += (self.x[start_ind]-interval[0]) * 0.5 * \ (self.y2[start_ind-1] + - intermediate_value(self.x[start_ind-1], + intermediate_value(self.x[start_ind-1], self.x[start_ind], self.y1[start_ind-1], self.y2[start_ind-1], - interval[0] - )) + interval[0])) # correction from last index to end integral += (interval[1]-self.x[end_ind]) * 0.5 * \ (self.y1[end_ind] + - intermediate_value(self.x[end_ind], self.x[end_ind+1], + intermediate_value(self.x[end_ind], self.x[end_ind+1], self.y1[end_ind], self.y2[end_ind], interval[1] )) diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 08253fb..3897d18 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,5 +1,5 @@ """ -Copyright 2014-2015, Mario Mulansky +Copyright 2014-2018, Mario Mulansky Distributed under the BSD License """ @@ -7,8 +7,8 @@ Distributed under the BSD License from __future__ import absolute_import __all__ = ["isi_distance", "spike_distance", "spike_sync", "psth", - "spikes", "SpikeTrain", "PieceWiseConstFunc", "PieceWiseLinFunc", - "DiscreteFunc", "directionality"] + "spikes", "spike_directionality", "SpikeTrain", + "PieceWiseConstFunc", "PieceWiseLinFunc", "DiscreteFunc"] from .PieceWiseConstFunc import PieceWiseConstFunc from .PieceWiseLinFunc import PieceWiseLinFunc @@ -20,13 +20,21 @@ from .isi_distance import isi_profile, isi_distance, isi_profile_multi,\ from .spike_distance import spike_profile, spike_distance, spike_profile_multi,\ spike_distance_multi, spike_distance_matrix from .spike_sync import spike_sync_profile, spike_sync,\ - spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix + spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix,\ + filter_by_spike_sync from .psth import psth from .spikes import load_spike_trains_from_txt, save_spike_trains_to_txt, \ spike_train_from_string, import_spike_trains_from_time_series, \ merge_spike_trains, generate_poisson_spikes +from .spike_directionality import spike_directionality, \ + spike_directionality_values, spike_directionality_matrix, \ + spike_train_order_profile, spike_train_order_profile_bi, \ + spike_train_order_profile_multi, spike_train_order, \ + spike_train_order_bi, spike_train_order_multi, \ + optimal_spike_train_sorting, permutate_matrix + # define the __version__ following # http://stackoverflow.com/questions/17583443 from pkg_resources import get_distribution, DistributionNotFound diff --git a/pyspike/cython/cython_directionality.pyx b/pyspike/cython/cython_directionality.pyx new file mode 100644 index 0000000..ac37690 --- /dev/null +++ b/pyspike/cython/cython_directionality.pyx @@ -0,0 +1,262 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_directionality.pyx + +cython implementation of the spike delay asymmetry measures + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_directionality.pyx + +which gives:: + + cython_directionality.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport fabs +from libc.math cimport fmax +from libc.math cimport fmin + +# from pyspike.cython.cython_distances cimport get_tau + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + 
+############################################################ +# get_tau +############################################################ +cdef inline double get_tau(double[:] spikes1, double[:] spikes2, + int i, int j, double interval, double max_tau): + cdef double m = interval # use interval length as initial tau + cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 + cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 + if i < N1 and i > -1: + m = fmin(m, spikes1[i+1]-spikes1[i]) + if j < N2 and j > -1: + m = fmin(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = fmin(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = fmin(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = fmin(m, max_tau) + return m + + +############################################################ +# spike_train_order_profile_cython +############################################################ +def spike_train_order_profile_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, + double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int n = 0 + cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times + cdef double[:] a = np.zeros(N1 + N2 + 2) # asymmetry values + cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + st[n] = spikes1[i] + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 after spike train 2 + # both get marked with -1 + a[n] = -1 + a[n-1] = -1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + st[n] = spikes2[j] + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 before spike train 2 + # both get marked with 1 + a[n] = 1 + a[n-1] = 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + n += 1 + # add only one event with zero asymmetry value and multiplicity 2 + st[n] = spikes1[i] + a[n] = 0 + mp[n] = 2 + + st = st[:n+2] + a = a[:n+2] + mp = mp[:n+2] + + st[0] = t_start + st[len(st)-1] = t_end + if N1 + N2 > 0: + a[0] = a[1] + a[len(a)-1] = a[len(a)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + else: + a[0] = 1 + a[1] = 1 + + return st, a, mp + + +############################################################ +# spike_train_order_cython +############################################################ +def spike_train_order_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int d = 0 + cdef int mp = 0 + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike in spike train 2 appeared before spike in spike train 1 + # mark with -1 + d -= 2 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, 
interval, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike in spike train 1 appeared before spike in spike train 2 + # mark with +1 + d += 2 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + # add only one event with multiplicity 2, but no asymmetry counting + mp += 2 + + if d == 0 and mp == 0: + # empty spike trains -> spike sync = 1 by definition + d = 1 + mp = 1 + + return d, mp + + +############################################################ +# spike_directionality_profiles_cython +############################################################ +def spike_directionality_profiles_cython(double[:] spikes1, + double[:] spikes2, + double t_start, double t_end, + double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef double[:] d1 = np.zeros(N1) # directionality values + cdef double[:] d2 = np.zeros(N2) # directionality values + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 after spike train 2 + # leading spike gets +1, following spike -1 + d1[i] = -1 + d2[j] = +1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 before spike train 2 + # leading spike gets +1, following spike -1 + d1[i] = +1 + d2[j] = -1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + # equal spike times: zero asymmetry value + d1[i] = 0 + d2[j] = 0 + + return d1, d2 + + +############################################################ +# spike_directionality_cython +############################################################ +def spike_directionality_cython(double[:] spikes1, + double[:] spikes2, + double t_start, double t_end, + double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int d = 0 # directionality value + cdef double interval = t_end - t_start + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 after spike train 2 + # leading spike gets +1, following spike -1 + d -= 1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike from spike train 1 before spike train 2 + # leading spike gets +1, following spike -1 + d += 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + + return d diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx index ac5f226..d4070ae 100644 --- a/pyspike/cython/cython_distances.pyx +++ b/pyspike/cython/cython_distances.pyx @@ -178,6 +178,8 @@ cdef inline 
double isi_avrg_cython(double isi1, double isi2) nogil: return 0.5*(isi1+isi2)*(isi1+isi2) # alternative definition to obtain ~ 0.5 for Poisson spikes # return 0.5*(isi1*isi1+isi2*isi2) + # another alternative definition without second normalization + # return 0.5*(isi1+isi2) ############################################################ @@ -248,6 +250,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, index2 = 0 y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) index = 1 while index1+index2 < N1+N2-2: @@ -267,6 +271,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p1 s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -286,6 +292,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_p1 # s2 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): index2 += 1 # first calculate the previous interval end value @@ -301,6 +309,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, t_curr = t_p2 s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) @@ -320,6 +330,9 @@ def spike_distance_cython(double[:] t1, double[:] t2, s2 = dt_p2 # s1 is the same as above, thus we can compute y2 immediately y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_start = (s1 + s2) / isi_avrg_cython(isi1, isi2) + else: # t_f1 == t_f2 - generate only one event index1 += 1 index2 += 1 @@ -358,6 +371,193 @@ def spike_distance_cython(double[:] t1, double[:] t2, s1 = dt_f1 # *(t_end-t1[N1-1])/isi1 s2 = dt_f2 # *(t_end-t2[N2-1])/isi2 y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + # y_end = (s1 + s2) / isi_avrg_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) + # end nogil + + # use only the data added above + # could be less than original length due to equal spike times + return spike_value / (t_end-t_start) + + +############################################################ +# isi_avrg_rf_cython +############################################################ +cdef inline double isi_avrg_rf_cython(double isi1, double isi2) nogil: + # rate free version + return (isi1+isi2) + + +############################################################ +# spike_distance_rf_cython +############################################################ +def spike_distance_rf_cython(double[:] t1, double[:] t2, + double t_start, double t_end): + + cdef int N1, N2, index1, index2, index + cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 + cdef double isi1, isi2, s1, s2 + cdef double y_start, y_end, t_last, t_current, spike_value + + spike_value = 0.0 + + N1 = len(t1) + N2 = len(t2) + + with nogil: # release the interpreter to allow multithreading + t_last = 
t_start + t_p1 = t_start + t_p2 = t_start + if t1[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f1 = t1[0] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) + dt_p1 = dt_f1 + s1 = dt_p1*(t_f1-t_start)/isi1 + index1 = -1 + else: + t_f1 = t1[1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + dt_p1 = 0.0 + isi1 = t1[1]-t1[0] + s1 = dt_p1 + index1 = 0 + if t2[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f2 = t2[0] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = dt_f2 + isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) + s2 = dt_p2*(t_f2-t_start)/isi2 + index2 = -1 + else: + t_f2 = t2[1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = 0.0 + isi2 = t2[1]-t2[0] + s2 = dt_p2 + index2 = 0 + + # y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + index = 1 + + while index1+index2 < N1+N2-2: + # print(index, index1, index2) + if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): + index1 += 1 + # first calculate the previous interval end value + s1 = dt_f1*(t_f1-t_p1) / isi1 + # the previous time now was the following time before: + dt_p1 = dt_f1 + t_p1 = t_f1 # t_p1 contains the current time point + # get the next time + if index1 < N1-1: + t_f1 = t1[index1+1] + else: + t_f1 = t_end + t_curr = t_p1 + s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 + # y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index1 < N1-1: + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1-t_p1 + s1 = dt_p1 + else: + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # s1 needs adjustment due to change of isi1 + s1 = dt_p1*(t_end-t1[N1-1])/isi1 + # s2 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): + index2 += 1 + # first calculate the previous interval end value + s2 = dt_f2*(t_f2-t_p2) / isi2 + # the previous time now was the following time before: + dt_p2 = dt_f2 + t_p2 = t_f2 # t_p2 contains the current time point + # get the next time + if index2 < N2-1: + t_f2 = t2[index2+1] + else: + t_f2 = t_end + t_curr = t_p2 + s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index2 < N2-1: + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2-t_p2 + s2 = dt_p2 + else: + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + # s2 needs adjustment due to change of isi2 + s2 = dt_p2*(t_end-t2[N2-1])/isi2 + # s1 is the same as above, thus we can compute y2 immediately + # y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_start = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + + else: # t_f1 == t_f2 - generate only one event + 
index1 += 1 + index2 += 1 + t_p1 = t_f1 + t_p2 = t_f2 + dt_p1 = 0.0 + dt_p2 = 0.0 + t_curr = t_f1 + y_end = 0.0 + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + y_start = 0.0 + if index1 < N1-1: + t_f1 = t1[index1+1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1 - t_p1 + else: + t_f1 = t_end + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + if index2 < N2-1: + t_f2 = t2[index2+1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2 - t_p2 + else: + t_f2 = t_end + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + index += 1 + t_last = t_curr + # isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + s1 = dt_f1*(t_end-t1[N1-1])/isi1 + s2 = dt_f2*(t_end-t2[N2-1])/isi2 + # y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # alternative definition without second normalization + y_end = (s1 + s2) / isi_avrg_rf_cython(isi1, isi2) + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) # end nogil diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index 4a42cdb..aa24db4 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -450,3 +450,36 @@ def coincidence_profile_cython(double[:] spikes1, double[:] spikes2, c[1] = 1 return st, c, mp + + +############################################################ +# coincidence_single_profile_cython +############################################################ +def coincidence_single_profile_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int j = -1 + cdef double[:] c = np.zeros(N1) # coincidences + cdef double interval = t_end - t_start + cdef double tau + for i in xrange(N1): + while j < N2-1 and spikes2[j+1] < spikes1[i]: + # move forward until spikes2[j] is the last spike before spikes1[i] + # note that if spikes2[j] is after spikes1[i] we dont do anything + j += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if j > -1 and fabs(spikes1[i]-spikes2[j]) < tau: + # current spike in st1 is coincident + c[i] = 1 + if j < N2-1 and (j < 0 or spikes2[j] < spikes1[i]): + # in case spikes2[j] is before spikes1[i] it has to be the one + # right before (see above), hence we move one forward and also + # check the next spike + j += 1 + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) + if fabs(spikes2[j]-spikes1[i]) < tau: + # current spike in st1 is coincident + c[i] = 1 + return c diff --git a/pyspike/cython/cython_simulated_annealing.pyx b/pyspike/cython/cython_simulated_annealing.pyx new file mode 100644 index 0000000..be9423c --- /dev/null +++ b/pyspike/cython/cython_simulated_annealing.pyx @@ -0,0 +1,82 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_simulated_annealing.pyx + +cython implementation of a simulated annealing algorithm to find the optimal +spike train order + +Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects +improves the performance of spike_distance by a factor of 10! 
+ +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_simulated_annealing.pyx + +which gives: + + cython_simulated_annealing.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport exp +from libc.math cimport fmod +from libc.stdlib cimport rand +from libc.stdlib cimport RAND_MAX + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +def sim_ann_cython(double[:, :] D, double T_start, double T_end, double alpha): + + cdef long N = len(D) + cdef double A = np.sum(np.triu(D, 0)) + cdef long[:] p = np.arange(N) + cdef double T = T_start + cdef long iterations + cdef long succ_iter + cdef long total_iter = 0 + cdef double delta_A + cdef long ind1 + cdef long ind2 + + while T > T_end: + iterations = 0 + succ_iter = 0 + # equilibrate for 100*N steps or 10*N successful steps + while iterations < 100*N and succ_iter < 10*N: + # exchange two rows and cols + # ind1 = np.random.randint(N-1) + ind1 = rand() % (N-1) + if ind1 < N-1: + ind2 = ind1+1 + else: # this can never happen! + ind2 = 0 + delta_A = -2*D[p[ind1], p[ind2]] + if delta_A > 0.0 or exp(delta_A/T) > ((1.0*rand()) / RAND_MAX): + # swap indices + p[ind1], p[ind2] = p[ind2], p[ind1] + A += delta_A + succ_iter += 1 + iterations += 1 + total_iter += iterations + T *= alpha # cool down + if succ_iter == 0: + # no successful step -> we believe we have converged + break + + return p, A, total_iter diff --git a/pyspike/cython/directionality_python_backend.py b/pyspike/cython/directionality_python_backend.py new file mode 100644 index 0000000..c1d820b --- /dev/null +++ b/pyspike/cython/directionality_python_backend.py @@ -0,0 +1,144 @@ +""" directionality_python_backend.py + +Collection of python functions that can be used instead of the cython +implementation. 
+ +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +import numpy as np + + +############################################################ +# spike_train_order_python +############################################################ +def spike_directionality_profile_python(spikes1, spikes2, t_start, t_end, + max_tau): + + def get_tau(spikes1, spikes2, i, j, max_tau): + m = t_end - t_start # use interval as initial tau + if i < len(spikes1)-1 and i > -1: + m = min(m, spikes1[i+1]-spikes1[i]) + if j < len(spikes2)-1 and j > -1: + m = min(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = min(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = min(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = min(m, max_tau) + return m + + N1 = len(spikes1) + N2 = len(spikes2) + i = -1 + j = -1 + d1 = np.zeros(N1) # directionality values + d2 = np.zeros(N2) # directionality values + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # spike in first spike train occurs after second + d1[i] = -1 + d2[j] = +1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # spike in second spike train occurs after first + d1[i] = +1 + d2[j] = -1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + d1[i] = 0 + d2[j] = 0 + + return d1, d2 + + +############################################################ +# spike_train_order_python +############################################################ +def spike_train_order_profile_python(spikes1, spikes2, t_start, t_end, + max_tau): + + def get_tau(spikes1, spikes2, i, j, max_tau): + m = t_end - t_start # use interval as initial tau + if i < len(spikes1)-1 and i > -1: + m = min(m, spikes1[i+1]-spikes1[i]) + if j < len(spikes2)-1 and j > -1: + m = min(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = min(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = min(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = min(m, max_tau) + return m + + N1 = len(spikes1) + N2 = len(spikes2) + i = -1 + j = -1 + n = 0 + st = np.zeros(N1 + N2 + 2) # spike times + a = np.zeros(N1 + N2 + 2) # coincidences + mp = np.ones(N1 + N2 + 2) # multiplicity + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes1[i] + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + a[n] = -1 + a[n-1] = -1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes2[j] + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + a[n] = 1 + a[n-1] = 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + n += 1 + # add only one event with zero asymmetry value and multiplicity 2 + st[n] = spikes1[i] + a[n] = 0 + mp[n] = 2 + + st = st[:n+2] + a = a[:n+2] + mp = mp[:n+2] + + st[0] = t_start + st[len(st)-1] = t_end + if N1 + N2 > 0: + a[0] = a[1] + a[len(a)-1] = 
a[len(a)-2]
+        mp[0] = mp[1]
+        mp[len(mp)-1] = mp[len(mp)-2]
+    else:
+        a[0] = 1
+        a[1] = 1
+
+    return st, a, mp
diff --git a/pyspike/cython/python_backend.py b/pyspike/cython/python_backend.py
index 6b7209a..e75f181 100644
--- a/pyspike/cython/python_backend.py
+++ b/pyspike/cython/python_backend.py
@@ -3,7 +3,7 @@
 Collection of python functions that can be used instead of the cython
 implementation.
 
-Copyright 2014, Mario Mulansky
+Copyright 2014-2015, Mario Mulansky
 
 Distributed under the BSD License
 
@@ -356,26 +356,27 @@ def cumulative_sync_python(spikes1, spikes2):
     return st, c
 
 
+def get_tau(spikes1, spikes2, i, j, max_tau, init_tau):
+    m = init_tau
+    if i < len(spikes1)-1 and i > -1:
+        m = min(m, spikes1[i+1]-spikes1[i])
+    if j < len(spikes2)-1 and j > -1:
+        m = min(m, spikes2[j+1]-spikes2[j])
+    if i > 0:
+        m = min(m, spikes1[i]-spikes1[i-1])
+    if j > 0:
+        m = min(m, spikes2[j]-spikes2[j-1])
+    m *= 0.5
+    if max_tau > 0.0:
+        m = min(m, max_tau)
+    return m
+
+
 ############################################################
 # coincidence_python
 ############################################################
 def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
 
-    def get_tau(spikes1, spikes2, i, j, max_tau):
-        m = t_end - t_start   # use interval as initial tau
-        if i < len(spikes1)-1 and i > -1:
-            m = min(m, spikes1[i+1]-spikes1[i])
-        if j < len(spikes2)-1 and j > -1:
-            m = min(m, spikes2[j+1]-spikes2[j])
-        if i > 0:
-            m = min(m, spikes1[i]-spikes1[i-1])
-        if j > 0:
-            m = min(m, spikes2[j]-spikes2[j-1])
-        m *= 0.5
-        if max_tau > 0.0:
-            m = min(m, max_tau)
-        return m
-
     N1 = len(spikes1)
     N2 = len(spikes2)
     i = -1
@@ -388,7 +389,7 @@ def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
         if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]):
             i += 1
             n += 1
-            tau = get_tau(spikes1, spikes2, i, j, max_tau)
+            tau = get_tau(spikes1, spikes2, i, j, max_tau, t_end-t_start)
             st[n] = spikes1[i]
             if j > -1 and spikes1[i]-spikes2[j] < tau:
                 # coincidence between the current spike and the previous spike
@@ -398,7 +399,7 @@ def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
         elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]):
             j += 1
             n += 1
-            tau = get_tau(spikes1, spikes2, i, j, max_tau)
+            tau = get_tau(spikes1, spikes2, i, j, max_tau, t_end-t_start)
             st[n] = spikes2[j]
             if i > -1 and spikes2[j]-spikes1[i] < tau:
                 # coincidence between the current spike and the previous spike
@@ -433,6 +434,36 @@ def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
     return st, c, mp
 
 
+############################################################
+# coincidence_single_python
+############################################################
+def coincidence_single_python(spikes1, spikes2, t_start, t_end, max_tau):
+
+    N1 = len(spikes1)
+    N2 = len(spikes2)
+    j = -1
+    c = np.zeros(N1)   # coincidences
+    for i in range(N1):
+        while j < N2-1 and spikes2[j+1] < spikes1[i]:
+            # move forward until spikes2[j] is the last spike before spikes1[i]
+            # note that if spikes2[j] is after spikes1[i] we don't do anything
+            j += 1
+        tau = get_tau(spikes1, spikes2, i, j, max_tau, t_end-t_start)
+        if j > -1 and abs(spikes1[i]-spikes2[j]) < tau:
+            # current spike in st1 is coincident
+            c[i] = 1
+        if j < N2-1 and (j < 0 or spikes2[j] < spikes1[i]):
+            # in case spikes2[j] is before spikes1[i] it has to be the first or
+            # the one right before (see above), hence we move one forward and
+            # also check the next spike
+            j += 1
+            tau = get_tau(spikes1, spikes2, i, j, max_tau, t_end-t_start)
+            if 
abs(spikes2[j]-spikes1[i]) < tau:
+                # current spike in st1 is coincident
+                c[i] = 1
+    return c
+
+
 ############################################################
 # add_piece_wise_const_python
 ############################################################
diff --git a/pyspike/spike_directionality.py b/pyspike/spike_directionality.py
new file mode 100644
index 0000000..248862c
--- /dev/null
+++ b/pyspike/spike_directionality.py
@@ -0,0 +1,522 @@
+# Module containing functions to compute the SPIKE directionality and the
+# spike train order profile
+# Copyright 2015, Mario Mulansky
+# Distributed under the BSD License
+
+from __future__ import absolute_import
+
+import numpy as np
+import pyspike
+from pyspike import DiscreteFunc
+from functools import partial
+from pyspike.generic import _generic_profile_multi
+
+
+############################################################
+# spike_directionality_values
+############################################################
+def spike_directionality_values(*args, **kwargs):
+    """ Computes the spike directionality value for each spike in
+    each spike train. Returns a list containing an array of spike directionality
+    values for every given spike train.
+
+    Valid call structures::
+
+      spike_directionality_values(st1, st2)  # returns the bi-variate profile
+      spike_directionality_values(st1, st2, st3)  # multi-variate profile of 3
+                                                  # spike trains
+
+      spike_trains = [st1, st2, st3, st4]  # list of spike trains
+      spike_directionality_values(spike_trains)  # profile of the list of spike trains
+      spike_directionality_values(spike_trains, indices=[0, 1])  # use only the spike trains
+                                                                 # given by the indices
+
+    Additional arguments:
+    :param max_tau: Upper bound for coincidence window (default=None).
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+
+    :returns: The spike directionality values :math:`D^n_i` as a list of arrays.
+    """
+    if len(args) == 1:
+        return _spike_directionality_values_impl(args[0], **kwargs)
+    else:
+        return _spike_directionality_values_impl(args, **kwargs)
+
+
+def _spike_directionality_values_impl(spike_trains, indices=None,
+                                      interval=None, max_tau=None):
+    """ Computes the multi-variate spike directionality profile
+    of the given spike trains.
+
+    :param spike_trains: List of spike trains.
+    :type spike_trains: List of :class:`pyspike.SpikeTrain`
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+    :type indices: list or None
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike-directionality values.
+    """
+    if interval is not None:
+        raise NotImplementedError("Parameter `interval` not supported.")
+    if indices is None:
+        indices = np.arange(len(spike_trains))
+    indices = np.array(indices)
+    # check validity of indices
+    assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
+        "Invalid index list."
+    # list of arrays for resulting asymmetry values
+    asymmetry_list = [np.zeros_like(spike_trains[n].spikes) for n in indices]
+    # generate a list of possible index pairs
+    pairs = [(indices[i], j) for i in range(len(indices))
+             for j in indices[i+1:]]
+
+    # cython implementation
+    try:
+        from .cython.cython_directionality import \
+            spike_directionality_profiles_cython as profile_impl
+    except ImportError:
+        if not(pyspike.disable_backend_warning):
+            print("Warning: cython_directionality not found. 
Make sure that \
+PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
+Falling back to slow python backend.")
+        # use python backend
+        from .cython.directionality_python_backend import \
+            spike_directionality_profile_python as profile_impl
+
+    if max_tau is None:
+        max_tau = 0.0
+
+    for i, j in pairs:
+        d1, d2 = profile_impl(spike_trains[i].spikes, spike_trains[j].spikes,
+                              spike_trains[i].t_start, spike_trains[i].t_end,
+                              max_tau)
+        asymmetry_list[i] += d1
+        asymmetry_list[j] += d2
+    for a in asymmetry_list:
+        a /= len(spike_trains)-1
+    return asymmetry_list
+
+
+############################################################
+# spike_directionality
+############################################################
+def spike_directionality(spike_train1, spike_train2, normalize=True,
+                         interval=None, max_tau=None):
+    """ Computes the overall spike directionality of the first spike train with
+    respect to the second spike train.
+
+    :param spike_train1: First spike train.
+    :type spike_train1: :class:`pyspike.SpikeTrain`
+    :param spike_train2: Second spike train.
+    :type spike_train2: :class:`pyspike.SpikeTrain`
+    :param normalize: Normalize by the number of spikes (multiplicity).
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike directionality value.
+    """
+    if interval is None:
+        # distance over the whole interval is requested: use specific function
+        # for optimal performance
+        try:
+            from .cython.cython_directionality import \
+                spike_directionality_cython as spike_directionality_impl
+            if max_tau is None:
+                max_tau = 0.0
+            d = spike_directionality_impl(spike_train1.spikes,
+                                          spike_train2.spikes,
+                                          spike_train1.t_start,
+                                          spike_train1.t_end,
+                                          max_tau)
+            c = len(spike_train1.spikes)
+        except ImportError:
+            if not(pyspike.disable_backend_warning):
+                print("Warning: cython_directionality not found. Make sure that \
+PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
+Falling back to slow python backend.")
+            # use profile.
+            d1, x = spike_directionality_values([spike_train1, spike_train2],
+                                                interval=interval,
+                                                max_tau=max_tau)
+            d = np.sum(d1)
+            c = len(spike_train1.spikes)
+        if normalize:
+            return 1.0*d/c
+        else:
+            return d
+    else:
+        # some specific interval is provided: not yet implemented
+        raise NotImplementedError("Parameter `interval` not supported.")
+
+
+############################################################
+# spike_directionality_matrix
+############################################################
+def spike_directionality_matrix(spike_trains, normalize=True, indices=None,
+                                interval=None, max_tau=None):
+    """ Computes the spike directionality matrix for the given spike trains.
+
+    :param spike_trains: List of spike trains.
+    :type spike_trains: List of :class:`pyspike.SpikeTrain`
+    :param normalize: Normalize by the number of spikes (multiplicity).
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+    :type indices: list or None
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike directionality matrix.
+    """
+    if indices is None:
+        indices = np.arange(len(spike_trains))
+    indices = np.array(indices)
+    # check validity of indices
+    assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
+        "Invalid index list." 
+
+    # generate a list of possible index pairs
+    pairs = [(indices[i], j) for i in range(len(indices))
+             for j in indices[i+1:]]
+
+    distance_matrix = np.zeros((len(indices), len(indices)))
+    for i, j in pairs:
+        d = spike_directionality(spike_trains[i], spike_trains[j], normalize,
+                                 interval, max_tau=max_tau)
+        distance_matrix[i, j] = d
+        distance_matrix[j, i] = -d
+    return distance_matrix
+
+
+############################################################
+# spike_train_order_profile
+############################################################
+def spike_train_order_profile(*args, **kwargs):
+    """ Computes the spike train order profile :math:`E(t)` of the given
+    spike trains. Returns the profile as a DiscreteFunction object.
+
+    Valid call structures::
+
+      spike_train_order_profile(st1, st2)  # returns the bi-variate profile
+      spike_train_order_profile(st1, st2, st3)  # multi-variate profile of 3
+                                                # spike trains
+
+      spike_trains = [st1, st2, st3, st4]  # list of spike trains
+      spike_train_order_profile(spike_trains)  # profile of the list of spike trains
+      spike_train_order_profile(spike_trains, indices=[0, 1])  # use only the spike trains
+                                                               # given by the indices
+
+    Additional arguments:
+    :param max_tau: Upper bound for coincidence window, `default=None`.
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+
+    :returns: The spike train order profile :math:`E(t)`
+    :rtype: :class:`.DiscreteFunction`
+    """
+    if len(args) == 1:
+        return spike_train_order_profile_multi(args[0], **kwargs)
+    elif len(args) == 2:
+        return spike_train_order_profile_bi(args[0], args[1], **kwargs)
+    else:
+        return spike_train_order_profile_multi(args, **kwargs)
+
+
+############################################################
+# spike_train_order_profile_bi
+############################################################
+def spike_train_order_profile_bi(spike_train1, spike_train2, max_tau=None):
+    """ Computes the spike train order profile :math:`E(t)` of the two given
+    spike trains. Returns the profile as a DiscreteFunction object.
+
+    :param spike_train1: First spike train.
+    :type spike_train1: :class:`pyspike.SpikeTrain`
+    :param spike_train2: Second spike train.
+    :type spike_train2: :class:`pyspike.SpikeTrain`
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike train order profile :math:`E(t)`.
+    :rtype: :class:`pyspike.function.DiscreteFunction`
+    """
+    # check whether the spike trains are defined for the same interval
+    assert spike_train1.t_start == spike_train2.t_start, \
+        "Given spike trains are not defined on the same interval!"
+    assert spike_train1.t_end == spike_train2.t_end, \
+        "Given spike trains are not defined on the same interval!"
+
+    # cython implementation
+    try:
+        from .cython.cython_directionality import \
+            spike_train_order_profile_cython as \
+            spike_train_order_profile_impl
+    except ImportError:
+        # raise NotImplementedError()
+        if not(pyspike.disable_backend_warning):
+            print("Warning: cython_directionality not found. 
Make sure that \
+PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
+Falling back to slow python backend.")
+        # use python backend
+        from .cython.directionality_python_backend import \
+            spike_train_order_profile_python as spike_train_order_profile_impl
+
+    if max_tau is None:
+        max_tau = 0.0
+
+    times, coincidences, multiplicity \
+        = spike_train_order_profile_impl(spike_train1.spikes,
+                                         spike_train2.spikes,
+                                         spike_train1.t_start,
+                                         spike_train1.t_end,
+                                         max_tau)
+
+    return DiscreteFunc(times, coincidences, multiplicity)
+
+
+############################################################
+# spike_train_order_profile_multi
+############################################################
+def spike_train_order_profile_multi(spike_trains, indices=None,
+                                    max_tau=None):
+    """ Computes the multi-variate spike train order profile for a set of
+    spike trains. For each spike in the set of spike trains, the multi-variate
+    profile is defined as the sum of asymmetry values divided by the number of
+    spike train pairs involving the spike train containing this spike,
+    which is the number of spike trains minus one (N-1).
+
+    :param spike_trains: list of :class:`pyspike.SpikeTrain`
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+    :type indices: list or None
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The multi-variate spike train order profile :math:`E(t)`
+    :rtype: :class:`pyspike.function.DiscreteFunction`
+    """
+    prof_func = partial(spike_train_order_profile_bi, max_tau=max_tau)
+    average_prof, M = _generic_profile_multi(spike_trains, prof_func,
+                                             indices)
+    return average_prof
+
+
+
+############################################################
+# _spike_train_order_impl
+############################################################
+def _spike_train_order_impl(spike_train1, spike_train2,
+                            interval=None, max_tau=None):
+    """ Implementation of the bi-variate spike train order value (Synfire Indicator).
+
+    :param spike_train1: First spike train.
+    :type spike_train1: :class:`pyspike.SpikeTrain`
+    :param spike_train2: Second spike train.
+    :type spike_train2: :class:`pyspike.SpikeTrain`
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike train order value (Synfire Indicator)
+    """
+    if interval is None:
+        # distance over the whole interval is requested: use specific function
+        # for optimal performance
+        try:
+            from .cython.cython_directionality import \
+                spike_train_order_cython as spike_train_order_func
+            if max_tau is None:
+                max_tau = 0.0
+            c, mp = spike_train_order_func(spike_train1.spikes,
+                                           spike_train2.spikes,
+                                           spike_train1.t_start,
+                                           spike_train1.t_end,
+                                           max_tau)
+        except ImportError:
+            # Cython backend not available: fall back to profile averaging
+            c, mp = spike_train_order_profile(spike_train1, spike_train2,
+                                              max_tau=max_tau).integral(interval)
+        return c, mp
+    else:
+        # some specific interval is provided: not yet implemented
+        raise NotImplementedError("Parameter `interval` not supported.")
+
+
+############################################################
+# spike_train_order
+############################################################
+def spike_train_order(*args, **kwargs):
+    """ Computes the spike train order (Synfire Indicator) of the given
+    spike trains. 
+
+    Valid call structures::
+
+      spike_train_order(st1, st2, normalize=True)  # normalized bi-variate
+                                                   # spike train order
+      spike_train_order(st1, st2, st3)  # multi-variate result of 3 spike trains
+
+      spike_trains = [st1, st2, st3, st4]  # list of spike trains
+      spike_train_order(spike_trains)  # result for the list of spike trains
+      spike_train_order(spike_trains, indices=[0, 1])  # use only the spike trains
+                                                       # given by the indices
+
+    Additional arguments:
+     - `max_tau` Upper bound for coincidence window, `default=None`.
+     - `normalize` Flag indicating if the result should be normalized by the
+       number of spikes, `default=True`
+
+
+    :returns: The spike train order value (Synfire Indicator)
+    """
+    if len(args) == 1:
+        return spike_train_order_multi(args[0], **kwargs)
+    elif len(args) == 2:
+        return spike_train_order_bi(args[0], args[1], **kwargs)
+    else:
+        return spike_train_order_multi(args, **kwargs)
+
+
+############################################################
+# spike_train_order_bi
+############################################################
+def spike_train_order_bi(spike_train1, spike_train2, normalize=True,
+                         interval=None, max_tau=None):
+    """ Computes the overall spike train order value (Synfire Indicator)
+    for two spike trains.
+
+    :param spike_train1: First spike train.
+    :type spike_train1: :class:`pyspike.SpikeTrain`
+    :param spike_train2: Second spike train.
+    :type spike_train2: :class:`pyspike.SpikeTrain`
+    :param normalize: Normalize by the number of spikes (multiplicity).
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: The spike train order value (Synfire Indicator)
+    """
+    c, mp = _spike_train_order_impl(spike_train1, spike_train2, interval, max_tau)
+    if normalize:
+        return 1.0*c/mp
+    else:
+        return c
+
+############################################################
+# spike_train_order_multi
+############################################################
+def spike_train_order_multi(spike_trains, indices=None, normalize=True,
+                            interval=None, max_tau=None):
+    """ Computes the overall spike train order value (Synfire Indicator)
+    for many spike trains.
+
+    :param spike_trains: list of :class:`.SpikeTrain`
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+    :param normalize: Normalize by the number of spikes (multiplicity).
+    :param interval: averaging interval given as a pair of floats, if None
+                     the average over the whole function is computed.
+    :type interval: Pair of floats or None.
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound.
+    :returns: Spike train order value (Synfire Indicator) F for the given spike trains.
+    :rtype: double
+    """
+    if indices is None:
+        indices = np.arange(len(spike_trains))
+    indices = np.array(indices)
+    # check validity of indices
+    assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
+        "Invalid index list." 
+
+    # generate a list of possible index pairs
+    pairs = [(indices[i], j) for i in range(len(indices))
+             for j in indices[i+1:]]
+
+    e_total = 0.0
+    m_total = 0.0
+    for (i, j) in pairs:
+        e, m = _spike_train_order_impl(spike_trains[i], spike_trains[j],
+                                       interval, max_tau)
+        e_total += e
+        m_total += m
+
+    if m_total == 0.0:
+        return 1.0
+    else:
+        return e_total/m_total
+
+
+
+############################################################
+# optimal_spike_train_sorting_from_matrix
+############################################################
+def _optimal_spike_train_sorting_from_matrix(D, full_output=False):
+    """ Finds the best sorting via simulated annealing.
+    Returns the optimal permutation p and A value.
+    Not for direct use, call :func:`.optimal_spike_train_sorting` instead.
+
+    :param D: The directionality (Spike-ORDER) matrix.
+    :param full_output: If true, then function will additionally return the
+                        number of performed iterations (default=False)
+    :return: (p, F) - tuple with the optimal permutation and synfire indicator.
+             if `full_output=True`, (p, F, iter) is returned.
+    """
+    N = len(D)
+    A = np.sum(np.triu(D, 0))
+
+    p = np.arange(N)
+
+    T_start = 2*np.max(D)    # starting temperature
+    T_end = 1E-5 * T_start   # final temperature
+    alpha = 0.9              # cooling factor
+
+    try:
+        from .cython.cython_simulated_annealing import sim_ann_cython as sim_ann
+    except ImportError:
+        raise NotImplementedError("PySpike with Cython required for computing spike train"
+                                  " sorting!")
+
+    p, A, total_iter = sim_ann(D, T_start, T_end, alpha)
+
+    if full_output:
+        return p, A, total_iter
+    else:
+        return p, A
+
+
+############################################################
+# optimal_spike_train_sorting
+############################################################
+def optimal_spike_train_sorting(spike_trains, indices=None, interval=None,
+                                max_tau=None, full_output=False):
+    """ Finds the best sorting of the given spike trains by computing the spike
+    directionality matrix and optimizing the order using simulated annealing.
+    For a detailed description of the algorithm see:
+    `http://iopscience.iop.org/article/10.1088/1367-2630/aa68c3/meta`
+
+    :param spike_trains: list of :class:`.SpikeTrain`
+    :param indices: list of indices defining which spike trains to use,
+                    if None all given spike trains are used (default=None)
+    :type indices: list or None
+    :param interval: time interval filter given as a pair of floats, if None
+                     the full spike trains are used (default=None).
+    :type interval: Pair of floats or None.
+    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
+                    coincidence window has no upper bound (default=None).
+    :param full_output: If true, then function will additionally return the
+                        number of performed iterations (default=False)
+    :return: (p, F) - tuple with the optimal permutation and synfire indicator.
+             if `full_output=True`, (p, F, iter) is returned.
+    """
+    D = spike_directionality_matrix(spike_trains, normalize=False,
+                                    indices=indices, interval=interval,
+                                    max_tau=max_tau)
+    return _optimal_spike_train_sorting_from_matrix(D, full_output)
+
+############################################################
+# permutate_matrix
+############################################################
+def permutate_matrix(D, p):
+    """ Helper function that applies the permutation p to the columns and rows
+    of matrix D. Returns the permuted matrix :math:`D'[n,m] = D[p[n], p[m]]`.
+
+    :param D: The matrix.
+    :param p: The permutation. 
+    :return: The permuted matrix D', i.e. :math:`D'[n,m] = D[p[n], p[m]]`
+    """
+    N = len(D)
+    D_p = np.empty_like(D)
+    for n in range(N):
+        for m in range(N):
+            D_p[n, m] = D[p[n], p[m]]
+    return D_p
diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py
index 80f7805..95ef454 100644
--- a/pyspike/spike_sync.py
+++ b/pyspike/spike_sync.py
@@ -8,7 +8,7 @@ from __future__ import absolute_import
 import numpy as np
 from functools import partial
 import pyspike
-from pyspike import DiscreteFunc
+from pyspike import DiscreteFunc, SpikeTrain
 from pyspike.generic import _generic_profile_multi, _generic_distance_matrix
 
@@ -45,9 +45,9 @@ def spike_sync_profile(*args, **kwargs):
     if len(args) == 1:
         return spike_sync_profile_multi(args[0], **kwargs)
     elif len(args) == 2:
-        return spike_sync_profile_bi(args[0], args[1])
+        return spike_sync_profile_bi(args[0], args[1], **kwargs)
     else:
-        return spike_sync_profile_multi(args)
+        return spike_sync_profile_multi(args, **kwargs)
 
@@ -290,3 +290,52 @@ def spike_sync_matrix(spike_trains, indices=None, interval=None, max_tau=None):
     dist_func = partial(spike_sync_bi, max_tau=max_tau)
     return _generic_distance_matrix(spike_trains, dist_func,
                                     indices, interval)
+
+
+############################################################
+# filter_by_spike_sync
+############################################################
+def filter_by_spike_sync(spike_trains, threshold, indices=None, max_tau=None,
+                         return_removed_spikes=False):
+    """ Removes all spikes whose multi-variate spike_sync value is below
+    the given threshold.
+    """
+    N = len(spike_trains)
+    filtered_spike_trains = []
+    removed_spike_trains = []
+
+    # cython implementation
+    try:
+        from .cython.cython_profiles import coincidence_single_profile_cython \
+            as coincidence_impl
+    except ImportError:
+        if not(pyspike.disable_backend_warning):
+            print("Warning: coincidence_single_profile_cython not found. 
Make \ +sure that PySpike is installed by running\n \ +'python setup.py build_ext --inplace'!\n \ +Falling back to slow python backend.") + # use python backend + from .cython.python_backend import coincidence_single_python \ + as coincidence_impl + + if max_tau is None: + max_tau = 0.0 + + for i, st in enumerate(spike_trains): + coincidences = np.zeros_like(st) + for j in range(N): + if i == j: + continue + coincidences += coincidence_impl(st.spikes, spike_trains[j].spikes, + st.t_start, st.t_end, max_tau) + filtered_spikes = st[coincidences > threshold*(N-1)] + filtered_spike_trains.append(SpikeTrain(filtered_spikes, + [st.t_start, st.t_end])) + if return_removed_spikes: + removed_spikes = st[coincidences <= threshold*(N-1)] + removed_spike_trains.append(SpikeTrain(removed_spikes, + [st.t_start, st.t_end])) + if return_removed_spikes: + return [filtered_spike_trains, removed_spike_trains] + else: + return filtered_spike_trains diff --git a/setup.py b/setup.py index 5b9e677..b5b01a6 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,9 @@ class numpy_include(object): if os.path.isfile("pyspike/cython/cython_add.c") and \ os.path.isfile("pyspike/cython/cython_profiles.c") and \ - os.path.isfile("pyspike/cython/cython_distances.c"): + os.path.isfile("pyspike/cython/cython_distances.c") and \ + os.path.isfile("pyspike/cython/cython_directionality.c") and \ + os.path.isfile("pyspike/cython/cython_simulated_annealing.c"): use_c = True else: use_c = False @@ -45,7 +47,11 @@ if use_cython: # Cython is available, compile .pyx -> .c Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.pyx"]), Extension("pyspike.cython.cython_distances", - ["pyspike/cython/cython_distances.pyx"]) + ["pyspike/cython/cython_distances.pyx"]), + Extension("pyspike.cython.cython_directionality", + ["pyspike/cython/cython_directionality.pyx"]), + Extension("pyspike.cython.cython_simulated_annealing", + ["pyspike/cython/cython_simulated_annealing.pyx"]) ] cmdclass.update({'build_ext': build_ext}) elif use_c: # c files are there, compile to binaries @@ -55,14 +61,18 @@ elif use_c: # c files are there, compile to binaries Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.c"]), Extension("pyspike.cython.cython_distances", - ["pyspike/cython/cython_distances.c"]) + ["pyspike/cython/cython_distances.c"]), + Extension("pyspike.cython.cython_directionality", + ["pyspike/cython/cython_directionality.c"]), + Extension("pyspike.cython.cython_simulated_annealing", + ["pyspike/cython/cython_simulated_annealing.c"]) ] # neither cython nor c files available -> automatic fall-back to python backend setup( name='pyspike', packages=find_packages(exclude=['doc']), - version='0.5.3', + version='0.6.0', cmdclass=cmdclass, ext_modules=ext_modules, include_dirs=[numpy_include()], @@ -90,11 +100,17 @@ train similarity', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6' - ] + ], + package_data={ + 'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c', + 'cython/cython_distances.c', + 'cython/cython_directionality.c', + 'cython/cython_simulated_annealing.c'], + 'test': ['Spike_testdata.txt'] + } ) diff --git a/test/test_directionality.py b/test/test_directionality.py new file mode 100644 index 0000000..c2e9bfe --- /dev/null +++ b/test/test_directionality.py @@ 
-0,0 +1,97 @@
+""" test_directionality.py
+
+Tests the directionality functions
+
+Copyright 2015, Mario Mulansky
+
+Distributed under the BSD License
+
+"""
+
+import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal, \
+    assert_array_equal
+
+import pyspike as spk
+from pyspike import SpikeTrain, DiscreteFunc
+
+
+def test_spike_directionality():
+    st1 = SpikeTrain([100, 200, 300], [0, 1000])
+    st2 = SpikeTrain([105, 205, 300], [0, 1000])
+    assert_almost_equal(spk.spike_directionality(st1, st2), 2.0/3.0)
+    assert_almost_equal(spk.spike_directionality(st1, st2, normalize=False),
+                        2.0)
+
+    # exchanging the order of the spike trains should give the exact negative profile
+    assert_almost_equal(spk.spike_directionality(st2, st1), -2.0/3.0)
+    assert_almost_equal(spk.spike_directionality(st2, st1, normalize=False),
+                        -2.0)
+
+    st3 = SpikeTrain([105, 195, 500], [0, 1000])
+    assert_almost_equal(spk.spike_directionality(st1, st3), 0.0)
+    assert_almost_equal(spk.spike_directionality(st1, st3, normalize=False),
+                        0.0)
+    assert_almost_equal(spk.spike_directionality(st3, st1), 0.0)
+
+    D = spk.spike_directionality_matrix([st1, st2, st3], normalize=False)
+    D_expected = np.array([[0, 2.0, 0.0], [-2.0, 0.0, -1.0], [0.0, 1.0, 0.0]])
+    assert_array_equal(D, D_expected)
+
+    dir_profs = spk.spike_directionality_values([st1, st2, st3])
+    assert_array_equal(dir_profs[0], [1.0, 0.0, 0.0])
+    assert_array_equal(dir_profs[1], [-0.5, -1.0, 0.0])
+
+
+def test_spike_train_order():
+    st1 = SpikeTrain([100, 200, 300], [0, 1000])
+    st2 = SpikeTrain([105, 205, 300], [0, 1000])
+    st3 = SpikeTrain([105, 195, 500], [0, 1000])
+
+    expected_x12 = np.array([0, 100, 105, 200, 205, 300, 1000])
+    expected_y12 = np.array([1, 1, 1, 1, 1, 0, 0])
+    expected_mp12 = np.array([1, 1, 1, 1, 1, 2, 2])
+
+    f = spk.spike_train_order_profile(st1, st2)
+
+    assert f.almost_equal(DiscreteFunc(expected_x12, expected_y12,
+                                       expected_mp12))
+    assert_almost_equal(f.avrg(), 2.0/3.0)
+    assert_almost_equal(f.avrg(normalize=False), 4.0)
+    assert_almost_equal(spk.spike_train_order(st1, st2), 2.0/3.0)
+    assert_almost_equal(spk.spike_train_order(st1, st2, normalize=False), 4.0)
+
+    expected_x23 = np.array([0, 105, 195, 205, 300, 500, 1000])
+    expected_y23 = np.array([0, 0, -1, -1, 0, 0, 0])
+    expected_mp23 = np.array([2, 2, 1, 1, 1, 1, 1])
+
+    f = spk.spike_train_order_profile(st2, st3)
+
+    assert_array_equal(f.x, expected_x23)
+    assert_array_equal(f.y, expected_y23)
+    assert_array_equal(f.mp, expected_mp23)
+    assert f.almost_equal(DiscreteFunc(expected_x23, expected_y23,
+                                       expected_mp23))
+    assert_almost_equal(f.avrg(), -1.0/3.0)
+    assert_almost_equal(f.avrg(normalize=False), -2.0)
+    assert_almost_equal(spk.spike_train_order(st2, st3), -1.0/3.0)
+    assert_almost_equal(spk.spike_train_order(st2, st3, normalize=False), -2.0)
+
+    f = spk.spike_train_order_profile_multi([st1, st2, st3])
+
+    expected_x = np.array([0, 100, 105, 195, 200, 205, 300, 500, 1000])
+    expected_y = np.array([2, 2, 2, -2, 0, 0, 0, 0, 0])
+    expected_mp = np.array([2, 2, 4, 2, 2, 2, 4, 2, 2])
+
+    assert_array_equal(f.x, expected_x)
+    assert_array_equal(f.y, expected_y)
+    assert_array_equal(f.mp, expected_mp)
+
+    # Averaging the profile should give the same result as computing the synfire indicator directly. 
+
+    assert_almost_equal(f.avrg(), spk.spike_train_order([st1, st2, st3]))
+
+    # We can also compute the synfire indicator from the directionality matrix:
+    D_matrix = spk.spike_directionality_matrix([st1, st2, st3], normalize=False)
+    num_spikes = sum(len(st) for st in [st1, st2, st3])
+    syn_fire = np.sum(np.triu(D_matrix)) / num_spikes
+    assert_almost_equal(f.avrg(), syn_fire)
diff --git a/test/test_function.py b/test/test_function.py
index 92d378d..6c04839 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -10,6 +10,7 @@ Distributed under the BSD License
 from __future__ import print_function
 import numpy as np
 from copy import copy
+from nose.tools import raises
 from numpy.testing import assert_equal, assert_almost_equal, \
     assert_array_equal, assert_array_almost_equal
 
@@ -49,6 +50,8 @@ def test_pwc():
     assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16)
     a = f.avrg([1.5, 3.5])
     assert_almost_equal(a, (-0.5*0.5+0.5*1.5+1.0*0.75)/2.0, decimal=16)
+    a = f.avrg([1.0, 2.0])
+    assert_almost_equal(a, (1.0*-0.5)/1.0, decimal=16)
     a = f.avrg([1.0, 3.5])
     assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16)
     a = f.avrg([1.0, 4.0])
@@ -120,6 +123,53 @@ def test_pwc_avrg():
     assert_array_almost_equal(f1.x, x_expected, decimal=16)
     assert_array_almost_equal(f1.y, y_expected, decimal=16)
 
+def test_pwc_integral():
+    # some random data
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y = [1.0, -0.5, 1.5, 0.75]
+    f1 = spk.PieceWiseConstFunc(x, y)
+
+    # test full interval
+    full = 1.0*1.0 + 1.0*-0.5 + 0.5*1.5 + 1.5*0.75
+    assert_equal(f1.integral(), full)
+    assert_equal(f1.integral((np.min(x), np.max(x))), full)
+    # test part interval, spanning an edge
+    assert_equal(f1.integral((0.5, 1.5)), 0.5*1.0 + 0.5*-0.5)
+    # test part interval, just over two edges
+    assert_almost_equal(f1.integral((1.0-1e-16, 2+1e-16)), 1.0*-0.5, decimal=14)
+    # test part interval, between two edges
+    assert_equal(f1.integral((1.0, 2.0)), 1.0*-0.5)
+    assert_equal(f1.integral((1.2, 1.7)), (1.7-1.2)*-0.5)
+    # test part interval, start to before and after edge
+    assert_equal(f1.integral((0.0, 0.7)), 0.7*1.0)
+    assert_equal(f1.integral((0.0, 1.1)), 1.0*1.0+0.1*-0.5)
+    # test part interval, before and after edge till end
+    assert_equal(f1.integral((2.6, 4.0)), (4.0-2.6)*0.75)
+    assert_equal(f1.integral((2.4, 4.0)), (2.5-2.4)*1.5+(4-2.5)*0.75)
+
+@raises(ValueError)
+def test_pwc_integral_bad_bounds_inv():
+    # some random data
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y = [1.0, -0.5, 1.5, 0.75]
+    f1 = spk.PieceWiseConstFunc(x, y)
+    f1.integral((3, 2))
+
+@raises(ValueError)
+def test_pwc_integral_bad_bounds_oob_1():
+    # some random data
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y = [1.0, -0.5, 1.5, 0.75]
+    f1 = spk.PieceWiseConstFunc(x, y)
+    f1.integral((1, 6))
+
+@raises(ValueError)
+def test_pwc_integral_bad_bounds_oob_2():
+    # some random data
+    x = [0.0, 1.0, 2.0, 2.5, 4.0]
+    y = [1.0, -0.5, 1.5, 0.75]
+    f1 = spk.PieceWiseConstFunc(x, y)
+    f1.integral((-1, 3))
 
 def test_pwl():
     x = [0.0, 1.0, 2.0, 2.5, 4.0]
@@ -162,6 +212,18 @@ def test_pwl():
     a = f.avrg([1.0, 4.0])
     assert_almost_equal(a, (-0.45 + 0.75 + 1.5*0.5) / 3.0, decimal=16)
 
+    # interval between support points
+    a = f.avrg([1.1, 1.5])
+    assert_almost_equal(a, (-0.5+0.1*0.1 - 0.45) * 0.5, decimal=14)
+
+    # starting at a support point
+    a = f.avrg([1.0, 1.5])
+    assert_almost_equal(a, (-0.5 - 0.45) * 0.5, decimal=14)
+
+    # start and end at support point
+    a = f.avrg([1.0, 2.0])
+    assert_almost_equal(a, (-0.5 - 0.4) * 0.5, decimal=14)
+
     # averaging over multiple intervals
     a = 
f.avrg([(0.5, 1.5), (1.5, 2.5)])
     assert_almost_equal(a, (1.375*0.5 - 0.45 + 0.75)/2.0, decimal=16)
diff --git a/test/test_sync_filter.py b/test/test_sync_filter.py
new file mode 100644
index 0000000..e259903
--- /dev/null
+++ b/test/test_sync_filter.py
@@ -0,0 +1,95 @@
+""" test_sync_filter.py
+
+Tests the spike sync based filtering
+
+Copyright 2015, Mario Mulansky
+
+Distributed under the BSD License
+
+"""
+
+from __future__ import print_function
+import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal, \
+    assert_array_almost_equal
+
+import pyspike as spk
+from pyspike import SpikeTrain
+
+
+def test_single_prof():
+    st1 = np.array([1.0, 2.0, 3.0, 4.0])
+    st2 = np.array([1.1, 2.1, 3.8])
+    st3 = np.array([0.9, 3.1, 4.1])
+
+    # cython implementation
+    try:
+        from pyspike.cython.cython_profiles import \
+            coincidence_single_profile_cython as coincidence_impl
+    except ImportError:
+        from pyspike.cython.python_backend import \
+            coincidence_single_python as coincidence_impl
+
+    sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
+                                       SpikeTrain(st2, 5.0))
+
+    coincidences = np.array(coincidence_impl(st1, st2, 0, 5.0, 0.0))
+    print(coincidences)
+    for i, t in enumerate(st1):
+        assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t],
+                     "At index %d" % i)
+
+    coincidences = np.array(coincidence_impl(st2, st1, 0, 5.0, 0.0))
+    for i, t in enumerate(st2):
+        assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t],
+                     "At index %d" % i)
+
+    sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
+                                       SpikeTrain(st3, 5.0))
+
+    coincidences = np.array(coincidence_impl(st1, st3, 0, 5.0, 0.0))
+    for i, t in enumerate(st1):
+        assert_equal(coincidences[i], sync_prof.y[sync_prof.x == t],
+                     "At index %d" % i)
+
+    st1 = np.array([1.0, 2.0, 3.0, 4.0])
+    st2 = np.array([1.0, 2.0, 4.0])
+
+    sync_prof = spk.spike_sync_profile(SpikeTrain(st1, 5.0),
+                                       SpikeTrain(st2, 5.0))
+
+    coincidences = np.array(coincidence_impl(st1, st2, 0, 5.0, 0.0))
+    for i, t in enumerate(st1):
+        expected = sync_prof.y[sync_prof.x == t]/sync_prof.mp[sync_prof.x == t]
+        assert_equal(coincidences[i], expected,
+                     "At index %d" % i)
+
+
+def test_filter():
+    st1 = SpikeTrain(np.array([1.0, 2.0, 3.0, 4.0]), 5.0)
+    st2 = SpikeTrain(np.array([1.1, 2.1, 3.8]), 5.0)
+    st3 = SpikeTrain(np.array([0.9, 3.1, 4.1]), 5.0)
+
+    # filtered_spike_trains = spk.filter_by_spike_sync([st1, st2], 0.5)
+
+    # assert_equal(filtered_spike_trains[0].spikes, [1.0, 2.0, 4.0])
+    # assert_equal(filtered_spike_trains[1].spikes, [1.1, 2.1, 3.8])
+
+    # filtered_spike_trains = spk.filter_by_spike_sync([st2, st1], 0.5)
+
+    # assert_equal(filtered_spike_trains[0].spikes, [1.1, 2.1, 3.8])
+    # assert_equal(filtered_spike_trains[1].spikes, [1.0, 2.0, 4.0])
+
+    filtered_spike_trains = spk.filter_by_spike_sync([st1, st2, st3], 0.75)
+
+    for st in filtered_spike_trains:
+        print(st.spikes)
+
+    assert_equal(filtered_spike_trains[0].spikes, [1.0, 4.0])
+    assert_equal(filtered_spike_trains[1].spikes, [1.1, 3.8])
+    assert_equal(filtered_spike_trains[2].spikes, [0.9, 4.1])
+
+
+if __name__ == "__main__":
+    test_single_prof()
+    test_filter()
-- cgit v1.2.3
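
For quick reference, the spike train order API introduced by these commits can be
exercised roughly as follows. This is a minimal sketch, not part of the patches
themselves: the spike times are made up, the expected value is taken from
test_directionality.py above, and `optimal_spike_train_sorting` and
`permutate_matrix` are assumed to be exported at the package level like the
functions used in the tests (otherwise import them from
`pyspike.spike_directionality`); the sorting additionally needs the Cython
backend built via `python setup.py build_ext --inplace`::

    import pyspike as spk
    from pyspike import SpikeTrain

    # three short spike trains on the interval [0, 1000]
    st1 = SpikeTrain([100, 200, 300], [0, 1000])
    st2 = SpikeTrain([105, 205, 300], [0, 1000])
    st3 = SpikeTrain([105, 195, 500], [0, 1000])

    # synfire indicator in [-1, 1]; positive if st1 tends to lead st2
    print(spk.spike_train_order(st1, st2))   # 2/3, cf. test_directionality.py

    # antisymmetric leader/follower matrix with D[i, j] == -D[j, i]
    D = spk.spike_directionality_matrix([st1, st2, st3], normalize=False)

    # simulated annealing search for the permutation p that maximizes the
    # synfire indicator F of the reordered spike trains
    p, F = spk.optimal_spike_train_sorting([st1, st2, st3])
    D_sorted = spk.permutate_matrix(D, p)     # matrix in the optimized order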