From f9529c78538882879a07cb67e342eade8d2153ab Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 15 Sep 2014 17:01:13 +0200 Subject: isi distance and basic example --- pyspike/spikes.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 pyspike/spikes.py (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py new file mode 100644 index 0000000..42b6501 --- /dev/null +++ b/pyspike/spikes.py @@ -0,0 +1,18 @@ +""" spikes.py + +Module containing several function to load and transform spike trains + +Copyright 2014, Mario Mulansky +""" + +import numpy as np + +def spike_train_from_string(s, sep=' '): + """ Converts a string of times into a SpikeTrain object. + Params: + - s: the string with (ordered) spike times + - sep: The separator between the time numbers. + Returns: + - array of spike times + """ + return np.fromstring(s, sep=sep) -- cgit v1.2.3 From 5ea0fc218bb3bb30b1c40dd20e2e35a8bd11151c Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 15 Sep 2014 17:31:55 +0200 Subject: +merge_spike_trains --- examples/test_merge.py | 23 +++++++++++++++++++++++ pyspike/__init__.py | 2 +- pyspike/distances.py | 10 ---------- pyspike/spikes.py | 26 +++++++++++++++++++++++++- 4 files changed, 49 insertions(+), 12 deletions(-) create mode 100644 examples/test_merge.py (limited to 'pyspike/spikes.py') diff --git a/examples/test_merge.py b/examples/test_merge.py new file mode 100644 index 0000000..1186062 --- /dev/null +++ b/examples/test_merge.py @@ -0,0 +1,23 @@ +# compute the isi distance of some test data +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +import pyspike as spk + +# first load the data +spike_trains = [] +spike_file = open("SPIKY_testdata.txt", 'r') +for line in spike_file: + spike_trains.append(spk.spike_train_from_string(line)) + +spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) + +print(spikes) + +plt.plot(spike_trains[0], 
np.ones_like(spike_trains[0]), 'o') +plt.plot(spike_trains[1], np.ones_like(spike_trains[1]), 'x') +plt.plot(spikes, 2*np.ones_like(spikes), 'o') + +plt.show() diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 6651eb5..6895bd8 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -2,4 +2,4 @@ __all__ = ["function", "distances", "spikes"] from function import PieceWiseConstFunc from distances import isi_distance -from spikes import spike_train_from_string +from spikes import spike_train_from_string, merge_spike_trains diff --git a/pyspike/distances.py b/pyspike/distances.py index d9790dc..7044a52 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -9,16 +9,6 @@ import numpy as np from pyspike import PieceWiseConstFunc -def spike_train_from_string(s, sep=' '): - """ Converts a string of times into a SpikeTrain object. - Params: - - s: the string with (ordered) spike times - - sep: The separator between the time numbers. - Returns: - - array of spike times - """ - return np.fromstring(s, sep=sep) - def isi_distance(spikes1, spikes2, T_end, T_start=0.0): """ Computes the instantaneous isi-distance S_isi (t) of the two given spike trains. diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 42b6501..66ef554 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -8,7 +8,7 @@ Copyright 2014, Mario Mulansky import numpy as np def spike_train_from_string(s, sep=' '): - """ Converts a string of times into a SpikeTrain object. + """ Converts a string of times into an array of spike times. Params: - s: the string with (ordered) spike times - sep: The separator between the time numbers. @@ -16,3 +16,27 @@ def spike_train_from_string(s, sep=' '): - array of spike times """ return np.fromstring(s, sep=sep) + + +def merge_spike_trains( spike_trains ): + """ Merges a number of spike trains into a single spike train. 
+ Params: + - spike_trains: list of arrays of spike times + Returns: + - array with the merged spike times + """ + # get the lengths of the spike trains + lens = np.array([len(st) for st in spike_trains]) + merged_spikes = np.empty(np.sum(lens)) + index = 0 + indices = np.zeros_like(lens) + vals = [spike_trains[i][indices[i]] for i in xrange(len(indices))] + while len(indices) > 0: + i = np.argmin(vals) + merged_spikes[index] = vals[i] + index += 1 + indices[i] += 1 + if indices[i] >= lens[i]: + indices = np.delete(indices, i) + vals = [spike_trains[i][indices[i]] for i in xrange(len(indices))] + return merged_spikes -- cgit v1.2.3 From eb076dcd9d76ed3b848c78fb067c1ad6a1d6da23 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 16 Sep 2014 15:07:10 +0200 Subject: added merge spikes test --- pyspike/distances.py | 2 +- pyspike/spikes.py | 28 +++++++++++++++------------ test/SPIKY_testdata.txt | 40 ++++++++++++++++++++++++++++++++++++++ test/test_merge_spikes.py | 49 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 13 deletions(-) create mode 100755 test/SPIKY_testdata.txt create mode 100644 test/test_merge_spikes.py (limited to 'pyspike/spikes.py') diff --git a/pyspike/distances.py b/pyspike/distances.py index 7044a52..f4989c8 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -1,6 +1,6 @@ """ distances.py -Module containing several function to compute spike distances +Module containing several functions to compute spike distances Copyright 2014, Mario Mulansky """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 66ef554..6b2eea3 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -18,7 +18,7 @@ def spike_train_from_string(s, sep=' '): return np.fromstring(s, sep=sep) -def merge_spike_trains( spike_trains ): +def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. 
Params: - spike_trains: list of arrays of spike times @@ -28,15 +28,19 @@ def merge_spike_trains( spike_trains ): # get the lengths of the spike trains lens = np.array([len(st) for st in spike_trains]) merged_spikes = np.empty(np.sum(lens)) - index = 0 - indices = np.zeros_like(lens) - vals = [spike_trains[i][indices[i]] for i in xrange(len(indices))] - while len(indices) > 0: - i = np.argmin(vals) - merged_spikes[index] = vals[i] - index += 1 - indices[i] += 1 - if indices[i] >= lens[i]: - indices = np.delete(indices, i) - vals = [spike_trains[i][indices[i]] for i in xrange(len(indices))] + index = 0 # the index for merged_spikes + indices = np.zeros_like(lens) # indices of the spike trains + index_list = np.arange(len(indices)) # indices of indices of spike trains + # that have not yet reached the end + # list of the possible events in the spike trains + vals = [spike_trains[i][indices[i]] for i in index_list] + while len(index_list) > 0: + i = np.argmin(vals) # the next spike is the minimum + merged_spikes[index] = vals[i] # put it to the merged spike train + i = index_list[i] + index += 1 # next index of merged spike train + indices[i] += 1 # next index for the chosen spike train + if indices[i] >= lens[i]: # remove spike train index if ended + index_list = index_list[index_list != i] + vals = [spike_trains[i][indices[i]] for i in index_list] return merged_spikes diff --git a/test/SPIKY_testdata.txt b/test/SPIKY_testdata.txt new file mode 100755 index 0000000..8fa3fcf --- /dev/null +++ b/test/SPIKY_testdata.txt @@ -0,0 +1,40 @@ +64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 +65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 +69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 +59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 +59.977 
306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 +66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 +66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 +63.764 318.45 697.48 936.97 1059.3 1325 1687.9 1944.7 2132.5 2377.1 2713.1 2976.6 3196.8 3442.6 3741.6 3998.3 +63.906 314.79 693.26 937.12 1065.9 1315.8 1584.3 1821.5 2126.3 2396.8 2709.1 2967 3197.4 3444 3732.8 3849.5 +69.493 316.62 689.81 943.62 1071.9 1296.3 1654.8 1931.9 2127.5 2390.6 2708.9 2950.4 3194.8 3445.2 3670.1 3903.3 +61.789 317.53 555.82 813.15 1198.7 1448.7 1686.7 1943.5 2060.7 2311.4 2658.2 2900.2 3167.4 3418.2 3617.3 3771 +64.098 309.86 567.27 813.91 1182 1464.3 1576.8 1822.5 2063.1 2311.7 2655.8 2911.7 3168.3 3418.2 3586.4 3999.7 +68.59 315.5 559.52 806.23 1182.5 1441.1 1567.2 1804.8 2074.9 2315.8 2655.1 2913.2 3165.9 3419.5 3648.1 3884.4 +66.507 314.42 556.42 814.83 1182.5 1440.3 1701.3 1911.1 2069.7 2319.3 2662.3 2903.2 3167.4 3418.5 3545 3893.9 +72.744 318.45 554.4 819.64 1186.9 1449.7 1676 1957.4 2051.4 2302.8 2657.8 2916.2 3169.4 3416.7 3570.4 3884.8 +64.779 324.42 560.56 828.99 1174.8 1439.9 1563.7 1790.6 2067.7 2287.6 2657.4 2905.2 3139.2 3389.1 3507.8 3807.5 +64.852 316.63 568.89 815.61 1198.3 1454.1 1710.6 1933.9 2091.5 2309.6 2660.9 2907.5 3137.2 3389.3 3617.2 +63.089 314.52 553.8 827.06 1183.9 1457.6 1558.9 1808.3 2064.5 2337.1 2653.6 2897 3143.7 3385.7 3668.7 3803.8 +62.23 315.16 564.35 812.15 1199.6 1448.9 1562.7 1839.1 2069.7 2308.9 2649.6 2919.7 3141 3389.9 3723.6 3882.2 +69.662 311.93 564.91 805.25 1209.7 1451.4 1691.9 1932.1 2044.2 2329.4 2657.1 2908.5 3142.8 3390.5 3597.3 3991.1 +183.42 431.34 562.41 809.57 1086.3 1308.9 1555.9 1831.3 2057 2326.9 2591.3 2831.4 3113.9 3367.9 3555.3 3956 +188.49 442.39 572.4 810.76 1065 1326.7 1564.3 1803.4 2060.4 2322.4 2607.2 2824.1 3110.2 3363.9 3644.1 3819.6 +177 437.76 
569.82 819.66 1064.1 1309.2 1685.7 1957.5 2066.9 2313.8 2593.2 2847 3116.8 3364.5 3727.3 3881.6 +193.9 441.93 586.9 804.98 1062.5 1312.4 1542.4 1793.1 2073.9 2314.7 2587.8 2845.9 3112.4 3359.8 +193.01 440.26 555.64 814.08 1056.3 1315 1689.9 1961.4 2049.1 2305 2593.9 2847.5 3110.6 3361.1 3711.6 3914.7 +194.71 437.57 566.18 806.73 1069.2 1314.6 1682.7 1942.2 2061.8 2304.6 2607.6 2841.7 3082.9 3330.3 3679.7 3848.2 +184.88 441.22 570.92 794.35 1063.7 1309.9 1678.7 1930 2058 2321.3 2606.7 2845 3084.8 3337.3 3640 3952.1 +189.66 443.59 560.67 816.89 1070.4 1303.4 1550.1 1815.5 2057.6 2323.7 2587.1 2843.5 3086.6 3333.6 3618.2 3815.4 +190.41 440.77 568.96 808.56 1073.8 1322.1 1686.5 1952.8 2068.7 2335.7 2595.7 2845.4 3086 3333.5 3635.6 3939.3 +181.16 440.67 577.54 823.52 1052.5 1322.3 1578.4 1822.2 2079.4 2309.1 2596.9 2851.9 3083.5 3335.1 3531.2 3770.6 +181.09 434.97 687.15 943.33 1192.9 1444 1699.4 1942 2194.6 2445.9 2549.4 2785.1 3056.5 3308.2 3620.5 3932.7 +186.7 446.53 688.18 942.86 1186.1 1441.9 1688.1 1922.2 2196.6 2455.3 2534.8 2776.5 3060.3 3309.4 3514.1 3808.6 +196.76 446 681.26 948.27 1195.8 1433.1 1699 1933 2201.2 2461.4 2547.4 2777.8 3055.7 3307.1 3590.6 3952.8 +200.68 427.11 695.67 946.42 1178.6 1440.1 1538.4 1809 2199.8 2432.5 2531.6 2793.2 3056.6 3308.6 3510.6 3928.1 +190.83 429.57 698.73 931.16 1190.6 1428.9 1698.3 1935 2176.8 2424.7 2530.5 2766.9 3062 3309.7 3689.8 +181.47 441.93 682.32 943.01 1190.1 1459.1 1570.6 1819.6 2189.8 2437.9 2543.3 2782.8 3025.9 3280.2 3581 3855.9 +191.38 435.69 702.76 935.62 1188.3 1438.3 1564.2 1823.9 2191.3 2444.9 2531.9 2782.4 3030.7 3275.7 3677.7 3829.2 +191.97 433.85 686.29 932.65 1183.1 1432.7 1563.9 1826.5 2214.1 2436.8 2529.8 2778.3 3028.3 3281.8 3582 3863.4 +189.51 453.21 691.3 940.86 1180.1 1430.1 1567.1 1835 2199 2448.2 2526.7 2773.8 3030.5 3280.1 3576.2 3893.6 +190.88 435.48 692.66 940.51 1189.5 1448.9 1575.1 1824.2 2190.8 2425.9 2530.6 2783.3 3033.3 3279.5 3733 3838.9 diff --git a/test/test_merge_spikes.py 
b/test/test_merge_spikes.py new file mode 100644 index 0000000..3162700 --- /dev/null +++ b/test/test_merge_spikes.py @@ -0,0 +1,49 @@ +""" test_merge_spikes.py + +Tests merging spikes + +Copyright 2014, Mario Mulansky +""" +from __future__ import print_function +import numpy as np + +import pyspike as spk + +def check_merged_spikes( merged_spikes, spike_trains ): + # create a flat array with all spike events + all_spikes = np.array([]) + for spike_train in spike_trains: + all_spikes = np.append(all_spikes, spike_train) + indices = np.zeros_like(all_spikes, dtype='bool') + # check if we find all the spike events in the original spike trains + for x in merged_spikes: + i = np.where(all_spikes == x)[0][0] # the first axis and the first entry + # change to something impossible so we dont find this event again + all_spikes[i] = -1.0 + indices[i] = True + assert( indices.all() ) + +def test_merge_spike_trains(): + + # first load the data + spike_trains = [] + spike_file = open("SPIKY_testdata.txt", 'r') + for line in spike_file: + spike_trains.append(spk.spike_train_from_string(line)) + + spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) + # test if result is sorted + assert((spikes == np.sort(spikes)).all()) + # check merging + check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] ) + + spikes = spk.merge_spike_trains(spike_trains) + # test if result is sorted + assert((spikes == np.sort(spikes)).all()) + # check merging + check_merged_spikes( spikes, spike_trains ) + + +if __name__ == "main": + test_merge_spike_trains() + -- cgit v1.2.3 From e4f1c09672068e4778f7b5f3e27b47ff8986863c Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 29 Sep 2014 12:55:56 +0200 Subject: +mul_scalar, tests restructured and cosmetics --- pyspike/distances.py | 14 ++++++++++++++ pyspike/function.py | 15 +++++++++++++++ pyspike/spikes.py | 6 ++++++ test/test_function.py | 39 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 74 insertions(+) (limited 
to 'pyspike/spikes.py') diff --git a/pyspike/distances.py b/pyspike/distances.py index 10b1d3c..f4be625 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -9,6 +9,10 @@ import numpy as np from pyspike import PieceWiseConstFunc, PieceWiseLinFunc + +############################################################ +# add_auxiliary_spikes +############################################################ def add_auxiliary_spikes( spike_train, T_end , T_start=0.0): """ Adds spikes at the beginning (T_start) and end (T_end) of the observation interval. @@ -29,6 +33,10 @@ def add_auxiliary_spikes( spike_train, T_end , T_start=0.0): spike_train = np.append(spike_train, T_end) return spike_train + +############################################################ +# isi_distance +############################################################ def isi_distance(spikes1, spikes2): """ Computes the instantaneous isi-distance S_isi (t) of the two given spike trains. The spike trains are expected to have auxiliary spikes at the @@ -95,6 +103,9 @@ def isi_distance(spikes1, spikes2): return PieceWiseConstFunc(spike_events[:index+1], isi_values[:index]) +############################################################ +# get_min_dist +############################################################ def get_min_dist(spike_time, spike_train, start_index=0): """ Returns the minimal distance |spike_time - spike_train[i]| with i>=start_index. @@ -111,6 +122,9 @@ def get_min_dist(spike_time, spike_train, start_index=0): return d +############################################################ +# spike_distance +############################################################ def spike_distance(spikes1, spikes2): """ Computes the instantaneous spike-distance S_spike (t) of the two given spike trains. 
The spike trains are expected to have auxiliary spikes at the diff --git a/pyspike/function.py b/pyspike/function.py index b705293..3a5a01c 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -109,6 +109,13 @@ class PieceWiseConstFunc: self.x = x_new[:index+2] self.y = y_new[:index+1] + def mul_scalar(self, fac): + """ Multiplies the function with a scalar value + Params: + - fac: Value to multiply + """ + self.y *= fac + ############################################################## # PieceWiseLinFunc @@ -236,3 +243,11 @@ class PieceWiseLinFunc: self.x = x_new[:index+2] self.y1 = y1_new[:index+1] self.y2 = y2_new[:index+1] + + def mul_scalar(self, fac): + """ Multiplies the function with a scalar value + Params: + - fac: Value to multiply + """ + self.y1 *= fac + self.y2 *= fac diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6b2eea3..70b48ff 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -7,6 +7,9 @@ Copyright 2014, Mario Mulansky import numpy as np +############################################################ +# spike_train_from_string +############################################################ def spike_train_from_string(s, sep=' '): """ Converts a string of times into an array of spike times. Params: @@ -18,6 +21,9 @@ def spike_train_from_string(s, sep=' '): return np.fromstring(s, sep=sep) +############################################################ +# merge_spike_trains +############################################################ def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. 
Params: diff --git a/test/test_function.py b/test/test_function.py index 014ecac..7420011 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -29,6 +29,13 @@ def test_pwc(): assert_almost_equal(f.abs_avrg(), (1.0+0.5+0.5*1.5+1.5*0.75)/4.0, decimal=16) + +def test_pwc_add(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f = spk.PieceWiseConstFunc(x, y) + f1 = copy(f) x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] y = [0.5, 1.0, -0.25, 0.0, 1.5] @@ -48,6 +55,17 @@ def test_pwc(): assert_array_almost_equal(f1.x, f2.x, decimal=16) assert_array_almost_equal(f1.y, 2*f2.y, decimal=16) +def test_pwc_mul(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f = spk.PieceWiseConstFunc(x, y) + + f.mul_scalar(1.5) + assert_array_almost_equal(f.x, x, decimal=16) + assert_array_almost_equal(f.y, 1.5*np.array(y), decimal=16) + f.mul_scalar(1.0/5.0) + assert_array_almost_equal(f.y, 1.5/5.0*np.array(y), decimal=16) + def test_pwl(): x = [0.0, 1.0, 2.0, 2.5, 4.0] @@ -67,6 +85,13 @@ def test_pwl(): abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16) + +def test_pwl_add(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y1 = [1.0, -0.5, 1.5, 0.75] + y2 = [1.5, -0.4, 1.5, 0.25] + f = spk.PieceWiseLinFunc(x, y1, y2) + f1 = copy(f) x = [0.0, 0.75, 2.0, 2.5, 2.7, 4.0] y1 = [0.5, 1.0, -0.25, 0.0, 1.5] @@ -94,5 +119,19 @@ def test_pwl(): assert_array_almost_equal(f1.y2, 2*f2.y2, decimal=16) +def test_pwc_mul(): + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y1 = [1.0, -0.5, 1.5, 0.75] + y2 = [1.5, -0.4, 1.5, 0.25] + f = spk.PieceWiseLinFunc(x, y1, y2) + + f.mul_scalar(1.5) + assert_array_almost_equal(f.x, x, decimal=16) + assert_array_almost_equal(f.y1, 1.5*np.array(y1), decimal=16) + assert_array_almost_equal(f.y2, 1.5*np.array(y2), decimal=16) + f.mul_scalar(1.0/5.0) + assert_array_almost_equal(f.y1, 1.5/5.0*np.array(y1), decimal=16) + assert_array_almost_equal(f.y2, 1.5/5.0*np.array(y2), 
decimal=16) + if __name__ == "__main__": test_pwc() -- cgit v1.2.3 From a769a03d089ac0c61e2155239a28665c9316e14a Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 10 Oct 2014 17:04:04 +0200 Subject: added load_txt function, some restructuring --- Readme.md | 2 +- examples/SPIKY_testdata.txt | 3 ++ examples/test_data.py | 11 +++--- pyspike/__init__.py | 5 +-- pyspike/distances.py | 25 -------------- pyspike/spikes.py | 72 ++++++++++++++++++++++++++++++++++++-- test/SPIKY_testdata.txt | 3 ++ test/test_distance.py | 24 +++++-------- test/test_merge_spikes.py | 49 -------------------------- test/test_spikes.py | 84 +++++++++++++++++++++++++++++++++++++++++++++ 10 files changed, 176 insertions(+), 102 deletions(-) delete mode 100644 test/test_merge_spikes.py create mode 100644 test/test_spikes.py (limited to 'pyspike/spikes.py') diff --git a/Readme.md b/Readme.md index 368eef4..8b84ebd 100644 --- a/Readme.md +++ b/Readme.md @@ -1,7 +1,7 @@ # PySpike PySpike is a Python library for numerical analysis of spike train similarity. -Its core functionality are the implementation of the bivariate [ISI and SPIKE distance](http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony). +Its core functionality is the implementation of the bivariate [ISI and SPIKE distance](http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony). Additionally, it allows to compute multi-variate spike train distances, averaging and general spike train processing. All source codes are published under the liberal [MIT License](http://opensource.org/licenses/MIT). 
diff --git a/examples/SPIKY_testdata.txt b/examples/SPIKY_testdata.txt index 8fa3fcf..c8bea67 100755 --- a/examples/SPIKY_testdata.txt +++ b/examples/SPIKY_testdata.txt @@ -1,7 +1,10 @@ 64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 +# test comment 69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 +# empty line + 59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 diff --git a/examples/test_data.py b/examples/test_data.py index ff7b510..dcd0f20 100644 --- a/examples/test_data.py +++ b/examples/test_data.py @@ -7,17 +7,14 @@ import matplotlib.pyplot as plt import pyspike as spk -# first load the data -spike_trains = [] -spike_file = open("SPIKY_testdata.txt", 'r') -for line in spike_file: - spike_trains.append(spk.spike_train_from_string(line)) +spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=(0,4000)) # plot the spike time for (i,spikes) in enumerate(spike_trains): plt.plot(spikes, i*np.ones_like(spikes), 'o') -f = spk.isi_distance(spike_trains[0], spike_trains[1], 4000) +f = spk.isi_distance(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() plt.figure() @@ -27,7 +24,7 @@ print("Average: %.8f" % f.avrg()) print("Absolute average: %.8f" % f.abs_avrg()) -f = spk.spike_distance(spike_trains[0], spike_trains[1], 4000) +f = spk.spike_distance(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() print(x) 
print(y) diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 21005e9..2703f65 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,6 +1,7 @@ __all__ = ["function", "distances", "spikes"] from function import PieceWiseConstFunc, PieceWiseLinFunc -from distances import add_auxiliary_spikes, isi_distance, spike_distance, \ +from distances import isi_distance, spike_distance, \ isi_distance_multi, spike_distance_multi, isi_distance_matrix -from spikes import spike_train_from_string, merge_spike_trains +from spikes import add_auxiliary_spikes, load_spike_trains_from_txt, \ + spike_train_from_string, merge_spike_trains diff --git a/pyspike/distances.py b/pyspike/distances.py index f78c0d4..da603ad 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -11,31 +11,6 @@ import threading from pyspike import PieceWiseConstFunc, PieceWiseLinFunc -############################################################ -# add_auxiliary_spikes -############################################################ -def add_auxiliary_spikes( spike_train, T_end , T_start=0.0): - """ Adds spikes at the beginning (T_start) and end (T_end) of the - observation interval. - Args: - - spike_train: ordered array of spike times - - T_end: end time of the observation interval - - T_start: start time of the observation interval (default 0.0) - Returns: - - spike train with additional spikes at T_start and T_end. 
- - """ - assert spike_train[0] >= T_start, \ - "Spike train has events before the given start time" - assert spike_train[-1] <= T_end, \ - "Spike train has events after the given end time" - if spike_train[0] != T_start: - spike_train = np.insert(spike_train, 0, T_start) - if spike_train[-1] != T_end: - spike_train = np.append(spike_train, T_end) - return spike_train - - ############################################################ # isi_distance ############################################################ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 70b48ff..502c460 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -7,12 +7,46 @@ Copyright 2014, Mario Mulansky import numpy as np + +############################################################ +# add_auxiliary_spikes +############################################################ +def add_auxiliary_spikes(spike_train, time_interval): + """ Adds spikes at the beginning and end of the given time interval. + Args: + - spike_train: ordered array of spike times + - time_interval: A pair (T_start, T_end) of values representing the start + and end time of the spike train measurement or a single value representing + the end time, the T_start is then assuemd as 0. Auxiliary spikes will be + added to the spike train at the beginning and end of this interval. + Returns: + - spike train with additional spikes at T_start and T_end. 
+ + """ + try: + T_start = time_interval[0] + T_end = time_interval[1] + except: + T_start = 0 + T_end = time_interval + + assert spike_train[0] >= T_start, \ + "Spike train has events before the given start time" + assert spike_train[-1] <= T_end, \ + "Spike train has events after the given end time" + if spike_train[0] != T_start: + spike_train = np.insert(spike_train, 0, T_start) + if spike_train[-1] != T_end: + spike_train = np.append(spike_train, T_end) + return spike_train + + ############################################################ # spike_train_from_string ############################################################ def spike_train_from_string(s, sep=' '): """ Converts a string of times into an array of spike times. - Params: + Args: - s: the string with (ordered) spike times - sep: The separator between the time numbers. Returns: @@ -21,12 +55,46 @@ def spike_train_from_string(s, sep=' '): return np.fromstring(s, sep=sep) +############################################################ +# load_spike_trains_txt +############################################################ +def load_spike_trains_from_txt(file_name, time_interval=None, + separator=' ', comment='#'): + """ Loads a number of spike trains from a text file. Each line of the text + file should contain one spike train as a sequence of spike times separated + by `separator`. Empty lines as well as lines starting with `comment` are + neglected. The `time_interval` represents the start and the end of the spike + trains and it is used to add auxiliary spikes at the beginning and end of + each spike train. However, if `time_interval == None`, no auxiliary spikes + are added, but note that the Spike and ISI distance both require auxiliary + spikes. + Args: + - file_name: The name of the text file. + - time_interval: A pair (T_start, T_end) of values representing the start + and end time of the spike train measurement or a single value representing + the end time, the T_start is then assuemd as 0. 
Auxiliary spikes will be + added to the spike train at the beginning and end of this interval. + - separator: The character used to seprate the values in the text file. + - comment: Lines starting with this character are ignored. + """ + spike_trains = [] + spike_file = open(file_name, 'r') + for line in spike_file: + if len(line) > 1 and not line.startswith(comment): + # use only the lines with actual data and not commented + spike_train = spike_train_from_string(line) + if not time_interval == None: # add auxiliary spikes if times given + spike_train = add_auxiliary_spikes(spike_train, time_interval) + spike_trains.append(spike_train) + return spike_trains + + ############################################################ # merge_spike_trains ############################################################ def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. - Params: + Args: - spike_trains: list of arrays of spike times Returns: - array with the merged spike times diff --git a/test/SPIKY_testdata.txt b/test/SPIKY_testdata.txt index 8fa3fcf..c8bea67 100755 --- a/test/SPIKY_testdata.txt +++ b/test/SPIKY_testdata.txt @@ -1,7 +1,10 @@ 64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 +# test comment 69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 +# empty line + 59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 diff --git 
a/test/test_distance.py b/test/test_distance.py index c43f0b3..92b99ae 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -13,14 +13,6 @@ from numpy.testing import assert_equal, assert_array_almost_equal import pyspike as spk -def test_auxiliary_spikes(): - t = np.array([0.2, 0.4, 0.6, 0.7]) - t_aux = spk.add_auxiliary_spikes(t, T_end=1.0, T_start=0.1) - assert_equal(t_aux, [0.1, 0.2, 0.4, 0.6, 0.7, 1.0]) - t_aux = spk.add_auxiliary_spikes(t_aux, 1.0) - assert_equal(t_aux, [0.0, 0.1, 0.2, 0.4, 0.6, 0.7, 1.0]) - - def test_isi(): # generate two spike trains: t1 = np.array([0.2, 0.4, 0.6, 0.7]) @@ -31,8 +23,8 @@ def test_isi(): expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, -0.25/0.35, -0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3] - t1 = spk.add_auxiliary_spikes(t1, 1.0) - t2 = spk.add_auxiliary_spikes(t2, 1.0) + t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0)) + t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0)) f = spk.isi_distance(t1, t2) # print("ISI: ", f.y) @@ -47,8 +39,8 @@ def test_isi(): expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0] expected_isi = [0.1/0.2, -0.1/0.3, -0.1/0.3, 0.1/0.2, 0.1/0.2, -0.0/0.5] - t1 = spk.add_auxiliary_spikes(t1, 1.0) - t2 = spk.add_auxiliary_spikes(t2, 1.0) + t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0)) + t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0)) f = spk.isi_distance(t1, t2) assert_equal(f.x, expected_times) @@ -72,8 +64,8 @@ def test_spike(): expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2) expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2) - t1 = spk.add_auxiliary_spikes(t1, 1.0) - t2 = spk.add_auxiliary_spikes(t2, 1.0) + t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0)) + t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0)) f = spk.spike_distance(t1, t2) assert_equal(f.x, expected_times) @@ -92,8 +84,8 @@ def test_spike(): expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2) expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2) - t1 = spk.add_auxiliary_spikes(t1, 1.0) 
- t2 = spk.add_auxiliary_spikes(t2, 1.0) + t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0)) + t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0)) f = spk.spike_distance(t1, t2) assert_equal(f.x, expected_times) diff --git a/test/test_merge_spikes.py b/test/test_merge_spikes.py deleted file mode 100644 index 3162700..0000000 --- a/test/test_merge_spikes.py +++ /dev/null @@ -1,49 +0,0 @@ -""" test_merge_spikes.py - -Tests merging spikes - -Copyright 2014, Mario Mulansky -""" -from __future__ import print_function -import numpy as np - -import pyspike as spk - -def check_merged_spikes( merged_spikes, spike_trains ): - # create a flat array with all spike events - all_spikes = np.array([]) - for spike_train in spike_trains: - all_spikes = np.append(all_spikes, spike_train) - indices = np.zeros_like(all_spikes, dtype='bool') - # check if we find all the spike events in the original spike trains - for x in merged_spikes: - i = np.where(all_spikes == x)[0][0] # the first axis and the first entry - # change to something impossible so we dont find this event again - all_spikes[i] = -1.0 - indices[i] = True - assert( indices.all() ) - -def test_merge_spike_trains(): - - # first load the data - spike_trains = [] - spike_file = open("SPIKY_testdata.txt", 'r') - for line in spike_file: - spike_trains.append(spk.spike_train_from_string(line)) - - spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) - # test if result is sorted - assert((spikes == np.sort(spikes)).all()) - # check merging - check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] ) - - spikes = spk.merge_spike_trains(spike_trains) - # test if result is sorted - assert((spikes == np.sort(spikes)).all()) - # check merging - check_merged_spikes( spikes, spike_trains ) - - -if __name__ == "main": - test_merge_spike_trains() - diff --git a/test/test_spikes.py b/test/test_spikes.py new file mode 100644 index 0000000..dca580f --- /dev/null +++ b/test/test_spikes.py @@ -0,0 +1,84 @@ +""" test_load.py + +Test 
loading of spike trains from text files + +Copyright 2014, Mario Mulansky +""" + +from __future__ import print_function +import numpy as np +from numpy.testing import assert_equal + +import pyspike as spk + + +def test_auxiliary_spikes(): + t = np.array([0.2, 0.4, 0.6, 0.7]) + t_aux = spk.add_auxiliary_spikes(t, time_interval=(0.1, 1.0)) + assert_equal(t_aux, [0.1, 0.2, 0.4, 0.6, 0.7, 1.0]) + t_aux = spk.add_auxiliary_spikes(t_aux, time_interval=(0.0, 1.0)) + assert_equal(t_aux, [0.0, 0.1, 0.2, 0.4, 0.6, 0.7, 1.0]) + + +def test_load_from_txt(): + spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=(0,4000)) + assert len(spike_trains) == 40 + + # check the first spike train + spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1, + 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7, + 3644.3, 3936.3, 4000] + assert_equal(spike_times, spike_trains[0]) + + # check auxiliary spikes + for spike_train in spike_trains: + assert spike_train[0] == 0.0 + assert spike_train[-1] == 4000 + + # load without adding auxiliary spikes + spike_trains2 = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=None) + assert len(spike_trains2) == 40 + # check auxiliary spikes + for i in xrange(len(spike_trains)): + assert len(spike_trains[i]) == len(spike_trains2[i])+2 # two spikes less + + +def check_merged_spikes( merged_spikes, spike_trains ): + # create a flat array with all spike events + all_spikes = np.array([]) + for spike_train in spike_trains: + all_spikes = np.append(all_spikes, spike_train) + indices = np.zeros_like(all_spikes, dtype='bool') + # check if we find all the spike events in the original spike trains + for x in merged_spikes: + i = np.where(all_spikes == x)[0][0] # the first axis and the first entry + # change to something impossible so we dont find this event again + all_spikes[i] = -1.0 + indices[i] = True + assert( indices.all() ) + + +def test_merge_spike_trains(): + # first load the data + 
spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=(0,4000)) + + spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) + # test if result is sorted + assert((spikes == np.sort(spikes)).all()) + # check merging + check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] ) + + spikes = spk.merge_spike_trains(spike_trains) + # test if result is sorted + assert((spikes == np.sort(spikes)).all()) + # check merging + check_merged_spikes( spikes, spike_trains ) + +if __name__ == "main": + test_auxiliary_spikes() + test_load_from_txt() + test_merge_spike_trains() + -- cgit v1.2.3 From c1c5403b8274bd19aa1e71933cfaefe1ba622e59 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 10 Oct 2014 17:23:28 +0200 Subject: added License note in headers --- examples/isi_matrix.py | 11 +++++++++++ examples/merge.py | 28 ++++++++++++++++++++++++++++ examples/plot.py | 42 ++++++++++++++++++++++++++++++++++++++++++ examples/test_data.py | 34 ---------------------------------- examples/test_merge.py | 20 -------------------- pyspike/__init__.py | 6 ++++++ pyspike/cython_distance.pyx | 3 +++ pyspike/distances.py | 2 ++ pyspike/function.py | 2 ++ pyspike/python_backend.py | 4 +++- pyspike/spikes.py | 2 ++ test/test_distance.py | 3 +++ test/test_function.py | 2 ++ 13 files changed, 104 insertions(+), 55 deletions(-) create mode 100644 examples/merge.py create mode 100644 examples/plot.py delete mode 100644 examples/test_data.py delete mode 100644 examples/test_merge.py (limited to 'pyspike/spikes.py') diff --git a/examples/isi_matrix.py b/examples/isi_matrix.py index 0d6e185..3297d3d 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -1,3 +1,14 @@ +""" isi_matrix.py + +Simple example showing how to compute the isi distance matrix of a set of spike +trains. 
+ +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + + from __future__ import print_function import numpy as np diff --git a/examples/merge.py b/examples/merge.py new file mode 100644 index 0000000..55c7f0a --- /dev/null +++ b/examples/merge.py @@ -0,0 +1,28 @@ +""" merge.py + +Simple example showing the merging of two spike trains. + +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +import pyspike as spk + +# first load the data, ending time = 4000 +spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", 4000) + +spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) + +print(spikes) + +plt.plot(spike_trains[0], np.ones_like(spike_trains[0]), 'o') +plt.plot(spike_trains[1], np.ones_like(spike_trains[1]), 'x') +plt.plot(spikes, 2*np.ones_like(spikes), 'o') + +plt.show() diff --git a/examples/plot.py b/examples/plot.py new file mode 100644 index 0000000..d7e2173 --- /dev/null +++ b/examples/plot.py @@ -0,0 +1,42 @@ +""" plot.py + +Simple example showing how to load and plot spike trains and their distances. 
+ +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + + +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +import pyspike as spk + +spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + time_interval=(0,4000)) + +# plot the spike time +for (i,spikes) in enumerate(spike_trains): + plt.plot(spikes, i*np.ones_like(spikes), 'o') + +f = spk.isi_distance(spike_trains[0], spike_trains[1]) +x, y = f.get_plottable_data() + +plt.figure() +plt.plot(x, np.abs(y), '--k') + +print("Average: %.8f" % f.avrg()) +print("Absolute average: %.8f" % f.abs_avrg()) + + +f = spk.spike_distance(spike_trains[0], spike_trains[1]) +x, y = f.get_plottable_data() +print(x) +print(y) +#plt.figure() +plt.plot(x, y, '-b') + +plt.show() diff --git a/examples/test_data.py b/examples/test_data.py deleted file mode 100644 index dcd0f20..0000000 --- a/examples/test_data.py +++ /dev/null @@ -1,34 +0,0 @@ -# compute the isi distance of some test data - -from __future__ import print_function - -import numpy as np -import matplotlib.pyplot as plt - -import pyspike as spk - -spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", - time_interval=(0,4000)) - -# plot the spike time -for (i,spikes) in enumerate(spike_trains): - plt.plot(spikes, i*np.ones_like(spikes), 'o') - -f = spk.isi_distance(spike_trains[0], spike_trains[1]) -x, y = f.get_plottable_data() - -plt.figure() -plt.plot(x, np.abs(y), '--k') - -print("Average: %.8f" % f.avrg()) -print("Absolute average: %.8f" % f.abs_avrg()) - - -f = spk.spike_distance(spike_trains[0], spike_trains[1]) -x, y = f.get_plottable_data() -print(x) -print(y) -#plt.figure() -plt.plot(x, y, '-b') - -plt.show() diff --git a/examples/test_merge.py b/examples/test_merge.py deleted file mode 100644 index 0c34608..0000000 --- a/examples/test_merge.py +++ /dev/null @@ -1,20 +0,0 @@ -# compute the isi distance of some test data -from __future__ import print_function - -import 
numpy as np -import matplotlib.pyplot as plt - -import pyspike as spk - -# first load the data, ending time = 4000 -spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", 4000) - -spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) - -print(spikes) - -plt.plot(spike_trains[0], np.ones_like(spike_trains[0]), 'o') -plt.plot(spike_trains[1], np.ones_like(spike_trains[1]), 'x') -plt.plot(spikes, 2*np.ones_like(spikes), 'o') - -plt.show() diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 2703f65..3867e6e 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,3 +1,9 @@ +""" +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) +""" + __all__ = ["function", "distances", "spikes"] from function import PieceWiseConstFunc, PieceWiseLinFunc diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index 2be8525..4ab4381 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -11,6 +11,9 @@ Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects improves the performance of spike_distance by a factor of 10! Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) + """ """ diff --git a/pyspike/distances.py b/pyspike/distances.py index da603ad..db04c4e 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -3,6 +3,8 @@ Module containing several functions to compute spike distances Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ import numpy as np diff --git a/pyspike/function.py b/pyspike/function.py index 5444c36..243ef67 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -5,6 +5,8 @@ functions. 
Copyright 2014, Mario Mulansky +Distributed under the MIT License (MIT) + """ from __future__ import print_function diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index 9134149..e5b74e9 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -3,7 +3,9 @@ Collection of python functions that can be used instead of the cython implementation. -Copyright 2014, Mario Mulanksy +Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 502c460..9375e30 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -3,6 +3,8 @@ Module containing several function to load and transform spike trains Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ import numpy as np diff --git a/test/test_distance.py b/test/test_distance.py index 84d0af9..dafe693 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -3,6 +3,9 @@ Tests the isi- and spike-distance computation Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) + """ from __future__ import print_function diff --git a/test/test_function.py b/test/test_function.py index 7420011..c0fb3fd 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -3,6 +3,8 @@ Tests the PieceWiseConst and PieceWiseLinear functions Copyright 2014, Mario Mulansky + +Distributed under the MIT License (MIT) """ from __future__ import print_function -- cgit v1.2.3 From adc1e91c89aeecf3ee1743d3595b282061a22573 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Sun, 12 Oct 2014 18:49:37 +0200 Subject: added sort to load function, renamed test data file --- examples/PySpike_testdata.txt | 6 +++--- pyspike/spikes.py | 15 ++++++++++----- test/PySpike_testdata.txt | 43 +++++++++++++++++++++++++++++++++++++++++++ test/SPIKY_testdata.txt | 43 ------------------------------------------- test/test_spikes.py | 8 ++++---- 5 files changed, 60 insertions(+), 55 deletions(-) create mode 100755 
test/PySpike_testdata.txt delete mode 100755 test/SPIKY_testdata.txt (limited to 'pyspike/spikes.py') diff --git a/examples/PySpike_testdata.txt b/examples/PySpike_testdata.txt index c8bea67..41b2362 100755 --- a/examples/PySpike_testdata.txt +++ b/examples/PySpike_testdata.txt @@ -1,10 +1,10 @@ +# PySpike exemplary spike trains +# lines starting with # are ignored, just like empty lines + 64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 -# test comment 69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 -# empty line - 59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 9375e30..6ea94de 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -46,22 +46,26 @@ def add_auxiliary_spikes(spike_train, time_interval): ############################################################ # spike_train_from_string ############################################################ -def spike_train_from_string(s, sep=' '): +def spike_train_from_string(s, sep=' ', sort=True): """ Converts a string of times into an array of spike times. Args: - s: the string with (ordered) spike times - - sep: The separator between the time numbers. + - sep: The separator between the time numbers, default=' '. + - sort: If True, the spike times are order via `np.sort`, default=True. 
Returns: - array of spike times """ - return np.fromstring(s, sep=sep) + if sort: + return np.sort(np.fromstring(s, sep=sep)) + else: + return np.fromstring(s, sep=sep) ############################################################ # load_spike_trains_txt ############################################################ def load_spike_trains_from_txt(file_name, time_interval=None, - separator=' ', comment='#'): + separator=' ', comment='#', sort=True): """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated by `separator`. Empty lines as well as lines starting with `comment` are @@ -78,13 +82,14 @@ def load_spike_trains_from_txt(file_name, time_interval=None, added to the spike train at the beginning and end of this interval. - separator: The character used to seprate the values in the text file. - comment: Lines starting with this character are ignored. + - sort: If true, the spike times are order via `np.sort`, default=True. 
""" spike_trains = [] spike_file = open(file_name, 'r') for line in spike_file: if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line) + spike_train = spike_train_from_string(line, separator, sort) if not time_interval == None: # add auxiliary spikes if times given spike_train = add_auxiliary_spikes(spike_train, time_interval) spike_trains.append(spike_train) diff --git a/test/PySpike_testdata.txt b/test/PySpike_testdata.txt new file mode 100755 index 0000000..c8bea67 --- /dev/null +++ b/test/PySpike_testdata.txt @@ -0,0 +1,43 @@ +64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 +65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 +# test comment +69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 +59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 +# empty line + +59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 +66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 +66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 +63.764 318.45 697.48 936.97 1059.3 1325 1687.9 1944.7 2132.5 2377.1 2713.1 2976.6 3196.8 3442.6 3741.6 3998.3 +63.906 314.79 693.26 937.12 1065.9 1315.8 1584.3 1821.5 2126.3 2396.8 2709.1 2967 3197.4 3444 3732.8 3849.5 +69.493 316.62 689.81 943.62 1071.9 1296.3 1654.8 1931.9 2127.5 2390.6 2708.9 2950.4 3194.8 3445.2 3670.1 3903.3 +61.789 317.53 555.82 813.15 1198.7 1448.7 1686.7 1943.5 2060.7 2311.4 2658.2 2900.2 3167.4 3418.2 3617.3 3771 +64.098 309.86 567.27 813.91 1182 1464.3 1576.8 1822.5 2063.1 2311.7 2655.8 2911.7 3168.3 3418.2 3586.4 3999.7 +68.59 
315.5 559.52 806.23 1182.5 1441.1 1567.2 1804.8 2074.9 2315.8 2655.1 2913.2 3165.9 3419.5 3648.1 3884.4 +66.507 314.42 556.42 814.83 1182.5 1440.3 1701.3 1911.1 2069.7 2319.3 2662.3 2903.2 3167.4 3418.5 3545 3893.9 +72.744 318.45 554.4 819.64 1186.9 1449.7 1676 1957.4 2051.4 2302.8 2657.8 2916.2 3169.4 3416.7 3570.4 3884.8 +64.779 324.42 560.56 828.99 1174.8 1439.9 1563.7 1790.6 2067.7 2287.6 2657.4 2905.2 3139.2 3389.1 3507.8 3807.5 +64.852 316.63 568.89 815.61 1198.3 1454.1 1710.6 1933.9 2091.5 2309.6 2660.9 2907.5 3137.2 3389.3 3617.2 +63.089 314.52 553.8 827.06 1183.9 1457.6 1558.9 1808.3 2064.5 2337.1 2653.6 2897 3143.7 3385.7 3668.7 3803.8 +62.23 315.16 564.35 812.15 1199.6 1448.9 1562.7 1839.1 2069.7 2308.9 2649.6 2919.7 3141 3389.9 3723.6 3882.2 +69.662 311.93 564.91 805.25 1209.7 1451.4 1691.9 1932.1 2044.2 2329.4 2657.1 2908.5 3142.8 3390.5 3597.3 3991.1 +183.42 431.34 562.41 809.57 1086.3 1308.9 1555.9 1831.3 2057 2326.9 2591.3 2831.4 3113.9 3367.9 3555.3 3956 +188.49 442.39 572.4 810.76 1065 1326.7 1564.3 1803.4 2060.4 2322.4 2607.2 2824.1 3110.2 3363.9 3644.1 3819.6 +177 437.76 569.82 819.66 1064.1 1309.2 1685.7 1957.5 2066.9 2313.8 2593.2 2847 3116.8 3364.5 3727.3 3881.6 +193.9 441.93 586.9 804.98 1062.5 1312.4 1542.4 1793.1 2073.9 2314.7 2587.8 2845.9 3112.4 3359.8 +193.01 440.26 555.64 814.08 1056.3 1315 1689.9 1961.4 2049.1 2305 2593.9 2847.5 3110.6 3361.1 3711.6 3914.7 +194.71 437.57 566.18 806.73 1069.2 1314.6 1682.7 1942.2 2061.8 2304.6 2607.6 2841.7 3082.9 3330.3 3679.7 3848.2 +184.88 441.22 570.92 794.35 1063.7 1309.9 1678.7 1930 2058 2321.3 2606.7 2845 3084.8 3337.3 3640 3952.1 +189.66 443.59 560.67 816.89 1070.4 1303.4 1550.1 1815.5 2057.6 2323.7 2587.1 2843.5 3086.6 3333.6 3618.2 3815.4 +190.41 440.77 568.96 808.56 1073.8 1322.1 1686.5 1952.8 2068.7 2335.7 2595.7 2845.4 3086 3333.5 3635.6 3939.3 +181.16 440.67 577.54 823.52 1052.5 1322.3 1578.4 1822.2 2079.4 2309.1 2596.9 2851.9 3083.5 3335.1 3531.2 3770.6 +181.09 434.97 687.15 943.33 
1192.9 1444 1699.4 1942 2194.6 2445.9 2549.4 2785.1 3056.5 3308.2 3620.5 3932.7 +186.7 446.53 688.18 942.86 1186.1 1441.9 1688.1 1922.2 2196.6 2455.3 2534.8 2776.5 3060.3 3309.4 3514.1 3808.6 +196.76 446 681.26 948.27 1195.8 1433.1 1699 1933 2201.2 2461.4 2547.4 2777.8 3055.7 3307.1 3590.6 3952.8 +200.68 427.11 695.67 946.42 1178.6 1440.1 1538.4 1809 2199.8 2432.5 2531.6 2793.2 3056.6 3308.6 3510.6 3928.1 +190.83 429.57 698.73 931.16 1190.6 1428.9 1698.3 1935 2176.8 2424.7 2530.5 2766.9 3062 3309.7 3689.8 +181.47 441.93 682.32 943.01 1190.1 1459.1 1570.6 1819.6 2189.8 2437.9 2543.3 2782.8 3025.9 3280.2 3581 3855.9 +191.38 435.69 702.76 935.62 1188.3 1438.3 1564.2 1823.9 2191.3 2444.9 2531.9 2782.4 3030.7 3275.7 3677.7 3829.2 +191.97 433.85 686.29 932.65 1183.1 1432.7 1563.9 1826.5 2214.1 2436.8 2529.8 2778.3 3028.3 3281.8 3582 3863.4 +189.51 453.21 691.3 940.86 1180.1 1430.1 1567.1 1835 2199 2448.2 2526.7 2773.8 3030.5 3280.1 3576.2 3893.6 +190.88 435.48 692.66 940.51 1189.5 1448.9 1575.1 1824.2 2190.8 2425.9 2530.6 2783.3 3033.3 3279.5 3733 3838.9 diff --git a/test/SPIKY_testdata.txt b/test/SPIKY_testdata.txt deleted file mode 100755 index c8bea67..0000000 --- a/test/SPIKY_testdata.txt +++ /dev/null @@ -1,43 +0,0 @@ -64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3 -65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944 -# test comment -69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1 -59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9 -# empty line - -59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9 -66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9 -66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 
2709.4 2977.6 3191.1 3449.6 3590.4 3831.2 -63.764 318.45 697.48 936.97 1059.3 1325 1687.9 1944.7 2132.5 2377.1 2713.1 2976.6 3196.8 3442.6 3741.6 3998.3 -63.906 314.79 693.26 937.12 1065.9 1315.8 1584.3 1821.5 2126.3 2396.8 2709.1 2967 3197.4 3444 3732.8 3849.5 -69.493 316.62 689.81 943.62 1071.9 1296.3 1654.8 1931.9 2127.5 2390.6 2708.9 2950.4 3194.8 3445.2 3670.1 3903.3 -61.789 317.53 555.82 813.15 1198.7 1448.7 1686.7 1943.5 2060.7 2311.4 2658.2 2900.2 3167.4 3418.2 3617.3 3771 -64.098 309.86 567.27 813.91 1182 1464.3 1576.8 1822.5 2063.1 2311.7 2655.8 2911.7 3168.3 3418.2 3586.4 3999.7 -68.59 315.5 559.52 806.23 1182.5 1441.1 1567.2 1804.8 2074.9 2315.8 2655.1 2913.2 3165.9 3419.5 3648.1 3884.4 -66.507 314.42 556.42 814.83 1182.5 1440.3 1701.3 1911.1 2069.7 2319.3 2662.3 2903.2 3167.4 3418.5 3545 3893.9 -72.744 318.45 554.4 819.64 1186.9 1449.7 1676 1957.4 2051.4 2302.8 2657.8 2916.2 3169.4 3416.7 3570.4 3884.8 -64.779 324.42 560.56 828.99 1174.8 1439.9 1563.7 1790.6 2067.7 2287.6 2657.4 2905.2 3139.2 3389.1 3507.8 3807.5 -64.852 316.63 568.89 815.61 1198.3 1454.1 1710.6 1933.9 2091.5 2309.6 2660.9 2907.5 3137.2 3389.3 3617.2 -63.089 314.52 553.8 827.06 1183.9 1457.6 1558.9 1808.3 2064.5 2337.1 2653.6 2897 3143.7 3385.7 3668.7 3803.8 -62.23 315.16 564.35 812.15 1199.6 1448.9 1562.7 1839.1 2069.7 2308.9 2649.6 2919.7 3141 3389.9 3723.6 3882.2 -69.662 311.93 564.91 805.25 1209.7 1451.4 1691.9 1932.1 2044.2 2329.4 2657.1 2908.5 3142.8 3390.5 3597.3 3991.1 -183.42 431.34 562.41 809.57 1086.3 1308.9 1555.9 1831.3 2057 2326.9 2591.3 2831.4 3113.9 3367.9 3555.3 3956 -188.49 442.39 572.4 810.76 1065 1326.7 1564.3 1803.4 2060.4 2322.4 2607.2 2824.1 3110.2 3363.9 3644.1 3819.6 -177 437.76 569.82 819.66 1064.1 1309.2 1685.7 1957.5 2066.9 2313.8 2593.2 2847 3116.8 3364.5 3727.3 3881.6 -193.9 441.93 586.9 804.98 1062.5 1312.4 1542.4 1793.1 2073.9 2314.7 2587.8 2845.9 3112.4 3359.8 -193.01 440.26 555.64 814.08 1056.3 1315 1689.9 1961.4 2049.1 2305 2593.9 2847.5 3110.6 3361.1 
3711.6 3914.7 -194.71 437.57 566.18 806.73 1069.2 1314.6 1682.7 1942.2 2061.8 2304.6 2607.6 2841.7 3082.9 3330.3 3679.7 3848.2 -184.88 441.22 570.92 794.35 1063.7 1309.9 1678.7 1930 2058 2321.3 2606.7 2845 3084.8 3337.3 3640 3952.1 -189.66 443.59 560.67 816.89 1070.4 1303.4 1550.1 1815.5 2057.6 2323.7 2587.1 2843.5 3086.6 3333.6 3618.2 3815.4 -190.41 440.77 568.96 808.56 1073.8 1322.1 1686.5 1952.8 2068.7 2335.7 2595.7 2845.4 3086 3333.5 3635.6 3939.3 -181.16 440.67 577.54 823.52 1052.5 1322.3 1578.4 1822.2 2079.4 2309.1 2596.9 2851.9 3083.5 3335.1 3531.2 3770.6 -181.09 434.97 687.15 943.33 1192.9 1444 1699.4 1942 2194.6 2445.9 2549.4 2785.1 3056.5 3308.2 3620.5 3932.7 -186.7 446.53 688.18 942.86 1186.1 1441.9 1688.1 1922.2 2196.6 2455.3 2534.8 2776.5 3060.3 3309.4 3514.1 3808.6 -196.76 446 681.26 948.27 1195.8 1433.1 1699 1933 2201.2 2461.4 2547.4 2777.8 3055.7 3307.1 3590.6 3952.8 -200.68 427.11 695.67 946.42 1178.6 1440.1 1538.4 1809 2199.8 2432.5 2531.6 2793.2 3056.6 3308.6 3510.6 3928.1 -190.83 429.57 698.73 931.16 1190.6 1428.9 1698.3 1935 2176.8 2424.7 2530.5 2766.9 3062 3309.7 3689.8 -181.47 441.93 682.32 943.01 1190.1 1459.1 1570.6 1819.6 2189.8 2437.9 2543.3 2782.8 3025.9 3280.2 3581 3855.9 -191.38 435.69 702.76 935.62 1188.3 1438.3 1564.2 1823.9 2191.3 2444.9 2531.9 2782.4 3030.7 3275.7 3677.7 3829.2 -191.97 433.85 686.29 932.65 1183.1 1432.7 1563.9 1826.5 2214.1 2436.8 2529.8 2778.3 3028.3 3281.8 3582 3863.4 -189.51 453.21 691.3 940.86 1180.1 1430.1 1567.1 1835 2199 2448.2 2526.7 2773.8 3030.5 3280.1 3576.2 3893.6 -190.88 435.48 692.66 940.51 1189.5 1448.9 1575.1 1824.2 2190.8 2425.9 2530.6 2783.3 3033.3 3279.5 3733 3838.9 diff --git a/test/test_spikes.py b/test/test_spikes.py index 456ed62..e008207 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -23,7 +23,7 @@ def test_auxiliary_spikes(): def test_load_from_txt(): - spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + spike_trains = 
spk.load_spike_trains_from_txt("PySpike_testdata.txt", time_interval=(0,4000)) assert len(spike_trains) == 40 @@ -39,7 +39,7 @@ def test_load_from_txt(): assert spike_train[-1] == 4000 # load without adding auxiliary spikes - spike_trains2 = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + spike_trains2 = spk.load_spike_trains_from_txt("PySpike_testdata.txt", time_interval=None) assert len(spike_trains2) == 40 # check auxiliary spikes @@ -59,12 +59,12 @@ def check_merged_spikes( merged_spikes, spike_trains ): # change to something impossible so we dont find this event again all_spikes[i] = -1.0 indices[i] = True - assert( indices.all() ) + assert indices.all() def test_merge_spike_trains(): # first load the data - spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", time_interval=(0,4000)) spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) -- cgit v1.2.3 From 4274c328a4927b392036d1c3b759b0787b05f300 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 13 Oct 2014 10:47:18 +0200 Subject: code formatting following PEP8 --- examples/isi_matrix.py | 2 -- examples/plot.py | 6 ++-- pyspike/distances.py | 77 ++++++++++++++++++++++++----------------------- pyspike/function.py | 35 +++++++++++---------- pyspike/python_backend.py | 72 ++++++++++++++++++++++---------------------- pyspike/spikes.py | 48 ++++++++++++++--------------- test/test_distance.py | 37 ++++++++++++----------- test/test_function.py | 28 ++++++++++------- test/test_spikes.py | 27 ++++++++--------- 9 files changed, 168 insertions(+), 164 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/examples/isi_matrix.py b/examples/isi_matrix.py index 2a4d075..db740dd 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -11,7 +11,6 @@ Distributed under the MIT License (MIT) from __future__ import print_function -import numpy as np import matplotlib.pyplot as plt import pyspike as 
spk @@ -25,4 +24,3 @@ m = spk.isi_distance_matrix(spike_trains) plt.imshow(m, interpolation='none') plt.show() - diff --git a/examples/plot.py b/examples/plot.py index 4ff75c4..5c3ad4a 100644 --- a/examples/plot.py +++ b/examples/plot.py @@ -15,11 +15,11 @@ import matplotlib.pyplot as plt import pyspike as spk -spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) +spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) # plot the spike time -for (i,spikes) in enumerate(spike_trains): +for (i, spikes) in enumerate(spike_trains): plt.plot(spikes, i*np.ones_like(spikes), 'o') f = spk.isi_distance(spike_trains[0], spike_trains[1]) diff --git a/pyspike/distances.py b/pyspike/distances.py index db04c4e..b2eec92 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -17,7 +17,7 @@ from pyspike import PieceWiseConstFunc, PieceWiseLinFunc # isi_distance ############################################################ def isi_distance(spikes1, spikes2): - """ Computes the instantaneous isi-distance S_isi (t) of the two given + """ Computes the instantaneous isi-distance S_isi (t) of the two given spike trains. The spike trains are expected to have auxiliary spikes at the beginning and end of the interval. Use the function add_auxiliary_spikes to add those spikes to the spike train. @@ -27,9 +27,9 @@ def isi_distance(spikes1, spikes2): - PieceWiseConstFunc describing the isi-distance. """ # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" # cython implementation @@ -53,9 +53,9 @@ def spike_distance(spikes1, spikes2): - PieceWiseLinFunc describing the spike-distance. 
""" # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" # cython implementation @@ -74,33 +74,33 @@ def multi_distance(spike_trains, pair_distance_func, indices=None): use isi_distance_multi or spike_distance_multi instead. Computes the multi-variate distance for a set of spike-trains using the - pair_dist_func to compute pair-wise distances. That is it computes the + pair_dist_func to compute pair-wise distances. That is it computes the average distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i,j}, where the sum goes over all pairs . Args: - spike_trains: list of spike trains - pair_distance_func: function computing the distance of two spike trains - - indices: list of indices defining which spike trains to use, + - indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) Returns: - The averaged multi-variate distance of all pairs """ - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." 
# generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] # start with first pair - (i,j) = pairs[0] + (i, j) = pairs[0] average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - for (i,j) in pairs[1:]: + for (i, j) in pairs[1:]: current_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - average_dist.add(current_dist) # add to the average - average_dist.mul_scalar(1.0/len(pairs)) # normalize + average_dist.add(current_dist) # add to the average + average_dist.mul_scalar(1.0/len(pairs)) # normalize return average_dist @@ -113,45 +113,46 @@ def multi_distance_par(spike_trains, pair_distance_func, indices=None): """ num_threads = 2 - lock = threading.Lock() + def run(spike_trains, index_pairs, average_dist): - (i,j) = index_pairs[0] + (i, j) = index_pairs[0] # print(i,j) this_avrg = pair_distance_func(spike_trains[i], spike_trains[j]) - for (i,j) in index_pairs[1:]: + for (i, j) in index_pairs[1:]: # print(i,j) current_dist = pair_distance_func(spike_trains[i], spike_trains[j]) this_avrg.add(current_dist) with lock: - average_dist.add(this_avrg) + average_dist.add(this_avrg) - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." 
# generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] num_pairs = len(pairs) # start with first pair - (i,j) = pairs[0] + (i, j) = pairs[0] average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) # remove the one we already computed pairs = pairs[1:] # distribute the rest into num_threads pieces - clustered_pairs = [ pairs[i::num_threads] for i in xrange(num_threads) ] + clustered_pairs = [pairs[n::num_threads] for n in xrange(num_threads)] threads = [] for pairs in clustered_pairs: - t = threading.Thread(target=run, args=(spike_trains, pairs, average_dist)) + t = threading.Thread(target=run, args=(spike_trains, pairs, + average_dist)) threads.append(t) t.start() for t in threads: t.join() - average_dist.mul_scalar(1.0/num_pairs) # normalize + average_dist.mul_scalar(1.0/num_pairs) # normalize return average_dist @@ -161,11 +162,11 @@ def multi_distance_par(spike_trains, pair_distance_func, indices=None): def isi_distance_multi(spike_trains, indices=None): """ computes the multi-variate isi-distance for a set of spike-trains. That is the average isi-distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i,j}, where the sum goes over all pairs Args: - spike_trains: list of spike trains - - indices: list of indices defining which spike trains to use, + - indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) Returns: - A PieceWiseConstFunc representing the averaged isi distance S @@ -177,13 +178,13 @@ def isi_distance_multi(spike_trains, indices=None): # spike_distance_multi ############################################################ def spike_distance_multi(spike_trains, indices=None): - """ computes the multi-variate spike-distance for a set of spike-trains. + """ computes the multi-variate spike-distance for a set of spike-trains. 
That is the average spike-distance of all pairs of spike-trains: - S(t) = 2/((N(N-1)) sum_{} S_{i,j}, + S(t) = 2/((N(N-1)) sum_{} S_{i, j}, where the sum goes over all pairs Args: - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use, + - indices: list of indices defining which spike-trains to use, if None all given spike trains are used (default=None) Returns: - A PieceWiseLinFunc representing the averaged spike distance S @@ -198,21 +199,21 @@ def isi_distance_matrix(spike_trains, indices=None): - indices: list of indices defining which spike-trains to use if None all given spike-trains are used (default=None) Return: - - a 2D array of size len(indices)*len(indices) containing the average + - a 2D array of size len(indices)*len(indices) containing the average pair-wise isi-distance """ - if indices==None: + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) # check validity of indices assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ - "Invalid index list." + "Invalid index list." # generate a list of possible index pairs - pairs = [(i,j) for i in indices for j in indices[i+1:]] + pairs = [(i, j) for i in indices for j in indices[i+1:]] distance_matrix = np.zeros((len(indices), len(indices))) - for i,j in pairs: + for i, j in pairs: d = isi_distance(spike_trains[i], spike_trains[j]).abs_avrg() - distance_matrix[i,j] = d - distance_matrix[j,i] = d + distance_matrix[i, j] = d + distance_matrix[j, i] = d return distance_matrix diff --git a/pyspike/function.py b/pyspike/function.py index 243ef67..8107538 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -1,7 +1,7 @@ """ function.py -Module containing classes representing piece-wise constant and piece-wise linear -functions. +Module containing classes representing piece-wise constant and piece-wise +linear functions. 
Copyright 2014, Mario Mulansky @@ -35,7 +35,7 @@ class PieceWiseConstFunc: Args: - other: another PieceWiseConstFunc object Returns: - True if the two functions are equal up to `decimal` decimals, + True if the two functions are equal up to `decimal` decimals, False otherwise """ eps = 10.0**(-decimal) @@ -61,23 +61,23 @@ class PieceWiseConstFunc: """ Computes the average of the piece-wise const function: a = 1/T int f(x) dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ (self.x[-1]-self.x[0]) def abs_avrg(self): - """ Computes the average of the abs value of the piece-wise const + """ Computes the average of the abs value of the piece-wise const function: a = 1/T int |f(x)| dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * np.abs(self.y)) / \ (self.x[-1]-self.x[0]) def add(self, f): - """ Adds another PieceWiseConst function to this function. + """ Adds another PieceWiseConst function to this function. Note: only functions defined on the same interval can be summed. Args: - f: PieceWiseConst function to be added. @@ -87,13 +87,13 @@ class PieceWiseConstFunc: # python implementation # from python_backend import add_piece_wise_const_python - # self.x, self.y = add_piece_wise_const_python(self.x, self.y, f.x, f.y) + # self.x, self.y = add_piece_wise_const_python(self.x, self.y, + # f.x, f.y) # cython version from cython_add import add_piece_wise_const_cython self.x, self.y = add_piece_wise_const_cython(self.x, self.y, f.x, f.y) - def mul_scalar(self, fac): """ Multiplies the function with a scalar value Args: @@ -113,10 +113,10 @@ class PieceWiseLinFunc: Args: - x: array of length N+1 defining the edges of the intervals of the pwc function. - - y1: array of length N defining the function values at the left of the - intervals. 
- - y2: array of length N defining the function values at the right of the + - y1: array of length N defining the function values at the left of the intervals. + - y2: array of length N defining the function values at the right of + the intervals. """ self.x = np.array(x) self.y1 = np.array(y1) @@ -128,7 +128,7 @@ class PieceWiseLinFunc: Args: - other: another PieceWiseLinFunc object Returns: - True if the two functions are equal up to `decimal` decimals, + True if the two functions are equal up to `decimal` decimals, False otherwise """ eps = 10.0**(-decimal) @@ -153,7 +153,7 @@ class PieceWiseLinFunc: """ Computes the average of the piece-wise linear function: a = 1/T int f(x) dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ (self.x[-1]-self.x[0]) @@ -162,13 +162,13 @@ class PieceWiseLinFunc: """ Computes the absolute average of the piece-wise linear function: a = 1/T int |f(x)| dx where T is the length of the interval. Returns: - - the average a. + - the average a. """ return np.sum((self.x[1:]-self.x[:-1]) * 0.5 * (np.abs(self.y1)+np.abs(self.y2)))/(self.x[-1]-self.x[0]) def add(self, f): - """ Adds another PieceWiseLin function to this function. + """ Adds another PieceWiseLin function to this function. Note: only functions defined on the same interval can be summed. Args: - f: PieceWiseLin function to be added. 
@@ -178,7 +178,7 @@ class PieceWiseLinFunc: # python implementation # from python_backend import add_piece_wise_lin_python - # self.x, self.y1, self.y2 = add_piece_wise_lin_python( + # self.x, self.y1, self.y2 = add_piece_wise_lin_python( # self.x, self.y1, self.y2, f.x, f.y1, f.y2) # cython version @@ -186,7 +186,6 @@ class PieceWiseLinFunc: self.x, self.y1, self.y2 = add_piece_wise_lin_cython( self.x, self.y1, self.y2, f.x, f.y1, f.y2) - def mul_scalar(self, fac): """ Multiplies the function with a scalar value Args: diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index e5b74e9..cf1a92f 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -1,6 +1,6 @@ """ python_backend.py -Collection of python functions that can be used instead of the cython +Collection of python functions that can be used instead of the cython implementation. Copyright 2014, Mario Mulansky @@ -21,18 +21,18 @@ def isi_distance_python(s1, s2): """ Plain Python implementation of the isi distance. 
""" # compute the interspike interval - nu1 = s1[1:]-s1[:-1] - nu2 = s2[1:]-s2[:-1] - + nu1 = s1[1:] - s1[:-1] + nu2 = s2[1:] - s2[:-1] + # compute the isi-distance - spike_events = np.empty(len(nu1)+len(nu2)) + spike_events = np.empty(len(nu1) + len(nu2)) spike_events[0] = s1[0] # the values have one entry less - the number of intervals between events - isi_values = np.empty(len(spike_events)-1) + isi_values = np.empty(len(spike_events) - 1) # add the distance of the first events # isi_values[0] = nu1[0]/nu2[0] - 1.0 if nu1[0] <= nu2[0] \ # else 1.0 - nu2[0]/nu1[0] - isi_values[0] = (nu1[0]-nu2[0])/max(nu1[0],nu2[0]) + isi_values[0] = (nu1[0] - nu2[0]) / max(nu1[0], nu2[0]) index1 = 0 index2 = 0 index = 1 @@ -49,28 +49,28 @@ def isi_distance_python(s1, s2): if index2 >= len(nu2): break spike_events[index] = s2[index2] - else: # s1[index1+1] == s2[index2+1] + else: # s1[index1 + 1] == s2[index2 + 1] index1 += 1 index2 += 1 if (index1 >= len(nu1)) or (index2 >= len(nu2)): break spike_events[index] = s1[index1] # compute the corresponding isi-distance - isi_values[index] = (nu1[index1]-nu2[index2]) / \ - max(nu1[index1], nu2[index2]) + isi_values[index] = (nu1[index1] - nu2[index2]) / \ + max(nu1[index1], nu2[index2]) index += 1 # the last event is the interval end spike_events[index] = s1[-1] - # use only the data added above + # use only the data added above # could be less than original length due to equal spike times - return PieceWiseConstFunc(spike_events[:index+1], isi_values[:index]) + return PieceWiseConstFunc(spike_events[:index + 1], isi_values[:index]) ############################################################ # get_min_dist ############################################################ def get_min_dist(spike_time, spike_train, start_index=0): - """ Returns the minimal distance |spike_time - spike_train[i]| + """ Returns the minimal distance |spike_time - spike_train[i]| with i>=start_index. 
""" d = abs(spike_time - spike_train[start_index]) @@ -99,18 +99,18 @@ def spike_distance_python(spikes1, spikes2): - PieceWiseLinFunc describing the spike-distance. """ # check for auxiliary spikes - first and last spikes should be identical - assert spikes1[0]==spikes2[0], \ + assert spikes1[0] == spikes2[0], \ "Given spike trains seems not to have auxiliary spikes!" - assert spikes1[-1]==spikes2[-1], \ + assert spikes1[-1] == spikes2[-1], \ "Given spike trains seems not to have auxiliary spikes!" # shorter variables t1 = spikes1 t2 = spikes2 - spike_events = np.empty(len(t1)+len(t2)-2) + spike_events = np.empty(len(t1) + len(t2) - 2) spike_events[0] = t1[0] - y_starts = np.empty(len(spike_events)-1) - y_ends = np.empty(len(spike_events)-1) + y_starts = np.empty(len(spike_events) - 1) + y_ends = np.empty(len(spike_events) - 1) index1 = 0 index2 = 0 @@ -133,9 +133,10 @@ def spike_distance_python(spikes1, spikes2): break spike_events[index] = t1[index1] # first calculate the previous interval end value - dt_p1 = dt_f1 # the previous time now was the following time before + dt_p1 = dt_f1 # the previous time was the following time before s1 = dt_p1 - s2 = (dt_p2*(t2[index2+1]-t1[index1]) + dt_f2*(t1[index1]-t2[index2])) / isi2 + s2 = (dt_p2*(t2[index2+1]-t1[index1]) + + dt_f2*(t1[index1]-t2[index2])) / isi2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) # now the next interval start value dt_f1 = get_min_dist(t1[index1+1], t2, index2) @@ -148,8 +149,9 @@ def spike_distance_python(spikes1, spikes2): break spike_events[index] = t2[index2] # first calculate the previous interval end value - dt_p2 = dt_f2 # the previous time now was the following time before - s1 = (dt_p1*(t1[index1+1]-t2[index2]) + dt_f1*(t2[index2]-t1[index1])) / isi1 + dt_p2 = dt_f2 # the previous time was the following time before + s1 = (dt_p1*(t1[index1+1]-t2[index2]) + + dt_f1*(t2[index2]-t1[index1])) / isi1 s2 = dt_p2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) # now 
the next interval start value @@ -158,7 +160,7 @@ def spike_distance_python(spikes1, spikes2): isi2 = t2[index2+1]-t2[index2] # s2 is the same as above, thus we can compute y2 immediately y_starts[index] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) - else: # t1[index1+1] == t2[index2+1] - generate only one event + else: # t1[index1+1] == t2[index2+1] - generate only one event index1 += 1 index2 += 1 if (index1+1 >= len(t1)) or (index2+1 >= len(t2)): @@ -183,9 +185,9 @@ def spike_distance_python(spikes1, spikes2): s1 = dt_p1*(t1[-1]-t1[-2])/isi1 s2 = dt_p2*(t2[-1]-t2[-2])/isi2 y_ends[index-1] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) - # use only the data added above + # use only the data added above # could be less than original length due to equal spike times - return PieceWiseLinFunc(spike_events[:index+1], + return PieceWiseLinFunc(spike_events[:index+1], y_starts[:index], y_ends[:index]) @@ -209,7 +211,7 @@ def add_piece_wise_const_python(x1, y1, x2, y2): elif x1[index1+1] > x2[index2+1]: index2 += 1 x_new[index] = x2[index2] - else: # x1[index1+1] == x2[index2+1]: + else: # x1[index1+1] == x2[index2+1]: index1 += 1 index2 += 1 x_new[index] = x1[index1] @@ -217,15 +219,13 @@ def add_piece_wise_const_python(x1, y1, x2, y2): # one array reached the end -> copy the contents of the other to the end if index1+1 < len(y1): x_new[index+1:index+1+len(x1)-index1-1] = x1[index1+1:] - y_new[index+1:index+1+len(y1)-index1-1] = y1[index1+1:] + \ - y2[-1] + y_new[index+1:index+1+len(y1)-index1-1] = y1[index1+1:] + y2[-1] index += len(x1)-index1-2 elif index2+1 < len(y2): x_new[index+1:index+1+len(x2)-index2-1] = x2[index2+1:] - y_new[index+1:index+1+len(y2)-index2-1] = y2[index2+1:] + \ - y1[-1] + y_new[index+1:index+1+len(y2)-index2-1] = y2[index2+1:] + y1[-1] index += len(x2)-index2-2 - else: # both arrays reached the end simultaneously + else: # both arrays reached the end simultaneously # only the last x-value missing x_new[index+1] = x1[-1] # the last value is again the 
end of the interval @@ -244,9 +244,9 @@ def add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): y2_new = np.empty_like(y1_new) x_new[0] = x1[0] y1_new[0] = y11[0] + y21[0] - index1 = 0 # index for self - index2 = 0 # index for f - index = 0 # index for new + index1 = 0 # index for self + index2 = 0 # index for f + index = 0 # index for new while (index1+1 < len(y11)) and (index2+1 < len(y21)): # print(index1+1, x1[index1+1], self.y[index1+1], x_new[index]) if x1[index1+1] < x2[index2+1]: @@ -272,7 +272,7 @@ def add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): x_new[index] = x2[index2] # and the starting value for the next interval y1_new[index] = y21[index2] + y - else: # x1[index1+1] == x2[index2+1]: + else: # x1[index1+1] == x2[index2+1]: y2_new[index] = y12[index1] + y22[index2] index1 += 1 index2 += 1 @@ -297,7 +297,7 @@ def add_piece_wise_lin_python(x1, y11, y12, x2, y21, y22): y1_new[index+1:index+1+len(y21)-index2-1] = y21[index2+1:] + y y2_new[index:index+len(y22)-index2-1] = y22[index2:-1] + y index += len(x2)-index2-2 - else: # both arrays reached the end simultaneously + else: # both arrays reached the end simultaneously # only the last x-value missing x_new[index+1] = x1[-1] # finally, the end value for the last interval diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6ea94de..c496ab8 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -31,11 +31,11 @@ def add_auxiliary_spikes(spike_train, time_interval): except: T_start = 0 T_end = time_interval - + assert spike_train[0] >= T_start, \ - "Spike train has events before the given start time" + "Spike train has events before the given start time" assert spike_train[-1] <= T_end, \ - "Spike train has events after the given end time" + "Spike train has events after the given end time" if spike_train[0] != T_start: spike_train = np.insert(spike_train, 0, T_start) if spike_train[-1] != T_end: @@ -64,16 +64,16 @@ def spike_train_from_string(s, sep=' ', sort=True): 
############################################################ # load_spike_trains_txt ############################################################ -def load_spike_trains_from_txt(file_name, time_interval=None, +def load_spike_trains_from_txt(file_name, time_interval=None, separator=' ', comment='#', sort=True): - """ Loads a number of spike trains from a text file. Each line of the text - file should contain one spike train as a sequence of spike times separated - by `separator`. Empty lines as well as lines starting with `comment` are - neglected. The `time_interval` represents the start and the end of the spike - trains and it is used to add auxiliary spikes at the beginning and end of - each spike train. However, if `time_interval == None`, no auxiliary spikes - are added, but note that the Spike and ISI distance both require auxiliary - spikes. + """ Loads a number of spike trains from a text file. Each line of the text + file should contain one spike train as a sequence of spike times separated + by `separator`. Empty lines as well as lines starting with `comment` are + neglected. The `time_interval` represents the start and the end of the + spike trains and it is used to add auxiliary spikes at the beginning and + end of each spike train. However, if `time_interval == None`, no auxiliary + spikes are added, but note that the Spike and ISI distance both require + auxiliary spikes. Args: - file_name: The name of the text file. 
- time_interval: A pair (T_start, T_end) of values representing the start @@ -87,10 +87,10 @@ def load_spike_trains_from_txt(file_name, time_interval=None, spike_trains = [] spike_file = open(file_name, 'r') for line in spike_file: - if len(line) > 1 and not line.startswith(comment): + if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented spike_train = spike_train_from_string(line, separator, sort) - if not time_interval == None: # add auxiliary spikes if times given + if time_interval is not None: # add auxil. spikes if times given spike_train = add_auxiliary_spikes(spike_train, time_interval) spike_trains.append(spike_train) return spike_trains @@ -109,19 +109,19 @@ def merge_spike_trains(spike_trains): # get the lengths of the spike trains lens = np.array([len(st) for st in spike_trains]) merged_spikes = np.empty(np.sum(lens)) - index = 0 # the index for merged_spikes - indices = np.zeros_like(lens) # indices of the spike trains - index_list = np.arange(len(indices)) # indices of indices of spike trains - # that have not yet reached the end + index = 0 # the index for merged_spikes + indices = np.zeros_like(lens) # indices of the spike trains + index_list = np.arange(len(indices)) # indices of indices of spike trains + # that have not yet reached the end # list of the possible events in the spike trains vals = [spike_trains[i][indices[i]] for i in index_list] while len(index_list) > 0: - i = np.argmin(vals) # the next spike is the minimum - merged_spikes[index] = vals[i] # put it to the merged spike train + i = np.argmin(vals) # the next spike is the minimum + merged_spikes[index] = vals[i] # put it to the merged spike train i = index_list[i] - index += 1 # next index of merged spike train - indices[i] += 1 # next index for the chosen spike train - if indices[i] >= lens[i]: # remove spike train index if ended + index += 1 # next index of merged spike train + indices[i] += 1 # next index for the chosen spike 
train + if indices[i] >= lens[i]: # remove spike train index if ended index_list = index_list[index_list != i] - vals = [spike_trains[i][indices[i]] for i in index_list] + vals = [spike_trains[n][indices[n]] for n in index_list] return merged_spikes diff --git a/test/test_distance.py b/test/test_distance.py index dafe693..3371cbd 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -22,8 +22,8 @@ def test_isi(): t2 = np.array([0.3, 0.45, 0.8, 0.9, 0.95]) # pen&paper calculation of the isi distance - expected_times = [0.0,0.2,0.3,0.4,0.45,0.6,0.7,0.8,0.9,0.95,1.0] - expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, + expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] + expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35, -0.25/0.35, -0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3] t1 = spk.add_auxiliary_spikes(t1, 1.0) @@ -36,10 +36,10 @@ def test_isi(): assert_array_almost_equal(f.y, expected_isi, decimal=14) # check with some equal spike times - t1 = np.array([0.2,0.4,0.6]) - t2 = np.array([0.1,0.4,0.5,0.6]) + t1 = np.array([0.2, 0.4, 0.6]) + t2 = np.array([0.1, 0.4, 0.5, 0.6]) - expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0] + expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0] expected_isi = [0.1/0.2, -0.1/0.3, -0.1/0.3, 0.1/0.2, 0.1/0.2, -0.0/0.5] t1 = spk.add_auxiliary_spikes(t1, 1.0) @@ -56,11 +56,11 @@ def test_spike(): t2 = np.array([0.3, 0.45, 0.8, 0.9, 0.95]) # pen&paper calculation of the spike distance - expected_times = [0.0,0.2,0.3,0.4,0.45,0.6,0.7,0.8,0.9,0.95,1.0] + expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] s1 = np.array([0.1, 0.1, (0.1*0.1+0.05*0.1)/0.2, 0.05, (0.05*0.15 * 2)/0.2, 0.15, 0.1, 0.1*0.2/0.3, 0.1**2/0.3, 0.1*0.05/0.3, 0.1]) - s2 = np.array([0.1, 0.1*0.2/0.3, 0.1, (0.1*0.05 * 2)/.15, 0.05, - (0.05*0.2+0.1*0.15)/0.35, (0.05*0.1+0.1*0.25)/0.35, + s2 = np.array([0.1, 0.1*0.2/0.3, 0.1, (0.1*0.05 * 2)/.15, 0.05, + (0.05*0.2+0.1*0.15)/0.35, 
(0.05*0.1+0.1*0.25)/0.35, 0.1, 0.1, 0.05, 0.05]) isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3]) isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05]) @@ -76,17 +76,17 @@ def test_spike(): assert_array_almost_equal(f.y2, expected_y2, decimal=14) # check with some equal spike times - t1 = np.array([0.2,0.4,0.6]) - t2 = np.array([0.1,0.4,0.5,0.6]) + t1 = np.array([0.2, 0.4, 0.6]) + t2 = np.array([0.1, 0.4, 0.5, 0.6]) - expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0] + expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0] s1 = np.array([0.1, 0.1*0.1/0.2, 0.1, 0.0, 0.0, 0.0, 0.0]) s2 = np.array([0.1*0.1/0.3, 0.1, 0.1*0.2/0.3, 0.0, 0.1, 0.0, 0.0]) isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4]) isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4]) expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2) expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2) - + t1 = spk.add_auxiliary_spikes(t1, 1.0) t2 = spk.add_auxiliary_spikes(t2, 1.0) f = spk.spike_distance(t1, t2) @@ -100,8 +100,8 @@ def check_multi_distance(dist_func, dist_func_multi): # generate spike trains: t1 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6, 0.7]), 1.0) t2 = spk.add_auxiliary_spikes(np.array([0.3, 0.45, 0.8, 0.9, 0.95]), 1.0) - t3 = spk.add_auxiliary_spikes(np.array([0.2,0.4,0.6]), 1.0) - t4 = spk.add_auxiliary_spikes(np.array([0.1,0.4,0.5,0.6]), 1.0) + t3 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6]), 1.0) + t4 = spk.add_auxiliary_spikes(np.array([0.1, 0.4, 0.5, 0.6]), 1.0) spike_trains = [t1, t2, t3, t4] f12 = dist_func(t1, t2) @@ -111,17 +111,17 @@ def check_multi_distance(dist_func, dist_func_multi): f24 = dist_func(t2, t4) f34 = dist_func(t3, t4) - f_multi = dist_func_multi(spike_trains, [0,1]) + f_multi = dist_func_multi(spike_trains, [0, 1]) assert f_multi.almost_equal(f12, decimal=14) f = copy(f12) f.add(f13) f.add(f23) f.mul_scalar(1.0/3) - f_multi = dist_func_multi(spike_trains, [0,1,2]) + f_multi = 
dist_func_multi(spike_trains, [0, 1, 2]) assert f_multi.almost_equal(f, decimal=14) - f.mul_scalar(3) # revert above normalization + f.mul_scalar(3) # revert above normalization f.add(f14) f.add(f24) f.add(f34) @@ -139,6 +139,7 @@ def test_multi_spike(): if __name__ == "__main__": - test_auxiliary_spikes() test_isi() test_spike() + test_multi_isi() + test_multi_spike() diff --git a/test/test_function.py b/test/test_function.py index c0fb3fd..ed7d6bc 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -10,18 +10,18 @@ Distributed under the MIT License (MIT) from __future__ import print_function import numpy as np from copy import copy -from numpy.testing import assert_equal, assert_almost_equal, \ - assert_array_almost_equal +from numpy.testing import assert_almost_equal, assert_array_almost_equal import pyspike as spk + def test_pwc(): # some random data x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) xp, yp = f.get_plottable_data() - + xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] yp_expected = [1.0, 1.0, -0.5, -0.5, 1.5, 1.5, 0.75, 0.75] assert_array_almost_equal(xp, xp_expected, decimal=16) @@ -51,17 +51,18 @@ def test_pwc_add(): f2.add(f) assert_array_almost_equal(f2.x, x_expected, decimal=16) assert_array_almost_equal(f2.y, y_expected, decimal=16) - + f1.add(f2) # same x, but y doubled assert_array_almost_equal(f1.x, f2.x, decimal=16) assert_array_almost_equal(f1.y, 2*f2.y, decimal=16) + def test_pwc_mul(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) - + f.mul_scalar(1.5) assert_array_almost_equal(f.x, x, decimal=16) assert_array_almost_equal(f.y, 1.5*np.array(y), decimal=16) @@ -75,15 +76,15 @@ def test_pwl(): y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) xp, yp = f.get_plottable_data() - + xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] yp_expected = [1.0, 1.5, -0.5, -0.4, 1.5, 1.5, 0.75, 0.25] assert_array_almost_equal(xp, 
xp_expected, decimal=16) assert_array_almost_equal(yp, yp_expected, decimal=16) - + avrg_expected = (1.25 - 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.avrg(), avrg_expected, decimal=16) - + abs_avrg_expected = (1.25 + 0.45 + 0.75 + 1.5*0.5) / 4.0 assert_almost_equal(f.abs_avrg(), abs_avrg_expected, decimal=16) @@ -113,7 +114,7 @@ def test_pwl_add(): assert_array_almost_equal(f2.x, x_expected, decimal=16) assert_array_almost_equal(f2.y1, y1_expected, decimal=16) assert_array_almost_equal(f2.y2, y2_expected, decimal=16) - + f1.add(f2) # same x, but y doubled assert_array_almost_equal(f1.x, f2.x, decimal=16) @@ -121,12 +122,12 @@ def test_pwl_add(): assert_array_almost_equal(f1.y2, 2*f2.y2, decimal=16) -def test_pwc_mul(): +def test_pwl_mul(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y1 = [1.0, -0.5, 1.5, 0.75] y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) - + f.mul_scalar(1.5) assert_array_almost_equal(f.x, x, decimal=16) assert_array_almost_equal(f.y1, 1.5*np.array(y1), decimal=16) @@ -137,3 +138,8 @@ def test_pwc_mul(): if __name__ == "__main__": test_pwc() + test_pwc_add() + test_pwc_mul() + test_pwl() + test_pwl_add() + test_pwl_mul() diff --git a/test/test_spikes.py b/test/test_spikes.py index e008207..349e0bf 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -23,13 +23,13 @@ def test_auxiliary_spikes(): def test_load_from_txt(): - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) assert len(spike_trains) == 40 # check the first spike train - spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1, - 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7, + spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1, + 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7, 3644.3, 3936.3, 4000] assert_equal(spike_times, spike_trains[0]) @@ -39,15 +39,15 @@ def 
test_load_from_txt(): assert spike_train[-1] == 4000 # load without adding auxiliary spikes - spike_trains2 = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=None) + spike_trains2 = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=None) assert len(spike_trains2) == 40 # check auxiliary spikes for i in xrange(len(spike_trains)): - assert len(spike_trains[i]) == len(spike_trains2[i])+2 # two spikes less + assert len(spike_trains[i]) == len(spike_trains2[i])+2 # 2 spikes less -def check_merged_spikes( merged_spikes, spike_trains ): +def check_merged_spikes(merged_spikes, spike_trains): # create a flat array with all spike events all_spikes = np.array([]) for spike_train in spike_trains: @@ -55,7 +55,7 @@ def check_merged_spikes( merged_spikes, spike_trains ): indices = np.zeros_like(all_spikes, dtype='bool') # check if we find all the spike events in the original spike trains for x in merged_spikes: - i = np.where(all_spikes == x)[0][0] # the first axis and the first entry + i = np.where(all_spikes == x)[0][0] # first axis and first entry # change to something impossible so we dont find this event again all_spikes[i] = -1.0 indices[i] = True @@ -64,23 +64,22 @@ def check_merged_spikes( merged_spikes, spike_trains ): def test_merge_spike_trains(): # first load the data - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0,4000)) + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) # test if result is sorted assert((spikes == np.sort(spikes)).all()) # check merging - check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] ) + check_merged_spikes(spikes, [spike_trains[0], spike_trains[1]]) spikes = spk.merge_spike_trains(spike_trains) # test if result is sorted assert((spikes == np.sort(spikes)).all()) # check merging - check_merged_spikes( spikes, spike_trains ) + 
check_merged_spikes(spikes, spike_trains) if __name__ == "main": test_auxiliary_spikes() test_load_from_txt() test_merge_spike_trains() - -- cgit v1.2.3 From 5ce807943fab2ba233cff661e34e4d6a83397b99 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 13 Oct 2014 11:03:42 +0200 Subject: changed to BSD license --- License | 25 ++++++++----------------- examples/isi_matrix.py | 2 +- examples/merge.py | 2 +- examples/plot.py | 2 +- pyspike/__init__.py | 2 +- pyspike/cython_distance.pyx | 2 +- pyspike/distances.py | 2 +- pyspike/function.py | 2 +- pyspike/python_backend.py | 2 +- pyspike/spikes.py | 2 +- test/test_distance.py | 2 +- test/test_function.py | 2 +- test/test_spikes.py | 2 +- 13 files changed, 20 insertions(+), 29 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/License b/License index 95d0405..472deac 100644 --- a/License +++ b/License @@ -1,21 +1,12 @@ -The MIT License (MIT) +BSD License -Copyright (c) 2014 Mario Mulansky, +Copyright (c) 2014, Mario Mulansky +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/examples/isi_matrix.py b/examples/isi_matrix.py index db740dd..7bf1cf9 100644 --- a/examples/isi_matrix.py +++ b/examples/isi_matrix.py @@ -5,7 +5,7 @@ trains. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/examples/merge.py b/examples/merge.py index 726d32b..2550cdb 100644 --- a/examples/merge.py +++ b/examples/merge.py @@ -4,7 +4,7 @@ Simple example showing the merging of two spike trains. 
Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/examples/plot.py b/examples/plot.py index 5c3ad4a..da53670 100644 --- a/examples/plot.py +++ b/examples/plot.py @@ -4,7 +4,7 @@ Simple example showing how to load and plot spike trains and their distances. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 3867e6e..c58a6b1 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -1,7 +1,7 @@ """ Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ __all__ = ["function", "distances", "spikes"] diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index 4ab4381..ccf8060 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -12,7 +12,7 @@ improves the performance of spike_distance by a factor of 10! Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/distances.py b/pyspike/distances.py index b2eec92..3b9fe1f 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -4,7 +4,7 @@ Module containing several functions to compute spike distances Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ import numpy as np diff --git a/pyspike/function.py b/pyspike/function.py index 8107538..7722cc3 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -5,7 +5,7 @@ linear functions. 
Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index cf1a92f..a1f5ea2 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -5,7 +5,7 @@ implementation. Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index c496ab8..d390222 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -4,7 +4,7 @@ Module containing several function to load and transform spike trains Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ import numpy as np diff --git a/test/test_distance.py b/test/test_distance.py index 3371cbd..b500b2c 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -4,7 +4,7 @@ Tests the isi- and spike-distance computation Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ diff --git a/test/test_function.py b/test/test_function.py index ed7d6bc..a579796 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -4,7 +4,7 @@ Tests the PieceWiseConst and PieceWiseLinear functions Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function diff --git a/test/test_spikes.py b/test/test_spikes.py index 349e0bf..bf914c0 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -4,7 +4,7 @@ Test loading of spike trains from text files Copyright 2014, Mario Mulansky -Distributed under the MIT License (MIT) +Distributed under the BSD License """ from __future__ import print_function -- cgit v1.2.3 From cf91333e4aac74d96cca32042f4363e79e7ab051 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 14 Oct 2014 17:14:44 +0200 Subject: more docs --- 
Readme.md | 27 +++++++++++++++++++++++++++ pyspike/spikes.py | 3 ++- setup.py | 5 +---- 3 files changed, 30 insertions(+), 5 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/Readme.md b/Readme.md index a1b6e9e..55dbd33 100644 --- a/Readme.md +++ b/Readme.md @@ -64,6 +64,12 @@ If the time interval is provided (`time_interval is not None`), auxiliary spikes Furthermore, the spike trains are ordered via `np.sort` (disable this feature by providing `sort=False` as a parameter to the load function). As result, `load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file. +If you load spike trains yourself, i.e. from data files with different structure, you can use the helper function `add_auxiliary_spikes` to add the auxiliary spikes at the beginning and end of the observation interval. +Both the ISI and the SPIKE distance computation require the presence of auxiliary spikes, so make sure you have those in your spike trains: + + spike_train = spk.add_auxiliary_spikes(spike_train, (T_start, T_end)) + # you provide only a single value, it is interpreted as T_end, while T_start=0 + spike_train = spk.add_auxiliary_spikes(spike_train, T_end) ## Computing bi-variate distances @@ -74,9 +80,30 @@ As result, `load_spike_trains_from_txt` returns a *list of arrays* containing th >For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed ordered. >Make sure that all your spike trains are ordered. >If in doubt, use `spike_train = np.sort(spike_train)` to obtain a correctly ordered spike train. +> +>Furthermore, the spike trains should have auxiliary spikes at the beginning and end of the observation interval. +>You can ensure this by providing the `time_interval` in the `load_spike_trains_from_txt` function, or calling `add_auxiliary_spikes` for your spike trains. +>The spike trains must have *the same* observation interval! 
---------------------- +### ISI-distance + +The following code loads some exemplary spike trains, computes the dissimilarity profile ISI-distance of the first two spike trains, and plots it with matplotlib: + + import matplotlib.pyplot as plt + import pyspike as spk + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + time_interval=(0, 4000)) + isi_profile = spk.isi_distance(spike_trains[0], spike_trains[1]) + x, y = isi_profile.get_plottable_data() + plt.plot(x, np.abs(y), '--k') + print("ISI distance: %.8f" % isi_profil.abs_avrg()) + plt.show() + +The ISI-profile is a piece-wise constant function, there the function `isi_distance` returns an instance of the `PieceWiseConstFunc` class. +As above, this class allows you to obtain arrays that can be used to plot the function with `plt.plt`, but also to compute the absolute average, which amounts to the final scalar ISI-distance. ## Computing multi-variate distances diff --git a/pyspike/spikes.py b/pyspike/spikes.py index d390222..68c8bc1 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -20,7 +20,8 @@ def add_auxiliary_spikes(spike_train, time_interval): - time_interval: A pair (T_start, T_end) of values representing the start and end time of the spike train measurement or a single value representing the end time, the T_start is then assuemd as 0. Auxiliary spikes will be - added to the spike train at the beginning and end of this interval. + added to the spike train at the beginning and end of this interval, if they + are not yet present. Returns: - spike train with additional spikes at T_start and T_end. 
diff --git a/setup.py b/setup.py index 7c8e4e1..16a87ea 100644 --- a/setup.py +++ b/setup.py @@ -5,12 +5,9 @@ Handles the compilation of pyx source files Copyright 2014, Mario Mulansky """ -import os from distutils.core import setup from Cython.Build import cythonize -import numpy.distutils.intelccompiler - setup( - ext_modules = cythonize("pyspike/*.pyx") + ext_modules=cythonize("pyspike/*.pyx") ) -- cgit v1.2.3 From 3f810c231e661e9141c9c586ebd6d9d182488c92 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 16 Oct 2014 17:51:23 +0200 Subject: added sphinx doc generation --- doc/Makefile | 177 +++++++++++++++++++++++++++++++++ doc/conf.py | 271 +++++++++++++++++++++++++++++++++++++++++++++++++++ doc/index.rst | 22 +++++ doc/pyspike.rst | 38 ++++++++ pyspike/distances.py | 147 +++++++++++++++------------- pyspike/function.py | 126 +++++++++++++----------- pyspike/spikes.py | 58 +++++------ 7 files changed, 689 insertions(+), 150 deletions(-) create mode 100644 doc/Makefile create mode 100644 doc/conf.py create mode 100644 doc/index.rst create mode 100644 doc/pyspike.rst (limited to 'pyspike/spikes.py') diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 0000000..5acfcec --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
+ +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PySpike.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PySpike.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PySpike" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PySpike" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." 
+ +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
+ +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 0000000..48ebc7e --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# +# PySpike documentation build configuration file, created by +# sphinx-quickstart on Thu Oct 16 15:56:58 2014. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../pyspike')) + +def skip(app, what, name, obj, skip, options): + if name == "__init__": + return False + return skip + +def setup(app): + app.connect("autodoc-skip-member", skip) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.viewcode', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. 
+source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'PySpike' +copyright = u'2014, Mario Mulansky' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. 
+#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'PySpikedoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'PySpike.tex', u'PySpike Documentation', + u'Mario Mulansky', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. 
+#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'pyspike', u'PySpike Documentation', + [u'Mario Mulansky'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'PySpike', u'PySpike Documentation', + u'Mario Mulansky', 'PySpike', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False diff --git a/doc/index.rst b/doc/index.rst new file mode 100644 index 0000000..b94c162 --- /dev/null +++ b/doc/index.rst @@ -0,0 +1,22 @@ +.. PySpike documentation master file, created by + sphinx-quickstart on Thu Oct 16 15:56:58 2014. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to PySpike's documentation! +=================================== + +Reference: + +.. 
toctree:: + :maxdepth: 2 + + pyspike + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/doc/pyspike.rst b/doc/pyspike.rst new file mode 100644 index 0000000..39adea0 --- /dev/null +++ b/doc/pyspike.rst @@ -0,0 +1,38 @@ +pyspike package +=============== + +Submodules +---------- + +pyspike.distances module +------------------------ + +.. automodule:: pyspike.distances + :members: + :undoc-members: + :show-inheritance: + +pyspike.function module +----------------------- + +.. automodule:: pyspike.function + :members: + :undoc-members: + :show-inheritance: + +pyspike.spikes module +--------------------- + +.. automodule:: pyspike.spikes + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyspike + :members: + :undoc-members: + :show-inheritance: diff --git a/pyspike/distances.py b/pyspike/distances.py index 3e97b77..b0af24c 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -17,15 +17,17 @@ from pyspike import PieceWiseConstFunc, PieceWiseLinFunc # isi_profile ############################################################ def isi_profile(spikes1, spikes2): - """ Computes the isi-distance profile S_isi(t) of the two given spike - trains. Retruns the profile as a PieceWiseConstFunc object. The S_isi + """ Computes the isi-distance profile :math:`S_{isi}(t)` of the two given + spike trains. Retruns the profile as a PieceWiseConstFunc object. The S_isi values are defined positive S_isi(t)>=0. The spike trains are expected to have auxiliary spikes at the beginning and end of the interval. Use the function add_auxiliary_spikes to add those spikes to the spike train. - Args: - - spikes1, spikes2: ordered arrays of spike times with auxiliary spikes. - Returns: - - PieceWiseConstFunc describing the isi-distance. + + :param spikes1: ordered array of spike times with auxiliary spikes. 
+ :param spikes2: ordered array of spike times with auxiliary spikes. + :returns: The isi-distance profile :math:`S_{isi}(t)` + :rtype: :class:`pyspike.function.PieceWiseConstFunc` + """ # check for auxiliary spikes - first and last spikes should be identical assert spikes1[0] == spikes2[0], \ @@ -52,12 +54,15 @@ Falling back to slow python backend.") ############################################################ def isi_distance(spikes1, spikes2): """ Computes the isi-distance I of the given spike trains. The - isi-distance is the integral over the isi distance profile S_isi(t): - I = \int_^T S_isi(t) dt. - Args: - - spikes1, spikes2: ordered arrays of spike times with auxiliary spikes. - Returns: - - double value: The isi-distance I. + isi-distance is the integral over the isi distance profile + :math:`S_{isi}(t)`: + + .. math:: I = \int_0^T S_{isi}(t) dt. + + :param spikes1: ordered array of spike times with auxiliary spikes. + :param spikes2: ordered array of spike times with auxiliary spikes. + :returns: The isi-distance I. + :rtype: double """ return isi_profile(spikes1, spikes2).avrg() @@ -71,10 +76,12 @@ def spike_profile(spikes1, spikes2): values are defined positive S_spike(t)>=0. The spike trains are expected to have auxiliary spikes at the beginning and end of the interval. Use the function add_auxiliary_spikes to add those spikes to the spike train. - Args: - - spikes1, spikes2: ordered arrays of spike times with auxiliary spikes. - Returns: - - PieceWiseLinFunc describing the spike-distance. + + :param spikes1: ordered array of spike times with auxiliary spikes. + :param spikes2: ordered array of spike times with auxiliary spikes. + :returns: The spike-distance profile :math:`S_{spike}(t). 
+ :rtype: :class:`pyspike.function.PieceWiseLinFunc` + """ # check for auxiliary spikes - first and last spikes should be identical assert spikes1[0] == spikes2[0], \ @@ -104,18 +111,20 @@ def spike_distance(spikes1, spikes2): """ Computes the spike-distance S of the given spike trains. The spike-distance is the integral over the isi distance profile S_spike(t): S = \int_^T S_spike(t) dt. - Args: - - spikes1, spikes2: ordered arrays of spike times with auxiliary spikes. - Returns: - - double value: The spike-distance S. + + :param spikes1: ordered array of spike times with auxiliary spikes. + :param spikes2: ordered array of spike times with auxiliary spikes. + :returns: The spike-distance. + :rtype: double + """ return spike_profile(spikes1, spikes2).avrg() ############################################################ -# generic_profile_multi +# _generic_profile_multi ############################################################ -def generic_profile_multi(spike_trains, pair_distance_func, indices=None): +def _generic_profile_multi(spike_trains, pair_distance_func, indices=None): """ Internal implementation detail, don't call this function directly, use isi_profile_multi or spike_profile_multi instead. @@ -153,7 +162,7 @@ def generic_profile_multi(spike_trains, pair_distance_func, indices=None): ############################################################ # multi_distance_par ############################################################ -def multi_distance_par(spike_trains, pair_distance_func, indices=None): +def _multi_distance_par(spike_trains, pair_distance_func, indices=None): """ parallel implementation of the multi-distance. Not currently used as it does not improve the performance. """ @@ -210,14 +219,15 @@ def isi_profile_multi(spike_trains, indices=None): trains. 
That is the average isi-distance of all pairs of spike-trains: S_isi(t) = 2/((N(N-1)) sum_{} S_{isi}^{i,j}, where the sum goes over all pairs - Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike trains to use, - if None all given spike trains are used (default=None) - Returns: - - A PieceWiseConstFunc representing the averaged isi distance S_isi(t) + + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :type state: list or None + :returns: The averaged isi profile :math:`(t)` + :rtype: :class:`pyspike.function.PieceWiseConstFunc` """ - return generic_profile_multi(spike_trains, isi_profile, indices) + return _generic_profile_multi(spike_trains, isi_profile, indices) ############################################################ @@ -226,14 +236,14 @@ def isi_profile_multi(spike_trains, indices=None): def isi_distance_multi(spike_trains, indices=None): """ computes the multi-variate isi-distance for a set of spike-trains. That is the time average of the multi-variate spike profile: - S_isi = \int_0^T 2/((N(N-1)) sum_{} S_{isi}^{i,j}, + I = \int_0^T 2/((N(N-1)) sum_{} S_{isi}^{i,j}, where the sum goes over all pairs - Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike trains to use, - if None all given spike trains are used (default=None) - Returns: - - A double value representing the averaged isi distance S_isi + + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :returns: The time-averaged isi distance :math:`I` + :rtype: double """ return isi_profile_multi(spike_trains, indices).avrg() @@ -246,14 +256,14 @@ def spike_profile_multi(spike_trains, indices=None): trains. 
That is the average spike-distance of all pairs of spike-trains: S_spike(t) = 2/((N(N-1)) sum_{} S_{spike}^{i, j}, where the sum goes over all pairs - Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use, - if None all given spike trains are used (default=None) - Returns: - - A PieceWiseLinFunc representing the averaged spike distance S(t) + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :type indices: list or None + :returns: The averaged spike profile :math:`(t)` + :rtype: :class:`pyspike.function.PieceWiseLinFunc` """ - return generic_profile_multi(spike_trains, spike_profile, indices) + return _generic_profile_multi(spike_trains, spike_profile, indices) ############################################################ @@ -264,12 +274,13 @@ def spike_distance_multi(spike_trains, indices=None): That is the time average of the multi-variate spike profile: S_{spike} = \int_0^T 2/((N(N-1)) sum_{} S_{spike}^{i, j} dt where the sum goes over all pairs - Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use, - if None all given spike trains are used (default=None) - Returns: - - A double value representing the averaged spike distance S + + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :type indices: list or None + :returns: The averaged spike distance S. 
+ :rtype: double """ return spike_profile_multi(spike_trains, indices).avrg() @@ -277,7 +288,7 @@ def spike_distance_multi(spike_trains, indices=None): ############################################################ # generic_distance_matrix ############################################################ -def generic_distance_matrix(spike_trains, dist_function, indices=None): +def _generic_distance_matrix(spike_trains, dist_function, indices=None): """ Internal implementation detail. Don't use this function directly. Instead use isi_distance_matrix or spike_distance_matrix. Computes the time averaged distance of all pairs of spike-trains. @@ -311,15 +322,16 @@ def generic_distance_matrix(spike_trains, dist_function, indices=None): ############################################################ def isi_distance_matrix(spike_trains, indices=None): """ Computes the time averaged isi-distance of all pairs of spike-trains. - Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use - if None all given spike-trains are used (default=None) - Return: - - a 2D array of size len(indices)*len(indices) containing the average - pair-wise isi-distance + + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :type indices: list or None + :returns: 2D array with the pair wise time average isi distances + :math:`I_{ij}` + :rtype: np.array """ - return generic_distance_matrix(spike_trains, isi_distance, indices) + return _generic_distance_matrix(spike_trains, isi_distance, indices) ############################################################ @@ -327,12 +339,13 @@ def isi_distance_matrix(spike_trains, indices=None): ############################################################ def spike_distance_matrix(spike_trains, indices=None): """ Computes the time averaged spike-distance of all pairs of spike-trains. 
- Args: - - spike_trains: list of spike trains - - indices: list of indices defining which spike-trains to use - if None all given spike-trains are used (default=None) - Return: - - a 2D array of size len(indices)*len(indices) containing the average - pair-wise spike-distance + + :param spike_trains: list of spike trains + :param indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + :type indices: list or None + :returns: 2D array with the pair wise time average spike distances + :math:`S_{ij}` + :rtype: np.array """ - return generic_distance_matrix(spike_trains, spike_distance, indices) + return _generic_distance_matrix(spike_trains, spike_distance, indices) diff --git a/pyspike/function.py b/pyspike/function.py index b161034..ed47f27 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -16,15 +16,16 @@ import numpy as np ############################################################## # PieceWiseConstFunc ############################################################## -class PieceWiseConstFunc: +class PieceWiseConstFunc(object): """ A class representing a piece-wise constant function. """ def __init__(self, x, y): """ Constructs the piece-wise const function. - Args: - - x: array of length N+1 defining the edges of the intervals of the pwc - function. - - y: array of length N defining the function values at the intervals. + + :param x: array of length N+1 defining the edges of the intervals of + the pwc function. + :param y: array of length N defining the function values at the + intervals. 
""" # convert parameters to arrays, also ensures copying self.x = np.array(x) @@ -32,19 +33,19 @@ class PieceWiseConstFunc: def copy(self): """ Returns a copy of itself - Returns: - - PieceWiseConstFunc copy + + :rtype: :class:`PieceWiseConstFunc` """ return PieceWiseConstFunc(self.x, self.y) def almost_equal(self, other, decimal=14): """ Checks if the function is equal to another function up to `decimal` precision. - Args: - - other: another PieceWiseConstFunc object - Returns: - True if the two functions are equal up to `decimal` decimals, - False otherwise + + :param: other: another :class:`PieceWiseConstFunc` + :returns: True if the two functions are equal up to `decimal` decimals, + False otherwise + :rtype: bool """ eps = 10.0**(-decimal) return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ @@ -53,6 +54,14 @@ class PieceWiseConstFunc: def get_plottable_data(self): """ Returns two arrays containing x- and y-coordinates for immeditate plotting of the piece-wise function. + + :returns: (x_plot, y_plot) containing plottable data + :rtype: pair of np.array + + Example:: + + x, y = f.get_plottable_data() + plt.plot(x, y, '-o', label="Piece-wise const function") """ x_plot = np.empty(2*len(self.x)-2) @@ -67,9 +76,10 @@ class PieceWiseConstFunc: def avrg(self): """ Computes the average of the piece-wise const function: - a = 1/T int f(x) dx where T is the length of the interval. - Returns: - - the average a. + :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + + :returns: the average a. + :rtype: double """ return np.sum((self.x[1:]-self.x[:-1]) * self.y) / \ (self.x[-1]-self.x[0]) @@ -77,8 +87,9 @@ class PieceWiseConstFunc: def add(self, f): """ Adds another PieceWiseConst function to this function. Note: only functions defined on the same interval can be summed. - Args: - - f: PieceWiseConst function to be added. + + :param f: :class:`PieceWiseConstFunc` function to be added. 
+ :rtype: None """ assert self.x[0] == f.x[0], "The functions have different intervals" assert self.x[-1] == f.x[-1], "The functions have different intervals" @@ -99,8 +110,10 @@ that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ def mul_scalar(self, fac): """ Multiplies the function with a scalar value - Args: - - fac: Value to multiply + + :param fac: Value to multiply + :type fac: double + :rtype: None """ self.y *= fac @@ -113,13 +126,13 @@ class PieceWiseLinFunc: def __init__(self, x, y1, y2): """ Constructs the piece-wise linear function. - Args: - - x: array of length N+1 defining the edges of the intervals of the pwc - function. - - y1: array of length N defining the function values at the left of the - intervals. - - y2: array of length N defining the function values at the right of - the intervals. + + :param x: array of length N+1 defining the edges of the intervals of + the pwc function. + :param y1: array of length N defining the function values at the left + of the intervals. + :param y2: array of length N defining the function values at the right + of the intervals. """ # convert to array, which also ensures copying self.x = np.array(x) @@ -128,19 +141,19 @@ class PieceWiseLinFunc: def copy(self): """ Returns a copy of itself - Returns: - - PieceWiseLinFunc copy + + :rtype: :class`PieceWiseLinFunc` """ return PieceWiseLinFunc(self.x, self.y1, self.y2) def almost_equal(self, other, decimal=14): """ Checks if the function is equal to another function up to `decimal` precision. 
- Args: - - other: another PieceWiseLinFunc object - Returns: - True if the two functions are equal up to `decimal` decimals, - False otherwise + + :param: other: another :class:`PieceWiseLinFunc` + :returns: True if the two functions are equal up to `decimal` decimals, + False otherwise + :rtype: bool """ eps = 10.0**(-decimal) return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ @@ -150,6 +163,14 @@ class PieceWiseLinFunc: def get_plottable_data(self): """ Returns two arrays containing x- and y-coordinates for immeditate plotting of the piece-wise function. + + :returns: (x_plot, y_plot) containing plottable data + :rtype: pair of np.array + + Example:: + + x, y = f.get_plottable_data() + plt.plot(x, y, '-o', label="Piece-wise const function") """ x_plot = np.empty(2*len(self.x)-2) x_plot[0] = self.x[0] @@ -162,27 +183,20 @@ class PieceWiseLinFunc: def avrg(self): """ Computes the average of the piece-wise linear function: - a = 1/T int f(x) dx where T is the length of the interval. - Returns: - - the average a. + :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + + :returns: the average a. + :rtype: double """ return np.sum((self.x[1:]-self.x[:-1]) * 0.5*(self.y1+self.y2)) / \ (self.x[-1]-self.x[0]) - def abs_avrg(self): - """ Computes the absolute average of the piece-wise linear function: - a = 1/T int |f(x)| dx where T is the length of the interval. - Returns: - - the average a. - """ - return np.sum((self.x[1:]-self.x[:-1]) * 0.5 * - (np.abs(self.y1)+np.abs(self.y2)))/(self.x[-1]-self.x[0]) - def add(self, f): """ Adds another PieceWiseLin function to this function. Note: only functions defined on the same interval can be summed. - Args: - - f: PieceWiseLin function to be added. + + :param f: :class:`PieceWiseLinFunc` function to be added. 
+ :rtype: None """ assert self.x[0] == f.x[0], "The functions have different intervals" assert self.x[-1] == f.x[-1], "The functions have different intervals" @@ -209,8 +223,10 @@ that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ def mul_scalar(self, fac): """ Multiplies the function with a scalar value - Args: - - fac: Value to multiply + + :param fac: Value to multiply + :type fac: double + :rtype: None """ self.y1 *= fac self.y2 *= fac @@ -218,12 +234,12 @@ that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ def average_profile(profiles): """ Computes the average profile from the given ISI- or SPIKE-profiles. - Args: - - profiles: list of PieceWiseConstFunc or PieceWiseLinFunc representing - ISI- or SPIKE-profiles to be averaged - Returns: - - avrg_profile: PieceWiseConstFunc or PieceWiseLinFunc containing the - average profile. + + :param profiles: list of :class:`PieceWiseConstFunc` or + :class:`PieceWiseLinFunc` representing ISI- or + SPIKE-profiles to be averaged. + :returns: the averages profile :math:`` or :math:``. + :rtype: :class:`PieceWiseConstFunc` or :class:`PieceWiseLinFunc` """ assert len(profiles) > 1 diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 68c8bc1..6b6e2e7 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -15,15 +15,16 @@ import numpy as np ############################################################ def add_auxiliary_spikes(spike_train, time_interval): """ Adds spikes at the beginning and end of the given time interval. - Args: - - spike_train: ordered array of spike times - - time_interval: A pair (T_start, T_end) of values representing the start - and end time of the spike train measurement or a single value representing - the end time, the T_start is then assuemd as 0. Auxiliary spikes will be - added to the spike train at the beginning and end of this interval, if they - are not yet present. 
- Returns: - - spike train with additional spikes at T_start and T_end. + + :param spike_train: ordered array of spike times + :param time_interval: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement or + a single value representing the end time, the T_start + is then assuemd as 0. Auxiliary spikes will be added + to the spike train at the beginning and end of this + interval, if they are not yet present. + :type time_interval: pair of doubles or double + :returns: spike train with additional spikes at T_start and T_end. """ try: @@ -49,12 +50,11 @@ def add_auxiliary_spikes(spike_train, time_interval): ############################################################ def spike_train_from_string(s, sep=' ', sort=True): """ Converts a string of times into an array of spike times. - Args: - - s: the string with (ordered) spike times - - sep: The separator between the time numbers, default=' '. - - sort: If True, the spike times are order via `np.sort`, default=True. - Returns: - - array of spike times + + :param s: the string with (ordered) spike times + :param sep: The separator between the time numbers, default=' '. + :param sort: If True, the spike times are order via `np.sort`, default=True + :returns: array of spike times """ if sort: return np.sort(np.fromstring(s, sep=sep)) @@ -75,15 +75,18 @@ def load_spike_trains_from_txt(file_name, time_interval=None, end of each spike train. However, if `time_interval == None`, no auxiliary spikes are added, but note that the Spike and ISI distance both require auxiliary spikes. - Args: - - file_name: The name of the text file. - - time_interval: A pair (T_start, T_end) of values representing the start - and end time of the spike train measurement or a single value representing - the end time, the T_start is then assuemd as 0. Auxiliary spikes will be - added to the spike train at the beginning and end of this interval. 
- - separator: The character used to seprate the values in the text file. - - comment: Lines starting with this character are ignored. - - sort: If true, the spike times are order via `np.sort`, default=True. + + :param file_name: The name of the text file. + :param time_interval: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement + or a single value representing the end time, the + T_start is then assuemd as 0. Auxiliary spikes will + be added to the spike train at the beginning and end + of this interval. + :param separator: The character used to seprate the values in the text file + :param comment: Lines starting with this character are ignored. + :param sort: If true, the spike times are order via `np.sort`, default=True + :returns: list of spike trains """ spike_trains = [] spike_file = open(file_name, 'r') @@ -102,10 +105,9 @@ def load_spike_trains_from_txt(file_name, time_interval=None, ############################################################ def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. 
- Args: - - spike_trains: list of arrays of spike times - Returns: - - array with the merged spike times + + :param spike_trains: list of arrays of spike times + :returns: spike train with the merged spike times """ # get the lengths of the spike trains lens = np.array([len(st) for st in spike_trains]) -- cgit v1.2.3 From 110d9c0e596c7a87fdc1c890e48732acd98375d7 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 4 Nov 2014 09:38:42 +0100 Subject: change "sort" parameter to "is_sorted" --- Readme.rst | 4 ++-- pyspike/spikes.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/Readme.rst b/Readme.rst index b9f29e3..662cc1f 100644 --- a/Readme.rst +++ b/Readme.rst @@ -78,7 +78,7 @@ To quickly obtain spike trains from such files, PySpike provides the function :c This function expects the name of the data file as first parameter. Additionally, the time interval of the spike train measurement can be provided as a pair of start- and end-time values. If the time interval is provided (:code:`time_interval is not None`), auxiliary spikes at the start- and end-time of the interval are added to the spike trains. -Furthermore, the spike trains are ordered via :code:`np.sort` (disable this feature by providing :code:`sort=False` as a parameter to the load function). +Furthermore, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function). As result, :code:`load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file. If you load spike trains yourself, i.e. from data files with different structure, you can use the helper function :code:`add_auxiliary_spikes` to add the auxiliary spikes at the beginning and end of the observation interval. @@ -99,7 +99,7 @@ Computing bivariate distances profiles Spike trains are expected to be *sorted*! 
For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted. - Make sure that all your spike trains are sorted. + Make sure that all your spike trains are sorted, which is ensured if you use the `load_spike_trains_from_txt` function with the parameter `is_sorted=False`. If in doubt, use :code:`spike_train = np.sort(spike_train)` to obtain a correctly sorted spike train. Furthermore, the spike trains should have auxiliary spikes at the beginning and end of the observation interval. diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6b6e2e7..f7172c9 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -48,15 +48,16 @@ def add_auxiliary_spikes(spike_train, time_interval): ############################################################ # spike_train_from_string ############################################################ -def spike_train_from_string(s, sep=' ', sort=True): +def spike_train_from_string(s, sep=' ', is_sorted=False): """ Converts a string of times into an array of spike times. :param s: the string with (ordered) spike times :param sep: The separator between the time numbers, default=' '. 
- :param sort: If True, the spike times are order via `np.sort`, default=True + :param is_sorted: if True, the spike times are not sorted after loading, + if False, spike times are sorted with `np.sort` :returns: array of spike times """ - if sort: + if not(is_sorted): return np.sort(np.fromstring(s, sep=sep)) else: return np.fromstring(s, sep=sep) -- cgit v1.2.3 From 1b2aa84e7d642c7a5f4b99ca83b5ca25d6905960 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 21 Nov 2014 17:40:10 +0100 Subject: added spike generation function --- pyspike/__init__.py | 2 +- pyspike/cython_distance.pyx | 21 +++++++++++++++------ pyspike/spikes.py | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 7 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index d2d5b57..d700e7a 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -12,4 +12,4 @@ from distances import isi_profile, isi_distance, \ isi_profile_multi, isi_distance_multi, isi_distance_matrix, \ spike_profile_multi, spike_distance_multi, spike_distance_matrix from spikes import add_auxiliary_spikes, load_spike_trains_from_txt, \ - spike_train_from_string, merge_spike_trains + spike_train_from_string, merge_spike_trains, generate_poisson_spikes diff --git a/pyspike/cython_distance.pyx b/pyspike/cython_distance.pyx index 178fcba..779ff94 100644 --- a/pyspike/cython_distance.pyx +++ b/pyspike/cython_distance.pyx @@ -122,6 +122,15 @@ cdef inline double get_min_dist_cython(double spike_time, return d +############################################################ +# isi_avrg_cython +############################################################ +cdef inline double isi_avrg_cython(double isi1, double isi2) nogil: + return 0.5*(isi1+isi2)*(isi1+isi2) + # alternative definition to obtain ~ 0.5 for Poisson spikes + # return 0.5*(isi1*isi1+isi2*isi2) + + ############################################################ # spike_distance_cython 
############################################################ @@ -155,7 +164,7 @@ def spike_distance_cython(double[:] t1, isi2 = max(t2[1]-t2[0], t2[2]-t2[1]) s1 = dt_f1*(t1[1]-t1[0])/isi1 s2 = dt_f2*(t2[1]-t2[0])/isi2 - y_starts[0] = (s1*isi2 + s2*isi1) / ((isi1+isi2)**2/2) + y_starts[0] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) while True: # print(index, index1, index2) if t1[index1+1] < t2[index2+1]: @@ -169,12 +178,12 @@ def spike_distance_cython(double[:] t1, s1 = dt_p1 s2 = (dt_p2*(t2[index2+1]-t1[index1]) + dt_f2*(t1[index1]-t2[index2])) / isi2 - y_ends[index-1] = (s1*isi2 + s2*isi1)/(0.5*(isi1+isi2)*(isi1+isi2)) + y_ends[index-1] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) # now the next interval start value dt_f1 = get_min_dist_cython(t1[index1+1], t2, N2, index2) isi1 = t1[index1+1]-t1[index1] # s2 is the same as above, thus we can compute y2 immediately - y_starts[index] = (s1*isi2 + s2*isi1)/(0.5*(isi1+isi2)*(isi1+isi2)) + y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) elif t1[index1+1] > t2[index2+1]: index2 += 1 if index2+1 >= N2: @@ -185,13 +194,13 @@ def spike_distance_cython(double[:] t1, s1 = (dt_p1*(t1[index1+1]-t2[index2]) + dt_f1*(t2[index2]-t1[index1])) / isi1 s2 = dt_p2 - y_ends[index-1] = (s1*isi2 + s2*isi1) / (0.5*(isi1+isi2)*(isi1+isi2)) + y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) # now the next interval start value dt_f2 = get_min_dist_cython(t2[index2+1], t1, N1, index1) #s2 = dt_f2 isi2 = t2[index2+1]-t2[index2] # s2 is the same as above, thus we can compute y2 immediately - y_starts[index] = (s1*isi2 + s2*isi1)/(0.5*(isi1+isi2)*(isi1+isi2)) + y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) else: # t1[index1+1] == t2[index2+1] - generate only one event index1 += 1 index2 += 1 @@ -214,7 +223,7 @@ def spike_distance_cython(double[:] t1, isi2 = max(t2[N2-1]-t2[N2-2], t2[N2-2]-t2[N2-3]) s1 = dt_p1*(t1[N1-1]-t1[N1-2])/isi1 s2 = dt_p2*(t2[N2-1]-t2[N2-2])/isi2 - 
y_ends[index-1] = (s1*isi2 + s2*isi1) / (0.5*(isi1+isi2)*(isi1+isi2)) + y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) # end nogil # use only the data added above diff --git a/pyspike/spikes.py b/pyspike/spikes.py index f7172c9..aa25c48 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -129,3 +129,43 @@ def merge_spike_trains(spike_trains): index_list = index_list[index_list != i] vals = [spike_trains[n][indices[n]] for n in index_list] return merged_spikes + + +############################################################ +# generate_poisson_spikes +############################################################ +def generate_poisson_spikes(rate, time_interval, add_aux_spikes=True): + """ Generates a Poisson spike train with the given rate in the given time + interval + + :param rate: The rate of the spike trains + :param time_interval: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement or + a single value representing the end time, the T_start + is then assuemd as 0. Auxiliary spikes will be added + to the spike train at the beginning and end of this + interval, if they are not yet present. 
+ :type time_interval: pair of doubles or double + :returns: Poisson spike train + """ + try: + T_start = time_interval[0] + T_end = time_interval[1] + except: + T_start = 0 + T_end = time_interval + # roughly how many spikes are required to fill the interval + N = max(1, int(1.2 * rate * (T_end-T_start))) + N_append = max(1, int(0.1 * rate * (T_end-T_start))) + intervals = np.random.exponential(1.0/rate, N) + # make sure we have enough spikes + while T_start + sum(intervals) < T_end: + print T_start + sum(intervals) + intervals = np.append(intervals, + np.random.exponential(1.0/rate, N_append)) + spikes = T_start + np.cumsum(intervals) + spikes = spikes[spikes < T_end] + if add_aux_spikes: + return add_auxiliary_spikes(spikes, time_interval) + else: + return spikes -- cgit v1.2.3 From fed0ceec753fc1a7e5a1e20632de5a9800fe4fb1 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 19 Jan 2015 16:39:17 +0100 Subject: final version for spike sync --- examples/spike_sync.py | 30 +++++++---- pyspike/__init__.py | 4 +- pyspike/distances.py | 36 +++---------- pyspike/function.py | 58 +++++++++----------- pyspike/python_backend.py | 135 ++++++++++++++++++++++------------------------ pyspike/spikes.py | 4 +- test/SPIKE_Sync_Test.txt | 100 ++++++++++++++++++++++++++++++++++ test/test_distance.py | 79 ++++++++++++++------------- 8 files changed, 262 insertions(+), 184 deletions(-) create mode 100644 test/SPIKE_Sync_Test.txt (limited to 'pyspike/spikes.py') diff --git a/examples/spike_sync.py b/examples/spike_sync.py index 464dbb0..535f19f 100644 --- a/examples/spike_sync.py +++ b/examples/spike_sync.py @@ -5,28 +5,36 @@ import matplotlib.pyplot as plt import pyspike as spk -spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", +spike_trains = spk.load_spike_trains_from_txt("../test/SPIKE_Sync_Test.txt", time_interval=(0, 4000)) -print(spike_trains[0]) -print(spike_trains[1]) - -# plt.plot(spike_trains[0], np.ones_like(spike_trains[0]), 'o') -# 
plt.plot(spike_trains[1], np.zeros_like(spike_trains[1]), 'o') - plt.figure() f = spk.spike_sync_profile(spike_trains[0], spike_trains[1]) +# f = spk.spike_sync_profile(spikes1, spikes2) x, y = f.get_plottable_data() -plt.plot(x, y, '--k', label="SPIKE-SYNC profile") -print(x) -print(y) +plt.plot(x, y, '--ok', label="SPIKE-SYNC profile") +print(f.x) +print(f.y) +print(f.mp) + +print("Average:", f.avrg()) + f = spk.spike_profile(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() plt.plot(x, y, '-b', label="SPIKE-profile") -plt.legend(loc="upper left") +plt.axis([0, 4000, -0.1, 1.1]) +plt.legend(loc="center right") + +plt.figure() + +f = spk.spike_sync_profile_multi(spike_trains) +x, y = f.get_plottable_data() +plt.plot(x, y, '-k', label="SPIKE-SYNC profile") + +print("Average:", f.avrg()) plt.show() diff --git a/pyspike/__init__.py b/pyspike/__init__.py index fa90d99..74d52c5 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -6,8 +6,8 @@ Distributed under the BSD License __all__ = ["function", "distances", "spikes"] -from function import PieceWiseConstFunc, PieceWiseLinFunc, IntervalSequence,\ - average_profile +from function import PieceWiseConstFunc, PieceWiseLinFunc, \ + MultipleValueSequence, average_profile from distances import isi_profile, isi_distance, \ spike_profile, spike_distance, \ spike_sync_profile, spike_sync_distance, \ diff --git a/pyspike/distances.py b/pyspike/distances.py index 38c5cc2..5ee8261 100644 --- a/pyspike/distances.py +++ b/pyspike/distances.py @@ -11,7 +11,7 @@ import numpy as np import threading from functools import partial -from pyspike import PieceWiseConstFunc, PieceWiseLinFunc, IntervalSequence +from pyspike import PieceWiseConstFunc, PieceWiseLinFunc, MultipleValueSequence ############################################################ @@ -132,9 +132,7 @@ def spike_distance(spikes1, spikes2, interval=None): ############################################################ # spike_sync_profile 
############################################################ -def spike_sync_profile(spikes1, spikes2, k=3): - - assert k > 0 +def spike_sync_profile(spikes1, spikes2): # cython implementation try: @@ -148,34 +146,16 @@ def spike_sync_profile(spikes1, spikes2, k=3): from python_backend import coincidence_python \ as coincidence_impl - st, J = coincidence_impl(spikes1, spikes2) - - N = len(J) - - # compute the cumulative sum, include some extra values for boundary - # conditions - c = np.zeros(N + 2*k) - c[k:-k] = np.cumsum(J) - # set the boundary values - # on the left: c_0 = -c_1, c_{-1} = -c_2, ..., c{-k+1} = c_k - # on the right: c_{N+1} = c_N, c_{N+2} = 2*c_N - c_{N-1}, - # c_{N+2} = 2*c_N - c_{N-2}, ..., c_{N+k} = 2*c_N - c_{N-k+1} - for n in xrange(k): - c[k-n-1] = -c[k+n] - c[-k+n] = 2*c[-k-1] - c[-k-1-n] - # with the right boundary values, the differences become trivial - J_w = c[2*k:] - c[:-2*k] - # normalize to half the interval width - J_w *= 1.0/k + times, coincidences, multiplicity = coincidence_impl(spikes1, spikes2) - return IntervalSequence(st, J_w) + return MultipleValueSequence(times, coincidences, multiplicity) ############################################################ # spike_sync_distance ############################################################ -def spike_sync_distance(spikes1, spikes2, k=3): - return spike_sync_profile(spikes1, spikes2, k).avrg() +def spike_sync_distance(spikes1, spikes2): + return spike_sync_profile(spikes1, spikes2).avrg() ############################################################ @@ -336,7 +316,7 @@ def spike_profile_multi(spike_trains, indices=None): ############################################################ # spike_profile_multi ############################################################ -def spike_sync_profile_multi(spike_trains, indices=None, k=3): +def spike_sync_profile_multi(spike_trains, indices=None): """ Computes the multi-variate spike synchronization profile for a set of spike trains. 
That is the average spike-distance of all pairs of spike trains: @@ -351,7 +331,7 @@ def spike_sync_profile_multi(spike_trains, indices=None, k=3): :rtype: :class:`pyspike.function.PieceWiseConstFunc` """ - prof_func = partial(spike_sync_profile, k=k) + prof_func = partial(spike_sync_profile) average_dist, M = _generic_profile_multi(spike_trains, prof_func, indices) # average_dist.mul_scalar(1.0/M) # no normalization here! diff --git a/pyspike/function.py b/pyspike/function.py index f5a1133..f10c136 100644 --- a/pyspike/function.py +++ b/pyspike/function.py @@ -171,32 +171,32 @@ that PySpike is installed by running\n 'python setup.py build_ext --inplace'! \ ############################################################## -# IntervalSequence +# MultipleValueSequence ############################################################## -class IntervalSequence(object): +class MultipleValueSequence(object): """ A class representing a sequence of values defined in some interval. - This is very similar to a `PieceWiseConstFunc`, but with different - averaging and addition. """ - def __init__(self, x, y): - """ Constructs the interval sequence. + def __init__(self, x, y, multiplicity): + """ Constructs the value sequence. - :param x: array of length N+1 defining the edges of the intervals of - the intervals. - :param y: array of length N defining the values at the intervals. + :param x: array of length N defining the points at which the values are + defined. + :param y: array of length N degining the values at the points x. + :param multiplicity: array of length N defining the multiplicity of the + values. 
""" # convert parameters to arrays, also ensures copying self.x = np.array(x) self.y = np.array(y) - self.extra_zero_intervals = 0 + self.mp = np.array(multiplicity) def copy(self): """ Returns a copy of itself :rtype: :class:`IntervalSequence` """ - return IntervalSequence(self.x, self.y) + return MultipleValueSequence(self.x, self.y, self.mp) def almost_equal(self, other, decimal=14): """ Checks if the function is equal to another function up to `decimal` @@ -209,9 +209,10 @@ class IntervalSequence(object): """ eps = 10.0**(-decimal) return np.allclose(self.x, other.x, atol=eps, rtol=0.0) and \ - np.allclose(self.y, other.y, atol=eps, rtol=0.0) + np.allclose(self.y, other.y, atol=eps, rtol=0.0) and \ + np.allclose(self.mp, other.mp, atol=eps, rtol=0.0) - def get_plottable_data(self): + def get_plottable_data(self, k=0): """ Returns two arrays containing x- and y-coordinates for immeditate plotting of the interval sequence. @@ -224,17 +225,10 @@ class IntervalSequence(object): plt.plot(x, y, '-o', label="Piece-wise const function") """ - x_plot = np.empty(2*len(self.x)-2) - x_plot[0] = self.x[0] - x_plot[1::2] = self.x[1:] - x_plot[2::2] = self.x[1:-1] - y_plot = np.empty(2*len(self.y)) - y_plot[::2] = self.y - normalization = 1.0 * (len(self.y)-1) / (len(self.y) + - self.extra_zero_intervals-1) - y_plot[1::2] = self.y + if k > 0: + raise NotImplementedError() - return x_plot, y_plot * normalization + return 1.0*self.x, 1.0*self.y/self.mp def integral(self, interval=None): """ Returns the integral over the given interval. 
For the interval @@ -250,7 +244,7 @@ class IntervalSequence(object): if interval is None: # no interval given, integrate over the whole spike train # don't count the first value, which is zero by definition - a = np.sum(self.y) + a = 1.0*np.sum(self.y[1:-1]) else: raise NotImplementedError() return a @@ -270,15 +264,15 @@ class IntervalSequence(object): if interval is None: # no interval given, average over the whole spike train # don't count the first interval for normalization - return self.integral() / (len(self.y)-1+self.extra_zero_intervals) + return self.integral() / np.sum(self.mp[1:-1]) else: raise NotImplementedError() def add(self, f): - """ Adds another `IntervalSequence` function to this function. + """ Adds another `MultipleValueSequence` function to this function. Note: only functions defined on the same interval can be summed. - :param f: :class:`IntervalSequence` function to be added. + :param f: :class:`MultipleValueSequence` function to be added. :rtype: None """ assert self.x[0] == f.x[0], "The functions have different intervals" @@ -293,12 +287,12 @@ class IntervalSequence(object): # that PySpike is installed by running\n 'python setup.py build_ext --inplace'! 
\ # \n Falling back to slow python backend.") # use python backend - from python_backend import add_interval_sequence_python as \ - add_interval_sequence_impl + from python_backend import add_multiple_value_sequence_python as \ + add_multiple_value_sequence_impl - self.x, self.y, extra_intervals = \ - add_interval_sequence_impl(self.x, self.y, f.x, f.y) - self.extra_zero_intervals += extra_intervals + self.x, self.y, self.mp = \ + add_multiple_value_sequence_impl(self.x, self.y, self.mp, + f.x, f.y, f.mp) def mul_scalar(self, fac): """ Multiplies the function with a scalar value diff --git a/pyspike/python_backend.py b/pyspike/python_backend.py index 154d250..bbbd572 100644 --- a/pyspike/python_backend.py +++ b/pyspike/python_backend.py @@ -248,52 +248,69 @@ def cumulative_sync_python(spikes1, spikes2): def coincidence_python(spikes1, spikes2): def get_tau(spikes1, spikes2, i, j): - return 0.5*min([spikes1[i]-spikes1[i-1], spikes1[i+1]-spikes1[i], - spikes2[j]-spikes2[j-1], spikes2[j+1]-spikes2[j]]) + m = 1E100 # some huge number + if i < len(spikes1)-2: + m = min(m, spikes1[i+1]-spikes1[i]) + if j < len(spikes2)-2: + m = min(m, spikes2[j+1]-spikes2[j]) + if i > 1: + m = min(m, spikes1[i]-spikes1[i-1]) + if j > 1: + m = min(m, spikes2[j]-spikes2[j-1]) + return 0.5*m N1 = len(spikes1) N2 = len(spikes2) i = 0 j = 0 n = 0 - st = np.zeros(N1 + N2 - 2) - c = np.zeros(N1 + N2 - 3) - c[0] = 0 - st[0] = 0 - while n < N1 + N2: + st = np.zeros(N1 + N2 - 2) # spike times + c = np.zeros(N1 + N2 - 2) # coincidences + mp = np.ones(N1 + N2 - 2) # multiplicity + while n < N1 + N2 - 2: if spikes1[i+1] < spikes2[j+1]: i += 1 n += 1 tau = get_tau(spikes1, spikes2, i, j) st[n] = spikes1[i] - if spikes1[i]-spikes2[j] > tau: - c[n] = 0 - else: + if j > 0 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 c[n] = 1 + c[n-1] = 1 elif spikes1[i+1] > spikes2[j+1]: j += 1 n += 1 tau = get_tau(spikes1, spikes2, i, j) 
st[n] = spikes2[j] - if spikes2[j]-spikes1[i] > tau: - c[n] = 0 - else: + if i > 0 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 c[n] = 1 + c[n-1] = 1 else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains j += 1 i += 1 if i == N1-1 or j == N2-1: break n += 1 + # add only one event, but with coincidence 2 and multiplicity 2 st[n] = spikes1[i] - c[n] = 0 - n += 1 - st[n] = spikes1[i] - c[n] = 1 - #c[0] = c[2] + c[n] = 2 + mp[n] = 2 + + st = st[:n+2] + c = c[:n+2] + mp = mp[:n+2] + st[0] = spikes1[0] st[-1] = spikes1[-1] + c[0] = c[1] + c[-1] = c[-2] + mp[0] = mp[1] + mp[-1] = mp[-2] - return st, c + return st, c, mp ############################################################ @@ -341,83 +358,59 @@ def add_piece_wise_const_python(x1, y1, x2, y2): ############################################################ -# add_interval_sequence_python +# add_multiple_value_sequence_python ############################################################ -def add_interval_sequence_python(x1, y1, x2, y2): - yscale1 = np.empty_like(y1) - index2 = 1 - # s1 = (len(y1)+len(y2)-2.0) / (len(y1)-1.0) - # s2 = (len(y1)+len(y2)-2.0) / (len(y2)-1.0) - s1 = 1.0 - s2 = 1.0 - for i in xrange(len(y1)): - c = 1 - while index2 < len(x2)-1 and x2[index2] < x1[i+1]: - index2 += 1 - c += 1 - if index2 < len(x2)-1 and x2[index2] == x1[i+1]: - index2 += 1 - # c += 1 - yscale1[i] = s1/c - - yscale2 = np.empty_like(y2) - index1 = 1 - for i in xrange(len(y2)): - c = 1 - while index1 < len(x1)-1 and x1[index1] < x2[i+1]: - index1 += 1 - c += 1 - if index1 < len(x1)-1 and x1[index1] == x2[i+1]: - index1 += 1 - # c += 1 - yscale2[i] = s2/c +def add_multiple_value_sequence_python(x1, y1, mp1, x2, y2, mp2): x_new = np.empty(len(x1) + len(x2)) - y_new = np.empty(len(x_new)-1) + y_new = np.empty_like(x_new) + mp_new = np.empty_like(x_new) x_new[0] = x1[0] index1 = 0 index2 = 0 index = 0 - additional_intervals = 0 while 
(index1+1 < len(y1)) and (index2+1 < len(y2)): - y_new[index] = y1[index1]*yscale1[index1] + y2[index2]*yscale2[index2] - index += 1 - # print(index1+1, x1[index1+1], y1[index1+1], x_new[index]) if x1[index1+1] < x2[index2+1]: index1 += 1 + index += 1 x_new[index] = x1[index1] + y_new[index] = y1[index1] + mp_new[index] = mp1[index1] elif x1[index1+1] > x2[index2+1]: index2 += 1 + index += 1 x_new[index] = x2[index2] + y_new[index] = y2[index2] + mp_new[index] = mp2[index2] else: # x1[index1+1] == x2[index2+1] - # y_new[index] = y1[index1]*yscale1[index1] + \ - # y2[index2]*yscale2[index2] index1 += 1 - # x_new[index] = x1[index1] index2 += 1 - # index += 1 + index += 1 x_new[index] = x1[index1] - additional_intervals += 1 - y_new[index] = y1[index1]*yscale1[index1] + y2[index2]*yscale2[index2] + y_new[index] = y1[index1] + y2[index2] + mp_new[index] = mp1[index1] + mp2[index2] # one array reached the end -> copy the contents of the other to the end if index1+1 < len(y1): x_new[index+1:index+1+len(x1)-index1-1] = x1[index1+1:] - y_new[index+1:index+1+len(y1)-index1-1] = \ - y1[index1+1:]*yscale1[index1+1:] + y2[-1]*yscale2[-1] - index += len(x1)-index1-2 + y_new[index+1:index+1+len(x1)-index1-1] = y1[index1+1:] + mp_new[index+1:index+1+len(x1)-index1-1] = mp1[index1+1:] + index += len(x1)-index1-1 elif index2+1 < len(y2): x_new[index+1:index+1+len(x2)-index2-1] = x2[index2+1:] - y_new[index+1:index+1+len(y2)-index2-1] = \ - y2[index2+1:]*yscale2[index2+1:] + y1[-1]*yscale1[-1] - index += len(x2)-index2-2 - else: # both arrays reached the end simultaneously - # only the last x-value missing - x_new[index+1] = x1[-1] + y_new[index+1:index+1+len(x2)-index2-1] = y2[index2+1:] + mp_new[index+1:index+1+len(x2)-index2-1] = mp2[index2+1:] + index += len(x2)-index2-1 + # else: # both arrays reached the end simultaneously + # x_new[index+1] = x1[-1] + # y_new[index+1] = y1[-1] + y2[-1] + # mp_new[index+1] = mp1[-1] + mp2[-1] + + y_new[0] = y_new[1] + mp_new[0] = mp_new[1] + 
# the last value is again the end of the interval - # x_new[index+1] = x1[-1] # only use the data that was actually filled - - return x_new[:index+2], y_new[:index+1], additional_intervals + return x_new[:index+1], y_new[:index+1], mp_new[:index+1] ############################################################ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index aa25c48..6a3353e 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -67,7 +67,7 @@ def spike_train_from_string(s, sep=' ', is_sorted=False): # load_spike_trains_txt ############################################################ def load_spike_trains_from_txt(file_name, time_interval=None, - separator=' ', comment='#', sort=True): + separator=' ', comment='#', is_sorted=False): """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated by `separator`. Empty lines as well as lines starting with `comment` are @@ -94,7 +94,7 @@ def load_spike_trains_from_txt(file_name, time_interval=None, for line in spike_file: if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line, separator, sort) + spike_train = spike_train_from_string(line, separator, is_sorted) if time_interval is not None: # add auxil. 
spikes if times given spike_train = add_auxiliary_spikes(spike_train, time_interval) spike_trains.append(spike_train) diff --git a/test/SPIKE_Sync_Test.txt b/test/SPIKE_Sync_Test.txt new file mode 100644 index 0000000..b97f777 --- /dev/null +++ b/test/SPIKE_Sync_Test.txt @@ -0,0 +1,100 @@ +61.000000 171.000000 268.000000 278.000000 326.000000 373.000000 400.000000 577.000000 793.000000 796.000000 798.000000 936.000000 994.000000 1026.000000 1083.000000 1097.000000 1187.000000 1228.000000 1400.000000 1522.000000 1554.000000 1579.000000 1661.000000 1895.000000 2040.000000 2082.000000 2264.000000 2502.000000 2689.000000 2922.000000 3093.000000 3276.000000 3495.000000 3693.000000 3900.000000 + +195.000000 400.000000 518.000000 522.000000 565.000000 569.000000 572.000000 630.000000 802.000000 938.000000 1095.000000 1198.000000 1222.000000 1316.000000 1319.000000 1328.000000 1382.000000 1505.000000 1631.000000 1662.000000 1676.000000 1708.000000 1803.000000 1947.000000 1999.000000 2129.000000 2332.000000 2466.000000 2726.000000 2896.000000 3102.000000 3316.000000 3505.000000 3707.000000 3900.000000 + +45.000000 111.000000 282.000000 319.000000 366.000000 400.000000 570.000000 633.000000 673.000000 750.000000 796.000000 1014.000000 1096.000000 1167.000000 1180.000000 1237.000000 1341.000000 1524.000000 1571.000000 1574.000000 1590.000000 1610.000000 1832.000000 1869.000000 1949.000000 1968.000000 2353.000000 2497.000000 2713.000000 2868.000000 3095.000000 3302.000000 3525.000000 3704.000000 3900.000000 + +60.000000 135.000000 204.000000 260.000000 297.000000 361.000000 364.000000 400.000000 438.000000 631.000000 787.000000 794.000000 908.000000 927.000000 1205.000000 1251.000000 1315.000000 1463.000000 1546.000000 1548.000000 1569.000000 1604.000000 1705.000000 1733.000000 1994.000000 2146.000000 2306.000000 2554.000000 2691.000000 2905.000000 3090.000000 3296.000000 3508.000000 3702.000000 3900.000000 + +159.000000 186.000000 308.000000 331.000000 400.000000 624.000000 
758.000000 805.000000 811.000000 876.000000 1018.000000 1122.000000 1193.000000 1308.000000 1354.000000 1524.000000 1550.000000 1672.000000 1728.000000 1738.000000 1899.000000 1919.000000 1980.000000 1991.000000 2050.000000 2124.000000 2308.000000 2450.000000 2703.000000 2876.000000 3096.000000 3288.000000 3494.000000 3709.000000 3900.000000 + +23.000000 134.000000 278.000000 400.000000 443.000000 555.000000 589.000000 597.000000 750.000000 754.000000 807.000000 1207.000000 1302.000000 1386.000000 1390.000000 1391.000000 1411.000000 1467.000000 1564.000000 1621.000000 1672.000000 1707.000000 1752.000000 1889.000000 1983.000000 2026.000000 2277.000000 2527.000000 2685.000000 2903.000000 3124.000000 3326.000000 3499.000000 3695.000000 3900.000000 + +65.000000 120.000000 142.000000 200.000000 331.000000 400.000000 683.000000 701.000000 707.000000 788.000000 795.000000 880.000000 948.000000 972.000000 1197.000000 1282.000000 1354.000000 1451.000000 1543.000000 1549.000000 1555.000000 1624.000000 1723.000000 1766.000000 2023.000000 2071.000000 2285.000000 2478.000000 2766.000000 2883.000000 3100.000000 3292.000000 3502.000000 3700.000000 3900.000000 + +83.000000 122.000000 214.000000 354.000000 400.000000 505.000000 614.000000 621.000000 697.000000 788.000000 846.000000 871.000000 878.000000 1174.000000 1204.000000 1215.000000 1317.000000 1353.000000 1365.000000 1453.000000 1463.000000 1540.000000 1832.000000 2016.000000 2023.000000 2290.000000 2449.000000 2708.000000 2881.000000 3090.000000 3329.000000 3489.000000 3705.000000 3900.000000 + +56.000000 62.000000 143.000000 226.000000 259.000000 400.000000 439.000000 441.000000 569.000000 572.000000 639.000000 697.000000 808.000000 1162.000000 1178.000000 1250.000000 1360.000000 1427.000000 1598.000000 1667.000000 1671.000000 1780.000000 1865.000000 1902.000000 1972.000000 2092.000000 2318.000000 2548.000000 2741.000000 2888.000000 3096.000000 3304.000000 3518.000000 3705.000000 3900.000000 + +110.000000 116.000000 
215.000000 400.000000 462.000000 542.000000 602.000000 614.000000 619.000000 795.000000 1166.000000 1196.000000 1240.000000 1252.000000 1268.000000 1295.000000 1405.000000 1561.000000 1597.000000 1725.000000 1750.000000 1759.000000 1877.000000 1948.000000 2053.000000 2119.000000 2339.000000 2527.000000 2672.000000 2874.000000 3137.000000 3312.000000 3488.000000 3698.000000 3900.000000 + +141.000000 159.000000 332.000000 333.000000 400.000000 756.000000 801.000000 843.000000 1161.000000 1208.000000 1225.000000 1246.000000 1418.000000 1448.000000 1501.000000 1559.000000 1578.000000 1684.000000 1751.000000 1797.000000 1815.000000 1818.000000 1948.000000 1975.000000 1989.000000 2110.000000 2360.000000 2453.000000 2704.000000 2906.000000 3106.000000 3286.000000 3491.000000 3697.000000 3900.000000 + +61.000000 145.000000 151.000000 340.000000 400.000000 642.000000 741.000000 801.000000 901.000000 912.000000 939.000000 1072.000000 1180.000000 1216.000000 1271.000000 1336.000000 1344.000000 1584.000000 1608.000000 1617.000000 1648.000000 1695.000000 1789.000000 1835.000000 2053.000000 2089.000000 2223.000000 2531.000000 2688.000000 2901.000000 3114.000000 3268.000000 3496.000000 3703.000000 3900.000000 + +313.000000 378.000000 400.000000 548.000000 657.000000 691.000000 715.000000 728.000000 802.000000 813.000000 1092.000000 1203.000000 1237.000000 1388.000000 1562.000000 1566.000000 1573.000000 1659.000000 1781.000000 1788.000000 1821.000000 1825.000000 1835.000000 1985.000000 1993.000000 2152.000000 2308.000000 2492.000000 2681.000000 2890.000000 3101.000000 3305.000000 3492.000000 3694.000000 3900.000000 + +244.000000 311.000000 392.000000 400.000000 431.000000 441.000000 562.000000 577.000000 632.000000 791.000000 818.000000 875.000000 1020.000000 1059.000000 1134.000000 1164.000000 1201.000000 1238.000000 1273.000000 1387.000000 1562.000000 1609.000000 1831.000000 1949.000000 1961.000000 2088.000000 2329.000000 2509.000000 2691.000000 2902.000000 3096.000000 
3279.000000 3506.000000 3704.000000 3900.000000 + +11.000000 111.000000 159.000000 277.000000 334.000000 400.000000 480.000000 646.000000 804.000000 1122.000000 1129.000000 1178.000000 1198.000000 1233.000000 1359.000000 1374.000000 1411.000000 1476.000000 1477.000000 1571.000000 1582.000000 1622.000000 1706.000000 1867.000000 1988.000000 2094.000000 2233.000000 2512.000000 2671.000000 2931.000000 3111.000000 3292.000000 3488.000000 3691.000000 3900.000000 + +57.000000 114.000000 328.000000 400.000000 442.000000 582.000000 662.000000 752.000000 766.000000 795.000000 1035.000000 1115.000000 1204.000000 1242.000000 1261.000000 1277.000000 1295.000000 1300.000000 1333.000000 1398.000000 1571.000000 1594.000000 1743.000000 1765.000000 2076.000000 2094.000000 2319.000000 2518.000000 2683.000000 2933.000000 3109.000000 3317.000000 3492.000000 3696.000000 3900.000000 + +92.000000 102.000000 111.000000 190.000000 400.000000 446.000000 478.000000 630.000000 631.000000 805.000000 823.000000 918.000000 985.000000 1199.000000 1209.000000 1217.000000 1355.000000 1466.000000 1503.000000 1563.000000 1582.000000 1636.000000 1819.000000 1944.000000 1977.000000 2014.000000 2359.000000 2428.000000 2728.000000 2868.000000 3101.000000 3296.000000 3509.000000 3708.000000 3900.000000 + +34.000000 66.000000 70.000000 113.000000 135.000000 238.000000 284.000000 400.000000 528.000000 766.000000 805.000000 921.000000 994.000000 1045.000000 1137.000000 1180.000000 1193.000000 1481.000000 1625.000000 1660.000000 1699.000000 1764.000000 1809.000000 1861.000000 1967.000000 2095.000000 2267.000000 2518.000000 2719.000000 2885.000000 3081.000000 3252.000000 3484.000000 3705.000000 3900.000000 + +65.000000 90.000000 123.000000 199.000000 330.000000 400.000000 805.000000 1005.000000 1035.000000 1044.000000 1064.000000 1138.000000 1155.000000 1205.000000 1217.000000 1248.000000 1318.000000 1345.000000 1403.000000 1567.000000 1609.000000 1781.000000 1875.000000 1929.000000 2024.000000 2140.000000 
2258.000000 2477.000000 2747.000000 2890.000000 3120.000000 3325.000000 3510.000000 3708.000000 3900.000000 + +70.000000 221.000000 280.000000 400.000000 489.000000 786.000000 1016.000000 1027.000000 1029.000000 1145.000000 1186.000000 1195.000000 1256.000000 1304.000000 1314.000000 1476.000000 1618.000000 1657.000000 1730.000000 1748.000000 1802.000000 1812.000000 1832.000000 1947.000000 1999.000000 2027.000000 2288.000000 2532.000000 2679.000000 2919.000000 3077.000000 3316.000000 3516.000000 3705.000000 3900.000000 + +153.000000 400.000000 474.000000 532.000000 568.000000 693.000000 738.000000 798.000000 806.000000 949.000000 1077.000000 1083.000000 1098.000000 1169.000000 1172.000000 1192.000000 1517.000000 1530.000000 1538.000000 1560.000000 1582.000000 1699.000000 1981.000000 1982.000000 2171.000000 2312.000000 2475.000000 2680.000000 2887.000000 3119.000000 3300.000000 3502.000000 3701.000000 3900.000000 + +92.000000 152.000000 164.000000 400.000000 520.000000 619.000000 621.000000 647.000000 648.000000 808.000000 853.000000 865.000000 920.000000 949.000000 1148.000000 1225.000000 1231.000000 1348.000000 1375.000000 1635.000000 1646.000000 1686.000000 1711.000000 2004.000000 2079.000000 2347.000000 2501.000000 2709.000000 2930.000000 3061.000000 3319.000000 3494.000000 3690.000000 3900.000000 + +74.000000 103.000000 247.000000 265.000000 400.000000 495.000000 501.000000 534.000000 552.000000 557.000000 601.000000 604.000000 792.000000 1003.000000 1138.000000 1195.000000 1252.000000 1325.000000 1336.000000 1425.000000 1646.000000 1657.000000 1795.000000 1990.000000 1992.000000 2062.000000 2300.000000 2509.000000 2690.000000 2913.000000 3066.000000 3276.000000 3460.000000 3700.000000 3900.000000 + +45.000000 90.000000 156.000000 400.000000 468.000000 523.000000 577.000000 583.000000 708.000000 797.000000 815.000000 1052.000000 1063.000000 1189.000000 1215.000000 1218.000000 1266.000000 1288.000000 1299.000000 1512.000000 1519.000000 1584.000000 1769.000000 
1791.000000 1964.000000 2082.000000 2348.000000 2530.000000 2703.000000 2893.000000 3031.000000 3290.000000 3504.000000 3702.000000 3900.000000 + +140.000000 269.000000 400.000000 475.000000 492.000000 520.000000 569.000000 645.000000 727.000000 794.000000 819.000000 834.000000 957.000000 1122.000000 1210.000000 1374.000000 1471.000000 1485.000000 1515.000000 1574.000000 1668.000000 1732.000000 1743.000000 1917.000000 2041.000000 2104.000000 2294.000000 2453.000000 2662.000000 2894.000000 3128.000000 3301.000000 3489.000000 3705.000000 3900.000000 + +28.000000 96.000000 112.000000 400.000000 426.000000 477.000000 584.000000 763.000000 804.000000 815.000000 1089.000000 1175.000000 1218.000000 1366.000000 1394.000000 1506.000000 1553.000000 1564.000000 1592.000000 1712.000000 1755.000000 1788.000000 1814.000000 1816.000000 1997.000000 2072.000000 2345.000000 2487.000000 2741.000000 2881.000000 3074.000000 3310.000000 3521.000000 3707.000000 3900.000000 + +215.000000 286.000000 400.000000 461.000000 488.000000 489.000000 768.000000 796.000000 885.000000 919.000000 1188.000000 1253.000000 1432.000000 1476.000000 1521.000000 1524.000000 1566.000000 1590.000000 1684.000000 1714.000000 1733.000000 1776.000000 1816.000000 1943.000000 2016.000000 2031.000000 2308.000000 2488.000000 2642.000000 2832.000000 3120.000000 3293.000000 3507.000000 3702.000000 3900.000000 + +77.000000 229.000000 302.000000 369.000000 400.000000 401.000000 404.000000 418.000000 804.000000 1026.000000 1110.000000 1179.000000 1187.000000 1227.000000 1456.000000 1458.000000 1476.000000 1629.000000 1630.000000 1640.000000 1697.000000 1734.000000 1785.000000 1919.000000 1956.000000 2057.000000 2324.000000 2416.000000 2656.000000 2889.000000 3126.000000 3323.000000 3491.000000 3696.000000 3900.000000 + +244.000000 302.000000 400.000000 455.000000 533.000000 562.000000 673.000000 748.000000 791.000000 1120.000000 1136.000000 1191.000000 1235.000000 1238.000000 1296.000000 1336.000000 1447.000000 
1466.000000 1551.000000 1594.000000 1691.000000 1744.000000 1897.000000 1959.000000 2060.000000 2109.000000 2230.000000 2564.000000 2717.000000 2900.000000 3089.000000 3320.000000 3491.000000 3712.000000 3900.000000 + +3.000000 196.000000 199.000000 320.000000 339.000000 358.000000 400.000000 495.000000 690.000000 737.000000 760.000000 791.000000 849.000000 1027.000000 1194.000000 1220.000000 1242.000000 1313.000000 1354.000000 1435.000000 1523.000000 1621.000000 1775.000000 1788.000000 1999.000000 2074.000000 2245.000000 2478.000000 2750.000000 2893.000000 3113.000000 3302.000000 3485.000000 3690.000000 3900.000000 + +206.000000 234.000000 261.000000 277.000000 341.000000 374.000000 400.000000 465.000000 613.000000 672.000000 745.000000 793.000000 799.000000 917.000000 954.000000 1144.000000 1180.000000 1283.000000 1484.000000 1574.000000 1575.000000 1795.000000 1965.000000 1984.000000 2086.000000 2093.000000 2312.000000 2501.000000 2738.000000 2879.000000 3084.000000 3270.000000 3483.000000 3701.000000 3900.000000 + +154.000000 314.000000 400.000000 611.000000 615.000000 795.000000 823.000000 869.000000 908.000000 938.000000 960.000000 1024.000000 1049.000000 1068.000000 1185.000000 1420.000000 1441.000000 1496.000000 1610.000000 1709.000000 1712.000000 1740.000000 1885.000000 1917.000000 1992.000000 2079.000000 2224.000000 2508.000000 2713.000000 2861.000000 3096.000000 3300.000000 3509.000000 3696.000000 3900.000000 + +26.000000 51.000000 83.000000 121.000000 343.000000 400.000000 625.000000 695.000000 697.000000 783.000000 803.000000 933.000000 1014.000000 1135.000000 1158.000000 1210.000000 1548.000000 1589.000000 1662.000000 1663.000000 1674.000000 1677.000000 1733.000000 1801.000000 1978.000000 2027.000000 2276.000000 2477.000000 2687.000000 2946.000000 3108.000000 3293.000000 3503.000000 3702.000000 3900.000000 + +21.000000 39.000000 125.000000 198.000000 254.000000 400.000000 456.000000 510.000000 806.000000 881.000000 920.000000 1000.000000 1046.000000 
1067.000000 1129.000000 1143.000000 1188.000000 1438.000000 1552.000000 1603.000000 1754.000000 1761.000000 1943.000000 1960.000000 1980.000000 2068.000000 2246.000000 2544.000000 2731.000000 2923.000000 3060.000000 3271.000000 3517.000000 3700.000000 3900.000000 + +166.000000 237.000000 295.000000 300.000000 319.000000 369.000000 400.000000 407.000000 413.000000 428.000000 439.000000 804.000000 831.000000 899.000000 971.000000 1164.000000 1199.000000 1259.000000 1331.000000 1497.000000 1564.000000 1832.000000 1881.000000 1915.000000 1970.000000 2189.000000 2271.000000 2482.000000 2742.000000 2863.000000 3116.000000 3293.000000 3492.000000 3705.000000 3900.000000 + +298.000000 323.000000 400.000000 423.000000 526.000000 662.000000 799.000000 821.000000 830.000000 933.000000 989.000000 1190.000000 1200.000000 1227.000000 1251.000000 1306.000000 1543.000000 1574.000000 1589.000000 1690.000000 1697.000000 1849.000000 1938.000000 1951.000000 2027.000000 2059.000000 2315.000000 2456.000000 2703.000000 2944.000000 3103.000000 3307.000000 3497.000000 3693.000000 3900.000000 + +60.000000 172.000000 400.000000 413.000000 420.000000 600.000000 660.000000 690.000000 752.000000 789.000000 951.000000 1056.000000 1176.000000 1201.000000 1290.000000 1440.000000 1450.000000 1456.000000 1638.000000 1653.000000 1703.000000 1710.000000 1730.000000 1856.000000 2006.000000 2082.000000 2296.000000 2383.000000 2693.000000 2887.000000 3091.000000 3299.000000 3485.000000 3691.000000 3900.000000 + +20.000000 127.000000 326.000000 369.000000 400.000000 521.000000 588.000000 595.000000 700.000000 798.000000 799.000000 858.000000 913.000000 1101.000000 1193.000000 1379.000000 1432.000000 1440.000000 1482.000000 1486.000000 1575.000000 1577.000000 1792.000000 1820.000000 1957.000000 2097.000000 2309.000000 2493.000000 2639.000000 2854.000000 3109.000000 3294.000000 3488.000000 3713.000000 3900.000000 + +65.000000 119.000000 362.000000 400.000000 779.000000 803.000000 804.000000 897.000000 
938.000000 984.000000 1147.000000 1207.000000 1266.000000 1319.000000 1373.000000 1579.000000 1596.000000 1626.000000 1644.000000 1650.000000 1725.000000 1776.000000 1851.000000 1965.000000 2023.000000 2116.000000 2331.000000 2552.000000 2727.000000 2855.000000 3081.000000 3268.000000 3521.000000 3698.000000 3900.000000 + +4.000000 10.000000 50.000000 124.000000 151.000000 169.000000 314.000000 317.000000 400.000000 474.000000 549.000000 630.000000 704.000000 798.000000 1030.000000 1144.000000 1155.000000 1188.000000 1345.000000 1390.000000 1428.000000 1603.000000 1867.000000 1902.000000 1922.000000 1995.000000 2290.000000 2431.000000 2679.000000 2886.000000 3092.000000 3305.000000 3501.000000 3704.000000 3900.000000 + +31.000000 37.000000 44.000000 211.000000 400.000000 445.000000 454.000000 602.000000 641.000000 760.000000 802.000000 850.000000 945.000000 1079.000000 1104.000000 1149.000000 1201.000000 1305.000000 1537.000000 1568.000000 1613.000000 1702.000000 1805.000000 1958.000000 1969.000000 2112.000000 2300.000000 2532.000000 2680.000000 2952.000000 3124.000000 3303.000000 3500.000000 3695.000000 3900.000000 + +43.000000 259.000000 276.000000 342.000000 362.000000 375.000000 380.000000 400.000000 674.000000 800.000000 804.000000 809.000000 882.000000 947.000000 952.000000 1219.000000 1351.000000 1504.000000 1568.000000 1593.000000 1720.000000 1752.000000 1871.000000 1961.000000 2022.000000 2046.000000 2254.000000 2486.000000 2651.000000 2868.000000 3103.000000 3278.000000 3482.000000 3708.000000 3900.000000 + +1.000000 219.000000 227.000000 235.000000 241.000000 400.000000 606.000000 618.000000 645.000000 738.000000 797.000000 943.000000 1217.000000 1343.000000 1424.000000 1448.000000 1578.000000 1661.000000 1706.000000 1765.000000 1903.000000 1915.000000 1975.000000 1987.000000 2084.000000 2324.000000 2490.000000 2671.000000 2865.000000 3063.000000 3331.000000 3505.000000 3702.000000 3900.000000 + +103.000000 109.000000 356.000000 357.000000 400.000000 
501.000000 714.000000 788.000000 793.000000 810.000000 859.000000 974.000000 1109.000000 1172.000000 1238.000000 1252.000000 1291.000000 1319.000000 1479.000000 1559.000000 1598.000000 1678.000000 1753.000000 1768.000000 1940.000000 2100.000000 2331.000000 2600.000000 2758.000000 2889.000000 3073.000000 3292.000000 3487.000000 3707.000000 3900.000000 + +234.000000 362.000000 388.000000 399.000000 400.000000 407.000000 452.000000 483.000000 692.000000 721.000000 797.000000 809.000000 863.000000 1216.000000 1227.000000 1338.000000 1445.000000 1473.000000 1536.000000 1596.000000 1608.000000 1619.000000 1914.000000 1990.000000 2052.000000 2117.000000 2316.000000 2488.000000 2682.000000 2918.000000 3104.000000 3299.000000 3506.000000 3696.000000 3900.000000 + +31.000000 91.000000 400.000000 422.000000 545.000000 587.000000 751.000000 794.000000 828.000000 962.000000 963.000000 1032.000000 1073.000000 1166.000000 1174.000000 1188.000000 1320.000000 1423.000000 1462.000000 1589.000000 1625.000000 1677.000000 1706.000000 1939.000000 2023.000000 2103.000000 2292.000000 2507.000000 2745.000000 2921.000000 3088.000000 3297.000000 3506.000000 3698.000000 3900.000000 + +35.000000 92.000000 237.000000 296.000000 400.000000 515.000000 601.000000 613.000000 798.000000 852.000000 1201.000000 1248.000000 1257.000000 1286.000000 1429.000000 1616.000000 1633.000000 1656.000000 1778.000000 1819.000000 1838.000000 1864.000000 1903.000000 1918.000000 1991.000000 2106.000000 2315.000000 2455.000000 2690.000000 2891.000000 3084.000000 3280.000000 3488.000000 3698.000000 3900.000000 + +20.000000 25.000000 172.000000 223.000000 274.000000 295.000000 368.000000 372.000000 400.000000 493.000000 717.000000 775.000000 795.000000 1015.000000 1200.000000 1319.000000 1444.000000 1559.000000 1592.000000 1694.000000 1743.000000 1757.000000 1841.000000 1859.000000 2043.000000 2075.000000 2336.000000 2461.000000 2764.000000 2905.000000 3099.000000 3293.000000 3494.000000 3697.000000 3900.000000 + 
+76.000000 120.000000 130.000000 209.000000 396.000000 400.000000 559.000000 572.000000 671.000000 726.000000 803.000000 907.000000 1011.000000 1128.000000 1208.000000 1232.000000 1321.000000 1337.000000 1531.000000 1600.000000 1702.000000 1777.000000 1824.000000 1862.000000 1988.000000 1999.000000 2352.000000 2537.000000 2750.000000 2957.000000 3102.000000 3291.000000 3503.000000 3701.000000 3900.000000 + +135.000000 250.000000 302.000000 310.000000 393.000000 400.000000 417.000000 684.000000 730.000000 804.000000 981.000000 982.000000 1081.000000 1197.000000 1240.000000 1313.000000 1409.000000 1431.000000 1473.000000 1498.000000 1561.000000 1615.000000 1782.000000 1925.000000 1979.000000 2149.000000 2293.000000 2490.000000 2676.000000 2932.000000 3117.000000 3315.000000 3502.000000 3690.000000 3900.000000 + diff --git a/test/test_distance.py b/test/test_distance.py index d98069d..4f8f6e8 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -135,53 +135,23 @@ def test_spike_sync(): spikes2 = np.array([2.1]) spikes1 = spk.add_auxiliary_spikes(spikes1, 4.0) spikes2 = spk.add_auxiliary_spikes(spikes2, 4.0) - for k in xrange(1, 3): - assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2, k=k), - 0.5, decimal=16) + assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2), + 0.5, decimal=16) spikes2 = np.array([3.1]) spikes2 = spk.add_auxiliary_spikes(spikes2, 4.0) - for k in xrange(1, 3): - assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2, k=k), - 0.5, decimal=16) + assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2), + 0.5, decimal=16) spikes2 = np.array([1.1]) spikes2 = spk.add_auxiliary_spikes(spikes2, 4.0) - for k in xrange(1, 3): - assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2, k=k), - 0.5, decimal=16) + assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2), + 0.5, decimal=16) spikes2 = np.array([0.9]) spikes2 = spk.add_auxiliary_spikes(spikes2, 4.0) - for k in xrange(1, 3): - 
assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2, k=k), - 0.5, decimal=16) - - spikes1 = np.array([100, 300, 400, 405, 410, 500, 700, 800, - 805, 810, 815, 900]) - spikes2 = np.array([100, 200, 205, 210, 295, 350, 400, 510, - 600, 605, 700, 910]) - spikes3 = np.array([100, 180, 198, 295, 412, 420, 510, 640, - 695, 795, 820, 920]) - spikes1 = spk.add_auxiliary_spikes(spikes1, 1000) - spikes2 = spk.add_auxiliary_spikes(spikes2, 1000) - spikes3 = spk.add_auxiliary_spikes(spikes3, 1000) - for k in xrange(1, 10): - assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2, k=k), - 0.5, decimal=15) - assert_almost_equal(spk.spike_sync_distance(spikes1, spikes3, k=k), - 0.5, decimal=15) - assert_almost_equal(spk.spike_sync_distance(spikes2, spikes3, k=k), - 0.5, decimal=15) - - f1 = spk.spike_sync_profile(spikes1, spikes2, k=1) - f2 = spk.spike_sync_profile(spikes1, spikes3, k=1) - f3 = spk.spike_sync_profile(spikes2, spikes3, k=1) - f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3], k=1) - # hands on definition of the average multivariate spike synchronization - expected = (f1.integral() + f2.integral() + f3.integral()) / \ - (len(f1.y)+len(f2.y)+len(f3.y)-3) - assert_almost_equal(f.avrg(), expected, decimal=15) + assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2), + 0.5, decimal=16) def check_multi_profile(profile_func, profile_func_multi): @@ -226,6 +196,39 @@ def test_multi_spike(): check_multi_profile(spk.spike_profile, spk.spike_profile_multi) +def test_multi_spike_sync(): + # some basic multivariate check + spikes1 = np.array([100, 300, 400, 405, 410, 500, 700, 800, + 805, 810, 815, 900]) + spikes2 = np.array([100, 200, 205, 210, 295, 350, 400, 510, + 600, 605, 700, 910]) + spikes3 = np.array([100, 180, 198, 295, 412, 420, 510, 640, + 695, 795, 820, 920]) + spikes1 = spk.add_auxiliary_spikes(spikes1, 1000) + spikes2 = spk.add_auxiliary_spikes(spikes2, 1000) + spikes3 = spk.add_auxiliary_spikes(spikes3, 1000) + 
assert_almost_equal(spk.spike_sync_distance(spikes1, spikes2), + 0.5, decimal=15) + assert_almost_equal(spk.spike_sync_distance(spikes1, spikes3), + 0.5, decimal=15) + assert_almost_equal(spk.spike_sync_distance(spikes2, spikes3), + 0.5, decimal=15) + + f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3]) + # hands on definition of the average multivariate spike synchronization + # expected = (f1.integral() + f2.integral() + f3.integral()) / \ + # (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1])) + expected = 0.5 + assert_almost_equal(f.avrg(), expected, decimal=15) + + # multivariate regression test + spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt", + time_interval=(0, 4000)) + f = spk.spike_sync_profile_multi(spike_trains) + assert_equal(np.sum(f.y[1:-1]), 39932) + assert_equal(np.sum(f.mp[1:-1]), 85554) + + def check_dist_matrix(dist_func, dist_matrix_func): # generate spike trains: t1 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6, 0.7]), 1.0) -- cgit v1.2.3 From ea61fc2ed03e42b3ea159b7ef7886d005c90e29f Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 3 Feb 2015 15:13:22 +0100 Subject: update docs to the structural changes --- Readme.rst | 4 ++-- doc/conf.py | 4 ++-- doc/pyspike.rst | 47 +++++++++++++++++++++++++++++++++++------------ pyspike/DiscreteFunc.py | 6 +++--- pyspike/spikes.py | 2 +- 5 files changed, 43 insertions(+), 20 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/Readme.rst b/Readme.rst index bcf6fa9..debd32e 100644 --- a/Readme.rst +++ b/Readme.rst @@ -11,7 +11,7 @@ All computation intensive parts are implemented in C via cython_ to reach a comp PySpike provides the same fundamental functionality as the SPIKY_ framework for Matlab, which additionally contains spike-train generators, more spike train distance measures and many visualization routines. -All source codes are published under the BSD_License_. 
+All source codes are available on `Github `_ and are published under the BSD_License_. .. [#] Kreuz T, Haas JS, Morelli A, Abarbanel HDI, Politi A, *Measuring spike train synchrony.* J Neurosci Methods 165, 151 (2007) `[pdf] `_ @@ -201,7 +201,7 @@ The parameter :code:`interval` is optional and if neglected the whole spike trai SPIKE synchronization -.............. +..................... **Important note:** diff --git a/doc/conf.py b/doc/conf.py index 48ebc7e..5427bb1 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -57,7 +57,7 @@ master_doc = 'index' # General information about the project. project = u'PySpike' -copyright = u'2014, Mario Mulansky' +copyright = u'2014-2015, Mario Mulansky' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -254,7 +254,7 @@ man_pages = [ # dir menu entry, description, category) texinfo_documents = [ ('index', 'PySpike', u'PySpike Documentation', - u'Mario Mulansky', 'PySpike', 'One line description of project.', + u'Mario Mulansky', 'PySpike', 'Measure of spike train synchrony.', 'Miscellaneous'), ] diff --git a/doc/pyspike.rst b/doc/pyspike.rst index 39adea0..6aa36e7 100644 --- a/doc/pyspike.rst +++ b/doc/pyspike.rst @@ -4,35 +4,58 @@ pyspike package Submodules ---------- -pyspike.distances module ------------------------- +pyspike.isi_distance module +---------------------------------------- -.. automodule:: pyspike.distances +.. automodule:: pyspike.isi_distance :members: :undoc-members: :show-inheritance: -pyspike.function module ------------------------ +pyspike.spike_distance module +---------------------------------------- -.. automodule:: pyspike.function +.. automodule:: pyspike.spike_distance :members: :undoc-members: :show-inheritance: -pyspike.spikes module ---------------------- +pyspike.spike_sync module +---------------------------------------- -.. automodule:: pyspike.spikes +.. 
automodule:: pyspike.spike_sync + :members: + :undoc-members: + :show-inheritance: + +pyspike.PieceWiseConstFunc module +---------------------------------------- + +.. automodule:: pyspike.PieceWiseConstFunc + :members: + :undoc-members: + :show-inheritance: + +pyspike.PieceWiseLinFunc module +---------------------------------------- + +.. automodule:: pyspike.PieceWiseLinFunc :members: :undoc-members: :show-inheritance: +pyspike.DiscreteFunc module +---------------------------------------- -Module contents ---------------- +.. automodule:: pyspike.DiscreteFunc + :members: + :undoc-members: + :show-inheritance: + +pyspike.spikes module +---------------------------------------- -.. automodule:: pyspike +.. automodule:: pyspike.spikes :members: :undoc-members: :show-inheritance: diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py index 2283e03..bd13e1f 100644 --- a/pyspike/DiscreteFunc.py +++ b/pyspike/DiscreteFunc.py @@ -23,10 +23,10 @@ class DiscreteFunc(object): """ Constructs the discrete function. :param x: array of length N defining the points at which the values are - defined. + defined. :param y: array of length N degining the values at the points x. :param multiplicity: array of length N defining the multiplicity of the - values. + values. """ # convert parameters to arrays, also ensures copying self.x = np.array(x) @@ -174,7 +174,7 @@ class DiscreteFunc(object): def avrg(self, interval=None): """ Computes the average of the interval sequence: - :math:`a = 1/N sum f_n ` where N is the number of intervals. + :math:`a = 1/N sum f_n` where N is the number of intervals. 
:param interval: averaging interval given as a pair of floats, a sequence of pairs for averaging multiple intervals, or diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 6a3353e..c7a1e40 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -54,7 +54,7 @@ def spike_train_from_string(s, sep=' ', is_sorted=False): :param s: the string with (ordered) spike times :param sep: The separator between the time numbers, default=' '. :param is_sorted: if True, the spike times are not sorted after loading, - if False, spike times are sorted with `np.sort` + if False, spike times are sorted with `np.sort` :returns: array of spike times """ if not(is_sorted): -- cgit v1.2.3 From 06a72795731c69340685e4bc2a8379626343b56e Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Sun, 22 Mar 2015 11:53:50 +0100 Subject: no print statement --- pyspike/spikes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py index c7a1e40..9d7d6f4 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -160,7 +160,7 @@ def generate_poisson_spikes(rate, time_interval, add_aux_spikes=True): intervals = np.random.exponential(1.0/rate, N) # make sure we have enough spikes while T_start + sum(intervals) < T_end: - print T_start + sum(intervals) + # print T_start + sum(intervals) intervals = np.append(intervals, np.random.exponential(1.0/rate, N_append)) spikes = T_start + np.cumsum(intervals) -- cgit v1.2.3 From 3bf9e12e6b5667fb1ea72c969848dacaff3cb470 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 24 Apr 2015 14:58:39 +0200 Subject: further adjustments in spike sync --- pyspike/SpikeTrain.py | 2 +- pyspike/__init__.py | 4 +- pyspike/cython/cython_distance.pyx | 6 +- pyspike/spike_sync.py | 6 +- pyspike/spikes.py | 113 ++++++++++++------------------------- test/test_distance.py | 5 +- 6 files changed, 49 insertions(+), 87 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/SpikeTrain.py 
b/pyspike/SpikeTrain.py index 041f897..89520c9 100644 --- a/pyspike/SpikeTrain.py +++ b/pyspike/SpikeTrain.py @@ -9,7 +9,7 @@ import numpy as np import collections -class SpikeTrain: +class SpikeTrain(object): """ Class representing spike trains for the PySpike Module.""" def __init__(self, spike_times, interval): diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 76e58a1..a5f9f0a 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -21,5 +21,5 @@ from spike_sync import spike_sync_profile, spike_sync,\ spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix from psth import psth -from spikes import add_auxiliary_spikes, load_spike_trains_from_txt, \ - spike_train_from_string, merge_spike_trains, generate_poisson_spikes +from spikes import load_spike_trains_from_txt, spike_train_from_string, \ + merge_spike_trains, generate_poisson_spikes diff --git a/pyspike/cython/cython_distance.pyx b/pyspike/cython/cython_distance.pyx index 6d998b9..2841da8 100644 --- a/pyspike/cython/cython_distance.pyx +++ b/pyspike/cython/cython_distance.pyx @@ -337,10 +337,10 @@ def spike_distance_cython(double[:] t1, double[:] t2, # coincidence_python ############################################################ cdef inline double get_tau(double[:] spikes1, double[:] spikes2, - int i, int j, max_tau): + int i, int j, double max_tau): cdef double m = 1E100 # some huge number - cdef int N1 = len(spikes1)-1 - cdef int N2 = len(spikes2)-1 + cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 + cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 if i < N1 and i > -1: m = fmin(m, spikes1[i+1]-spikes1[i]) if j < N2 and j > -1: diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index bca6f73..8ddd32c 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -109,10 +109,10 @@ def spike_sync_profile_multi(spike_trains, indices=None, max_tau=None): """ prof_func = partial(spike_sync_profile, max_tau=max_tau) - average_dist, M = _generic_profile_multi(spike_trains, 
prof_func, + average_prof, M = _generic_profile_multi(spike_trains, prof_func, indices) # average_dist.mul_scalar(1.0/M) # no normalization here! - return average_dist + return average_prof ############################################################ @@ -122,7 +122,7 @@ def spike_sync_multi(spike_trains, indices=None, interval=None, max_tau=None): """ Computes the multi-variate spike synchronization value for a set of spike trains. - :param spike_trains: list of spike trains + :param spike_trains: list of :class:`pyspike.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type indices: list or None diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 9d7d6f4..128873d 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -8,82 +8,46 @@ Distributed under the BSD License """ import numpy as np - - -############################################################ -# add_auxiliary_spikes -############################################################ -def add_auxiliary_spikes(spike_train, time_interval): - """ Adds spikes at the beginning and end of the given time interval. - - :param spike_train: ordered array of spike times - :param time_interval: A pair (T_start, T_end) of values representing the - start and end time of the spike train measurement or - a single value representing the end time, the T_start - is then assuemd as 0. Auxiliary spikes will be added - to the spike train at the beginning and end of this - interval, if they are not yet present. - :type time_interval: pair of doubles or double - :returns: spike train with additional spikes at T_start and T_end. 
- - """ - try: - T_start = time_interval[0] - T_end = time_interval[1] - except: - T_start = 0 - T_end = time_interval - - assert spike_train[0] >= T_start, \ - "Spike train has events before the given start time" - assert spike_train[-1] <= T_end, \ - "Spike train has events after the given end time" - if spike_train[0] != T_start: - spike_train = np.insert(spike_train, 0, T_start) - if spike_train[-1] != T_end: - spike_train = np.append(spike_train, T_end) - return spike_train +from pyspike import SpikeTrain ############################################################ # spike_train_from_string ############################################################ -def spike_train_from_string(s, sep=' ', is_sorted=False): - """ Converts a string of times into an array of spike times. +def spike_train_from_string(s, interval, sep=' ', is_sorted=False): + """ Converts a string of times into a :class:`pyspike.SpikeTrain`. - :param s: the string with (ordered) spike times + :param s: the string with (ordered) spike times. + :param interval: interval defining the edges of the spike train. + Given as a pair of floats (T0, T1) or a single float T1, where T0=0 is + assumed. :param sep: The separator between the time numbers, default=' '. 
:param is_sorted: if True, the spike times are not sorted after loading, if False, spike times are sorted with `np.sort` - :returns: array of spike times + :returns: :class:`pyspike.SpikeTrain` """ if not(is_sorted): - return np.sort(np.fromstring(s, sep=sep)) + return SpikeTrain(np.sort(np.fromstring(s, sep=sep)), interval) else: - return np.fromstring(s, sep=sep) + return SpikeTrain(np.fromstring(s, sep=sep), interval) ############################################################ # load_spike_trains_txt ############################################################ -def load_spike_trains_from_txt(file_name, time_interval=None, +def load_spike_trains_from_txt(file_name, interval=None, separator=' ', comment='#', is_sorted=False): """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated by `separator`. Empty lines as well as lines starting with `comment` are - neglected. The `time_interval` represents the start and the end of the - spike trains and it is used to add auxiliary spikes at the beginning and - end of each spike train. However, if `time_interval == None`, no auxiliary - spikes are added, but note that the Spike and ISI distance both require - auxiliary spikes. + neglected. The `interval` represents the start and the end of the + spike trains. :param file_name: The name of the text file. - :param time_interval: A pair (T_start, T_end) of values representing the - start and end time of the spike train measurement - or a single value representing the end time, the - T_start is then assuemd as 0. Auxiliary spikes will - be added to the spike train at the beginning and end - of this interval. + :param interval: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement + or a single value representing the end time, the + T_start is then assuemd as 0. 
:param separator: The character used to seprate the values in the text file :param comment: Lines starting with this character are ignored. :param sort: If true, the spike times are order via `np.sort`, default=True @@ -94,9 +58,8 @@ def load_spike_trains_from_txt(file_name, time_interval=None, for line in spike_file: if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line, separator, is_sorted) - if time_interval is not None: # add auxil. spikes if times given - spike_train = add_auxiliary_spikes(spike_train, time_interval) + spike_train = spike_train_from_string(line, interval, + separator, is_sorted) spike_trains.append(spike_train) return spike_trains @@ -111,14 +74,14 @@ def merge_spike_trains(spike_trains): :returns: spike train with the merged spike times """ # get the lengths of the spike trains - lens = np.array([len(st) for st in spike_trains]) + lens = np.array([len(st.spikes) for st in spike_trains]) merged_spikes = np.empty(np.sum(lens)) index = 0 # the index for merged_spikes indices = np.zeros_like(lens) # indices of the spike trains index_list = np.arange(len(indices)) # indices of indices of spike trains # that have not yet reached the end # list of the possible events in the spike trains - vals = [spike_trains[i][indices[i]] for i in index_list] + vals = [spike_trains[i].spikes[indices[i]] for i in index_list] while len(index_list) > 0: i = np.argmin(vals) # the next spike is the minimum merged_spikes[index] = vals[i] # put it to the merged spike train @@ -127,33 +90,34 @@ def merge_spike_trains(spike_trains): indices[i] += 1 # next index for the chosen spike train if indices[i] >= lens[i]: # remove spike train index if ended index_list = index_list[index_list != i] - vals = [spike_trains[n][indices[n]] for n in index_list] - return merged_spikes + vals = [spike_trains[n].spikes[indices[n]] for n in index_list] + return SpikeTrain(merged_spikes, 
[spike_trains[0].t_start, + spike_trains[0].t_end]) ############################################################ # generate_poisson_spikes ############################################################ -def generate_poisson_spikes(rate, time_interval, add_aux_spikes=True): +def generate_poisson_spikes(rate, interval): """ Generates a Poisson spike train with the given rate in the given time interval :param rate: The rate of the spike trains - :param time_interval: A pair (T_start, T_end) of values representing the - start and end time of the spike train measurement or - a single value representing the end time, the T_start - is then assuemd as 0. Auxiliary spikes will be added - to the spike train at the beginning and end of this - interval, if they are not yet present. - :type time_interval: pair of doubles or double - :returns: Poisson spike train + :param interval: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement or + a single value representing the end time, the T_start + is then assuemd as 0. Auxiliary spikes will be added + to the spike train at the beginning and end of this + interval, if they are not yet present. 
+ :type interval: pair of doubles or double + :returns: Poisson spike train as a :class:`pyspike.SpikeTrain` """ try: - T_start = time_interval[0] - T_end = time_interval[1] + T_start = interval[0] + T_end = interval[1] except: T_start = 0 - T_end = time_interval + T_end = interval # roughly how many spikes are required to fill the interval N = max(1, int(1.2 * rate * (T_end-T_start))) N_append = max(1, int(0.1 * rate * (T_end-T_start))) @@ -165,7 +129,4 @@ def generate_poisson_spikes(rate, time_interval, add_aux_spikes=True): np.random.exponential(1.0/rate, N_append)) spikes = T_start + np.cumsum(intervals) spikes = spikes[spikes < T_end] - if add_aux_spikes: - return add_auxiliary_spikes(spikes, time_interval) - else: - return spikes + return SpikeTrain(spikes, interval) diff --git a/test/test_distance.py b/test/test_distance.py index dbb72f1..0fff840 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -250,7 +250,8 @@ def test_multi_spike_sync(): # multivariate regression test spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt", - time_interval=(0, 4000)) + interval=(0, 4000)) + print(spike_trains[0].spikes) f = spk.spike_sync_profile_multi(spike_trains) assert_equal(np.sum(f.y[1:-1]), 39932) assert_equal(np.sum(f.mp[1:-1]), 85554) @@ -339,4 +340,4 @@ if __name__ == "__main__": test_spike_sync() test_multi_isi() test_multi_spike() - test_multi_spike_sync() + # test_multi_spike_sync() -- cgit v1.2.3 From f7ad8e6b23f706a2371e2bc25b533b59f8dea137 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 24 Apr 2015 16:48:24 +0200 Subject: renamed interval -> edges in load functions --- pyspike/spikes.py | 20 ++++++++++---------- test/test_distance.py | 2 +- test/test_spikes.py | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 128873d..9401b6e 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -14,11 +14,11 @@ from pyspike 
import SpikeTrain ############################################################ # spike_train_from_string ############################################################ -def spike_train_from_string(s, interval, sep=' ', is_sorted=False): +def spike_train_from_string(s, edges, sep=' ', is_sorted=False): """ Converts a string of times into a :class:`pyspike.SpikeTrain`. :param s: the string with (ordered) spike times. - :param interval: interval defining the edges of the spike train. + :param edges: interval defining the edges of the spike train. Given as a pair of floats (T0, T1) or a single float T1, where T0=0 is assumed. :param sep: The separator between the time numbers, default=' '. @@ -27,15 +27,15 @@ def spike_train_from_string(s, interval, sep=' ', is_sorted=False): :returns: :class:`pyspike.SpikeTrain` """ if not(is_sorted): - return SpikeTrain(np.sort(np.fromstring(s, sep=sep)), interval) + return SpikeTrain(np.sort(np.fromstring(s, sep=sep)), edges) else: - return SpikeTrain(np.fromstring(s, sep=sep), interval) + return SpikeTrain(np.fromstring(s, sep=sep), edges) ############################################################ # load_spike_trains_txt ############################################################ -def load_spike_trains_from_txt(file_name, interval=None, +def load_spike_trains_from_txt(file_name, edges, separator=' ', comment='#', is_sorted=False): """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated @@ -44,10 +44,10 @@ def load_spike_trains_from_txt(file_name, interval=None, spike trains. :param file_name: The name of the text file. - :param interval: A pair (T_start, T_end) of values representing the - start and end time of the spike train measurement - or a single value representing the end time, the - T_start is then assuemd as 0. 
+ :param edges: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement + or a single value representing the end time, the + T_start is then assuemd as 0. :param separator: The character used to seprate the values in the text file :param comment: Lines starting with this character are ignored. :param sort: If true, the spike times are order via `np.sort`, default=True @@ -58,7 +58,7 @@ def load_spike_trains_from_txt(file_name, interval=None, for line in spike_file: if len(line) > 1 and not line.startswith(comment): # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line, interval, + spike_train = spike_train_from_string(line, edges, separator, is_sorted) spike_trains.append(spike_train) return spike_trains diff --git a/test/test_distance.py b/test/test_distance.py index 88cf40e..0059001 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -262,7 +262,7 @@ def test_multi_spike_sync(): # multivariate regression test spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt", - interval=[0, 4000]) + edges=[0, 4000]) # extract all spike times spike_times = np.array([]) for st in spike_trains: diff --git a/test/test_spikes.py b/test/test_spikes.py index 6e11c07..d4eb131 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -16,7 +16,7 @@ import pyspike as spk def test_load_from_txt(): spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt", - interval=(0, 4000)) + edges=(0, 4000)) assert len(spike_trains) == 40 # check the first spike train @@ -49,7 +49,7 @@ def check_merged_spikes(merged_spikes, spike_trains): def test_merge_spike_trains(): # first load the data spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt", - interval=(0, 4000)) + edges=(0, 4000)) merged_spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]]) # test if result is sorted -- cgit v1.2.3 From 
cc8ae1974454307de4c69d9bb2a860538f0adfef Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 27 Apr 2015 17:27:24 +0200 Subject: updated docs --- Readme.rst | 80 +++++++++++++++++++++---------------------- doc/pyspike.rst | 54 +++++++++++++++-------------- pyspike/DiscreteFunc.py | 12 +++---- pyspike/PieceWiseConstFunc.py | 12 +++---- pyspike/PieceWiseLinFunc.py | 12 +++---- pyspike/SpikeTrain.py | 41 ++++++++++++++++------ pyspike/isi_distance.py | 59 ++++++++++++++++--------------- pyspike/spike_distance.py | 58 +++++++++++++++---------------- pyspike/spike_sync.py | 30 ++++++++-------- pyspike/spikes.py | 32 +++++++---------- 10 files changed, 196 insertions(+), 194 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/Readme.rst b/Readme.rst index 03441fc..e80c0f7 100644 --- a/Readme.rst +++ b/Readme.rst @@ -19,6 +19,14 @@ All source codes are available on `Github `_. -To quickly obtain spike trains from such files, PySpike provides the function :code:`load_spike_trains_from_txt`. +To quickly obtain spike trains from such files, PySpike provides the function :func:`.load_spike_trains_from_txt`. .. code:: python @@ -88,22 +98,13 @@ To quickly obtain spike trains from such files, PySpike provides the function :c import pyspike as spk spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", - time_interval=(0, 4000)) + edges=(0, 4000)) This function expects the name of the data file as first parameter. -Additionally, the time interval of the spike train measurement can be provided as a pair of start- and end-time values. -If the time interval is provided (:code:`time_interval is not None`), auxiliary spikes at the start- and end-time of the interval are added to the spike trains. +Furthermore, the time interval of the spike train measurement (edges of the spike trains) should be provided as a pair of start- and end-time values. 
Furthermore, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function). -As result, :code:`load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file. - -If you load spike trains yourself, i.e. from data files with different structure, you can use the helper function :code:`add_auxiliary_spikes` to add the auxiliary spikes at the beginning and end of the observation interval. -Both the ISI and the SPIKE distance computation require the presence of auxiliary spikes, so make sure you have those in your spike trains: +As result, :func:`.load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file. -.. code:: python - - spike_train = spk.add_auxiliary_spikes(spike_train, (T_start, T_end)) - # if you provide only a single value, it is interpreted as T_end, while T_start=0 - spike_train = spk.add_auxiliary_spikes(spike_train, T_end) Computing bivariate distances profiles --------------------------------------- @@ -114,19 +115,18 @@ Computing bivariate distances profiles Spike trains are expected to be *sorted*! For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted. - Make sure that all your spike trains are sorted, which is ensured if you use the `load_spike_trains_from_txt` function with the parameter `is_sorted=False`. - If in doubt, use :code:`spike_train = np.sort(spike_train)` to obtain a correctly sorted spike train. - - Furthermore, the spike trains should have auxiliary spikes at the beginning and end of the observation interval. - You can ensure this by providing the :code:`time_interval` in the :code:`load_spike_trains_from_txt` function, or calling :code:`add_auxiliary_spikes` for your spike trains. - The spike trains must have *the same* observation interval! 
+ Make sure that all your spike trains are sorted, which is ensured if you use the :func:`.load_spike_trains_from_txt` function with the parameter `is_sorted=False` (default). + If in doubt, use :meth:`.SpikeTrain.sort()` to ensure a correctly sorted spike train. ----------------------- + If you need to copy a spike train, use the :meth:`.SpikeTrain.copy()` method. + Simple assignment `t2 = t1` does not create a copy of the spike train data, but a reference as `numpy.array` is used for storing the data. + +------------------------------ ISI-distance ............ -The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two spike trains, and plots it with matplotlib: +The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :class:`.SpikeTrain` s, and plots it with matplotlib: .. code:: python @@ -134,18 +134,18 @@ The following code loads some exemplary spike trains, computes the dissimilarity import pyspike as spk spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0, 4000)) + edges=(0, 4000)) isi_profile = spk.isi_profile(spike_trains[0], spike_trains[1]) x, y = isi_profile.get_plottable_data() plt.plot(x, y, '--k') print("ISI distance: %.8f" % isi_profile.avrg()) plt.show() -The ISI-profile is a piece-wise constant function, and hence the function :code:`isi_profile` returns an instance of the :code:`PieceWiseConstFunc` class. +The ISI-profile is a piece-wise constant function, and hence the function :func:`.isi_profile` returns an instance of the :class:`.PieceWiseConstFunc` class. As shown above, this class allows you to obtain arrays that can be used to plot the function with :code:`plt.plt`, but also to compute the time average, which amounts to the final scalar ISI-distance. -By default, the time average is computed for the whole :code:`PieceWiseConstFunc` function. 
+By default, the time average is computed for the whole :class:`.PieceWiseConstFunc` function. However, it is also possible to obtain the average of a specific interval by providing a pair of floats defining the start and end of the interval. -In the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals: +For the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals: .. code:: python @@ -168,7 +168,7 @@ where :code:`interval` is optional, as above, and if omitted the ISI-distance is SPIKE-distance .............. -To compute for the spike distance profile you use the function :code:`spike_profile` instead of :code:`isi_profile` above. +To compute for the spike distance profile you use the function :func:`.spike_profile` instead of :code:`isi_profile` above. But the general approach is very similar: .. code:: python @@ -177,7 +177,7 @@ But the general approach is very similar: import pyspike as spk spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0, 4000)) + edges=(0, 4000)) spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1]) x, y = spike_profile.get_plottable_data() plt.plot(x, y, '--k') @@ -185,9 +185,9 @@ But the general approach is very similar: plt.show() This short example computes and plots the SPIKE-profile of the first two spike trains in the file :code:`PySpike_testdata.txt`. -In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :code:`PieceWiseLinFunc` object. -Just like the :code:`PieceWiseConstFunc` for the ISI-profile, the :code:`PieceWiseLinFunc` provides a :code:`get_plottable_data` member function that returns arrays that can be used directly to plot the function. -Furthermore, the :code:`avrg` member function returns the average of the profile defined as the overall SPIKE distance. 
+In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :class:`.PieceWiseLinFunc` object. +Just like the :class:`.PieceWiseConstFunc` for the ISI-profile, the :class:`.PieceWiseLinFunc` provides a :meth:`.PieceWiseLinFunc.get_plottable_data` member function that returns arrays that can be used directly to plot the function. +Furthermore, the :meth:`.PieceWiseLinFunc.avrg` member function returns the average of the profile defined as the overall SPIKE distance. As above, you can provide an interval as a pair of floats as well as a sequence of such pairs to :code:`avrg` to specify the averaging interval if required. Again, you can use @@ -217,9 +217,9 @@ SPIKE synchronization SPIKE synchronization is another approach to measure spike synchrony. In contrast to the SPIKE- and ISI-distance, it measures similarity instead of dissimilarity, i.e. higher values represent larger synchrony. Another difference is that the SPIKE synchronization profile is only defined exactly at the spike times, not for the whole interval of the spike trains. -Therefore, it is represented by a :code:`DiscreteFunction`. +Therefore, it is represented by a :class:`.DiscreteFunction`. -To compute for the spike synchronization profile, PySpike provides the function :code:`spike_sync_profile`. +To compute for the spike synchronization profile, PySpike provides the function :func:`.spike_sync_profile`. The general handling of the profile, however, is similar to the other profiles above: .. 
code:: python @@ -228,11 +228,11 @@ The general handling of the profile, however, is similar to the other profiles a import pyspike as spk spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0, 4000)) + edges=(0, 4000)) spike_profile = spk.spike_sync_profile(spike_trains[0], spike_trains[1]) x, y = spike_profile.get_plottable_data() -For the direct computation of the overall spike synchronization value within some interval, the :code:`spike_sync` function can be used: +For the direct computation of the overall spike synchronization value within some interval, the :func:`.spike_sync` function can be used: .. code:: python @@ -243,23 +243,23 @@ Computing multivariate profiles and distances ---------------------------------------------- To compute the multivariate ISI-profile, SPIKE-profile or SPIKE-Synchronization profile f a set of spike trains, PySpike provides multi-variate version of the profile function. -The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains: +The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :func:`.isi_profile_multi`, :func:`.spike_profile_multi`, :func:`.spike_sync_profile_multi` functions: .. code:: python spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - time_interval=(0, 4000)) + edges=(0, 4000)) avrg_isi_profile = spk.isi_profile_multi(spike_trains) avrg_spike_profile = spk.spike_profile_multi(spike_trains) avrg_spike_sync_profile = spk.spike_sync_profile_multi(spike_trains) All functions take an optional parameter :code:`indices`, a list of indices that allows to define the spike trains that should be used for the multivariate profile. 
-As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions: :code:`isi_distance_multi`, :code:`spike_distance_multi` and :code:`spike_sync_multi`, that return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value. +As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions: :func:`.isi_distance_multi`, :func:`.spike_distance_multi` and :func:`.spike_sync_multi`, that return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value. Those functions also accept an :code:`interval` parameter that can be used to specify the begin and end of the averaging interval as a pair of floats, if neglected the complete interval is used. Another option to characterize large sets of spike trains are distance matrices. Each entry in the distance matrix represents a bivariate distance (similarity for SPIKE-Synchronization) of two spike trains. -The distance matrix is symmetric and has zero values (ones) at the diagonal. +The distance matrix is symmetric and has zero values (ones) at the diagonal and is computed with the functions :func:`.isi_distance_matrix`, :func:`.spike_distance_matrix` and :func:`.spike_sync_matrix`. The following example computes and plots the ISI- and SPIKE-distance matrix as well as the SPIKE-Synchronization-matrix, with different intervals. .. code:: python diff --git a/doc/pyspike.rst b/doc/pyspike.rst index 6aa36e7..a6dc1a0 100644 --- a/doc/pyspike.rst +++ b/doc/pyspike.rst @@ -1,60 +1,64 @@ pyspike package =============== -Submodules ----------- -pyspike.isi_distance module +Classes ---------------------------------------- -.. automodule:: pyspike.isi_distance +SpikeTrain +........................................ +.. 
automodule:: pyspike.SpikeTrain :members: :undoc-members: :show-inheritance: -pyspike.spike_distance module ----------------------------------------- - -.. automodule:: pyspike.spike_distance +PieceWiseConstFunc +........................................ +.. automodule:: pyspike.PieceWiseConstFunc :members: :undoc-members: :show-inheritance: -pyspike.spike_sync module ----------------------------------------- - -.. automodule:: pyspike.spike_sync +PieceWiseLinFunc +........................................ +.. automodule:: pyspike.PieceWiseLinFunc :members: :undoc-members: :show-inheritance: -pyspike.PieceWiseConstFunc module ----------------------------------------- - -.. automodule:: pyspike.PieceWiseConstFunc +DiscreteFunc +........................................ +.. automodule:: pyspike.DiscreteFunc :members: :undoc-members: :show-inheritance: -pyspike.PieceWiseLinFunc module ----------------------------------------- +Functions +---------- -.. automodule:: pyspike.PieceWiseLinFunc +ISI-distance +........................................ +.. automodule:: pyspike.isi_distance :members: :undoc-members: :show-inheritance: -pyspike.DiscreteFunc module ----------------------------------------- - -.. automodule:: pyspike.DiscreteFunc +SPIKE-distance +........................................ +.. automodule:: pyspike.spike_distance :members: :undoc-members: :show-inheritance: -pyspike.spikes module ----------------------------------------- +SPIKE-synchronization +........................................ +.. automodule:: pyspike.spike_sync + :members: + :undoc-members: + :show-inheritance: +Helper functions +........................................ .. automodule:: pyspike.spikes :members: :undoc-members: diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py index bd13e1f..33b7a81 100644 --- a/pyspike/DiscreteFunc.py +++ b/pyspike/DiscreteFunc.py @@ -1,11 +1,7 @@ -""" -Class representing discrete functions. +# Class representing discrete functions. 
+# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License - -""" from __future__ import print_function import numpy as np @@ -174,7 +170,7 @@ class DiscreteFunc(object): def avrg(self, interval=None): """ Computes the average of the interval sequence: - :math:`a = 1/N sum f_n` where N is the number of intervals. + :math:`a = 1/N \\sum f_n` where N is the number of intervals. :param interval: averaging interval given as a pair of floats, a sequence of pairs for averaging multiple intervals, or diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py index dc57ab1..41998ef 100644 --- a/pyspike/PieceWiseConstFunc.py +++ b/pyspike/PieceWiseConstFunc.py @@ -1,11 +1,7 @@ -""" -Class representing piece-wise constant functions. +# Class representing piece-wise constant functions. +# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License - -""" from __future__ import print_function import numpy as np @@ -103,7 +99,7 @@ class PieceWiseConstFunc(object): def avrg(self, interval=None): """ Computes the average of the piece-wise const function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + :math:`a = 1/T \int_0^T f(x) dx` where T is the length of the interval. :param interval: averaging interval given as a pair of floats, a sequence of pairs for averaging multiple intervals, or diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py index bc0aa2a..f2442be 100644 --- a/pyspike/PieceWiseLinFunc.py +++ b/pyspike/PieceWiseLinFunc.py @@ -1,11 +1,7 @@ -""" -Class representing piece-wise linear functions. +# Class representing piece-wise linear functions. 
+# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License - -""" from __future__ import print_function import numpy as np @@ -123,7 +119,7 @@ class PieceWiseLinFunc: def avrg(self, interval=None): """ Computes the average of the piece-wise linear function: - :math:`a = 1/T int_0^T f(x) dx` where T is the length of the interval. + :math:`a = 1/T \int_0^T f(x) dx` where T is the interval length. :param interval: averaging interval given as a pair of floats, a sequence of pairs for averaging multiple intervals, or diff --git a/pyspike/SpikeTrain.py b/pyspike/SpikeTrain.py index d586fe0..a02b7ab 100644 --- a/pyspike/SpikeTrain.py +++ b/pyspike/SpikeTrain.py @@ -1,9 +1,6 @@ -""" Module containing the class representing spike trains for PySpike. - -Copyright 2015, Mario Mulansky - -Distributed under the BSD License -""" +# Module containing the class representing spike trains for PySpike. +# Copyright 2015, Mario Mulansky +# Distributed under the BSD License import numpy as np @@ -11,15 +8,22 @@ import numpy as np class SpikeTrain(object): """ Class representing spike trains for the PySpike Module.""" - def __init__(self, spike_times, edges): - """ Constructs the SpikeTrain + def __init__(self, spike_times, edges, is_sorted=True): + """ Constructs the SpikeTrain. + :param spike_times: ordered array of spike times. :param edges: The edges of the spike train. Given as a pair of floats - (T0, T1) or a single float T1, where then T0=0 is assumed. + (T0, T1) or a single float T1, where then T0=0 is + assumed. + :param is_sorted: If `False`, the spike times will sorted by `np.sort`. 
+ """ # TODO: sanity checks - self.spikes = np.array(spike_times, dtype=float) + if is_sorted: + self.spikes = np.array(spike_times, dtype=float) + else: + self.spikes = np.sort(np.array(spike_times, dtype=float)) try: self.t_start = float(edges[0]) @@ -27,3 +31,20 @@ class SpikeTrain(object): except: self.t_start = 0.0 self.t_end = float(edges) + + def sort(self): + """ Sorts the spike times of this spike train using `np.sort` + """ + self.spikes = np.sort(self.spikes) + + def copy(self): + """ Returns a copy of this spike train. + Use this function if you want to create a real (deep) copy of this + spike train. Simple assignment `t2 = t1` does not create a copy of the + spike train data, but a reference as `numpy.array` is used for storing + the data. + + :return: :class:`.SpikeTrain` copy of this spike train. + + """ + return SpikeTrain(self.spikes.copy(), [self.t_start, self.t_end]) diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index cb8ef54..aeab0df 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -1,11 +1,6 @@ -""" - -Module containing several functions to compute the ISI profiles and distances - -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License -""" +# Module containing several functions to compute the ISI profiles and distances +# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License from pyspike import PieceWiseConstFunc from pyspike.generic import _generic_profile_multi, _generic_distance_matrix @@ -15,16 +10,16 @@ from pyspike.generic import _generic_profile_multi, _generic_distance_matrix # isi_profile ############################################################ def isi_profile(spike_train1, spike_train2): - """ Computes the isi-distance profile :math:`S_{isi}(t)` of the two given - spike trains. Retruns the profile as a PieceWiseConstFunc object. The S_isi - values are defined positive S_isi(t)>=0. 
+ """ Computes the isi-distance profile :math:`I(t)` of the two given + spike trains. Retruns the profile as a PieceWiseConstFunc object. The + ISI-values are defined positive :math:`I(t)>=0`. :param spike_train1: First spike train. - :type spike_train1: :class:`pyspike.SpikeTrain` + :type spike_train1: :class:`.SpikeTrain` :param spike_train2: Second spike train. - :type spike_train2: :class:`pyspike.SpikeTrain` - :returns: The isi-distance profile :math:`S_{isi}(t)` - :rtype: :class:`pyspike.function.PieceWiseConstFunc` + :type spike_train2: :class:`.SpikeTrain` + :returns: The isi-distance profile :math:`I(t)` + :rtype: :class:`.PieceWiseConstFunc` """ # check whether the spike trains are defined for the same interval @@ -54,20 +49,20 @@ Falling back to slow python backend.") # isi_distance ############################################################ def isi_distance(spike_train1, spike_train2, interval=None): - """ Computes the isi-distance I of the given spike trains. The + """ Computes the ISI-distance :math:`D_I` of the given spike trains. The isi-distance is the integral over the isi distance profile - :math:`S_{isi}(t)`: + :math:`I(t)`: - .. math:: I = \int_{T_0}^{T_1} S_{isi}(t) dt. + .. math:: D_I = \\int_{T_0}^{T_1} I(t) dt. :param spike_train1: First spike train. - :type spike_train1: :class:`pyspike.SpikeTrain` + :type spike_train1: :class:`.SpikeTrain` :param spike_train2: Second spike train. - :type spike_train2: :class:`pyspike.SpikeTrain` + :type spike_train2: :class:`.SpikeTrain` :param interval: averaging interval given as a pair of floats (T0, T1), if None the average over the whole function is computed. :type interval: Pair of floats or None. - :returns: The isi-distance I. + :returns: The isi-distance :math:`D_I`. 
:rtype: double """ return isi_profile(spike_train1, spike_train2).avrg(interval) @@ -79,15 +74,17 @@ def isi_distance(spike_train1, spike_train2, interval=None): def isi_profile_multi(spike_trains, indices=None): """ computes the multi-variate isi distance profile for a set of spike trains. That is the average isi-distance of all pairs of spike-trains: - S_isi(t) = 2/((N(N-1)) sum_{} S_{isi}^{i,j}, + + .. math:: = \\frac{2}{N(N-1)} \\sum_{} I^{i,j}, + where the sum goes over all pairs - :param spike_trains: list of :class:`pyspike.SpikeTrain` + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type state: list or None - :returns: The averaged isi profile :math:`(t)` - :rtype: :class:`pyspike.function.PieceWiseConstFunc` + :returns: The averaged isi profile :math:`` + :rtype: :class:`.PieceWiseConstFunc` """ average_dist, M = _generic_profile_multi(spike_trains, isi_profile, indices) @@ -101,16 +98,18 @@ def isi_profile_multi(spike_trains, indices=None): def isi_distance_multi(spike_trains, indices=None, interval=None): """ computes the multi-variate isi-distance for a set of spike-trains. That is the time average of the multi-variate spike profile: - I = \int_0^T 2/((N(N-1)) sum_{} S_{isi}^{i,j}, + + .. math:: D_I = \\int_0^T \\frac{2}{N(N-1)} \\sum_{} I^{i,j}, + where the sum goes over all pairs - :param spike_trains: list of spike trains + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :param interval: averaging interval given as a pair of floats, if None the average over the whole function is computed. :type interval: Pair of floats or None. 
- :returns: The time-averaged isi distance :math:`I` + :returns: The time-averaged multivariate ISI distance :math:`D_I` :rtype: double """ return isi_profile_multi(spike_trains, indices).avrg(interval) @@ -122,7 +121,7 @@ def isi_distance_multi(spike_trains, indices=None, interval=None): def isi_distance_matrix(spike_trains, indices=None, interval=None): """ Computes the time averaged isi-distance of all pairs of spike-trains. - :param spike_trains: list of :class:`pyspike.SpikeTrain` + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type indices: list or None @@ -130,7 +129,7 @@ def isi_distance_matrix(spike_trains, indices=None, interval=None): the average over the whole function is computed. :type interval: Pair of floats or None. :returns: 2D array with the pair wise time average isi distances - :math:`I_{ij}` + :math:`D_{I}^{ij}` :rtype: np.array """ return _generic_distance_matrix(spike_trains, isi_distance, diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index 8d03d70..cc620d4 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -1,11 +1,6 @@ -""" - -Module containing several functions to compute SPIKE profiles and distances - -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License -""" +# Module containing several functions to compute SPIKE profiles and distances +# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License from pyspike import PieceWiseLinFunc from pyspike.generic import _generic_profile_multi, _generic_distance_matrix @@ -15,16 +10,16 @@ from pyspike.generic import _generic_profile_multi, _generic_distance_matrix # spike_profile ############################################################ def spike_profile(spike_train1, spike_train2): - """ Computes the spike-distance profile S_spike(t) of the two given spike - trains. 
Returns the profile as a PieceWiseLinFunc object. The S_spike - values are defined positive S_spike(t)>=0. + """ Computes the spike-distance profile :math:`S(t)` of the two given spike + trains. Returns the profile as a PieceWiseLinFunc object. The SPIKE-values + are defined positive :math:`S(t)>=0`. :param spike_train1: First spike train. - :type spike_train1: :class:`pyspike.SpikeTrain` + :type spike_train1: :class:`.SpikeTrain` :param spike_train2: Second spike train. - :type spike_train2: :class:`pyspike.SpikeTrain` - :returns: The spike-distance profile :math:`S_{spike}(t)`. - :rtype: :class:`pyspike.function.PieceWiseLinFunc` + :type spike_train2: :class:`.SpikeTrain` + :returns: The spike-distance profile :math:`S(t)`. + :rtype: :class:`.PieceWiseLinFunc` """ # check whether the spike trains are defined for the same interval @@ -56,15 +51,15 @@ Falling back to slow python backend.") # spike_distance ############################################################ def spike_distance(spike_train1, spike_train2, interval=None): - """ Computes the spike-distance S of the given spike trains. The - spike-distance is the integral over the isi distance profile S_spike(t): + """ Computes the spike-distance :math:`D_S` of the given spike trains. The + spike-distance is the integral over the isi distance profile :math:`S(t)`: - .. math:: S = \int_{T_0}^{T_1} S_{spike}(t) dt. + .. math:: D_S = \int_{T_0}^{T_1} S(t) dt. :param spike_train1: First spike train. - :type spike_train1: :class:`pyspike.SpikeTrain` + :type spike_train1: :class:`.SpikeTrain` :param spike_train2: Second spike train. - :type spike_train2: :class:`pyspike.SpikeTrain` + :type spike_train2: :class:`.SpikeTrain` :param interval: averaging interval given as a pair of floats (T0, T1), if None the average over the whole function is computed. :type interval: Pair of floats or None. 
@@ -81,15 +76,17 @@ def spike_distance(spike_train1, spike_train2, interval=None): def spike_profile_multi(spike_trains, indices=None): """ Computes the multi-variate spike distance profile for a set of spike trains. That is the average spike-distance of all pairs of spike-trains: - :math:`S_spike(t) = 2/((N(N-1)) sum_{} S_{spike}^{i, j}`, + + .. math:: = \\frac{2}{N(N-1)} \\sum_{} S^{i, j}`, + where the sum goes over all pairs - :param spike_trains: list of spike trains + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type indices: list or None - :returns: The averaged spike profile :math:`(t)` - :rtype: :class:`pyspike.function.PieceWiseLinFunc` + :returns: The averaged spike profile :math:`(t)` + :rtype: :class:`.PieceWiseLinFunc` """ average_dist, M = _generic_profile_multi(spike_trains, spike_profile, @@ -104,17 +101,20 @@ def spike_profile_multi(spike_trains, indices=None): def spike_distance_multi(spike_trains, indices=None, interval=None): """ Computes the multi-variate spike distance for a set of spike trains. That is the time average of the multi-variate spike profile: - S_{spike} = \int_0^T 2/((N(N-1)) sum_{} S_{spike}^{i, j} dt + + .. math:: D_S = \\int_0^T \\frac{2}{N(N-1)} \\sum_{} + S^{i, j} dt + where the sum goes over all pairs - :param spike_trains: list of :class:`pyspike.SpikeTrain` + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type indices: list or None :param interval: averaging interval given as a pair of floats, if None the average over the whole function is computed. :type interval: Pair of floats or None. - :returns: The averaged spike distance S. + :returns: The averaged multi-variate spike distance :math:`D_S`. 
:rtype: double """ return spike_profile_multi(spike_trains, indices).avrg(interval) @@ -126,7 +126,7 @@ def spike_distance_multi(spike_trains, indices=None, interval=None): def spike_distance_matrix(spike_trains, indices=None, interval=None): """ Computes the time averaged spike-distance of all pairs of spike-trains. - :param spike_trains: list of :class:`pyspike.SpikeTrain` + :param spike_trains: list of :class:`.SpikeTrain` :param indices: list of indices defining which spike trains to use, if None all given spike trains are used (default=None) :type indices: list or None @@ -134,7 +134,7 @@ def spike_distance_matrix(spike_trains, indices=None, interval=None): the average over the whole function is computed. :type interval: Pair of floats or None. :returns: 2D array with the pair wise time average spike distances - :math:`S_{ij}` + :math:`D_S^{ij}` :rtype: np.array """ return _generic_distance_matrix(spike_trains, spike_distance, diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index 8ddd32c..9d2e363 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -1,12 +1,7 @@ -""" - -Module containing several functions to compute SPIKE-Synchronization profiles -and distances - -Copyright 2014-2015, Mario Mulansky - -Distributed under the BSD License -""" +# Module containing several functions to compute SPIKE-Synchronization profiles +# and distances +# Copyright 2014-2015, Mario Mulansky +# Distributed under the BSD License from functools import partial from pyspike import DiscreteFunc @@ -27,7 +22,7 @@ def spike_sync_profile(spike_train1, spike_train2, max_tau=None): :param spike_train2: Second spike train. :type spike_train2: :class:`pyspike.SpikeTrain` :param max_tau: Maximum coincidence window size. If 0 or `None`, the - coincidence window has no upper bound. + coincidence window has no upper bound. :returns: The spike-distance profile :math:`S_{sync}(t)`. 
:rtype: :class:`pyspike.function.DiscreteFunction` @@ -77,12 +72,13 @@ def spike_sync(spike_train1, spike_train2, interval=None, max_tau=None): :param spike_train2: Second spike train. :type spike_train2: :class:`pyspike.SpikeTrain` :param interval: averaging interval given as a pair of floats (T0, T1), - if None the average over the whole function is computed. + if `None` the average over the whole function is computed. :type interval: Pair of floats or None. :param max_tau: Maximum coincidence window size. If 0 or `None`, the - coincidence window has no upper bound. + coincidence window has no upper bound. :returns: The spike synchronization value. - :rtype: double + :rtype: `double` + """ return spike_sync_profile(spike_train1, spike_train2, max_tau).avrg(interval) @@ -103,7 +99,7 @@ def spike_sync_profile_multi(spike_trains, indices=None, max_tau=None): if None all given spike trains are used (default=None) :type indices: list or None :param max_tau: Maximum coincidence window size. If 0 or `None`, the - coincidence window has no upper bound. + coincidence window has no upper bound. :returns: The multi-variate spike sync profile :math:`(t)` :rtype: :class:`pyspike.function.DiscreteFunction` @@ -130,9 +126,10 @@ def spike_sync_multi(spike_trains, indices=None, interval=None, max_tau=None): the average over the whole function is computed. :type interval: Pair of floats or None. :param max_tau: Maximum coincidence window size. If 0 or `None`, the - coincidence window has no upper bound. + coincidence window has no upper bound. :returns: The multi-variate spike synchronization value SYNC. :rtype: double + """ return spike_sync_profile_multi(spike_trains, indices, max_tau).avrg(interval) @@ -153,10 +150,11 @@ def spike_sync_matrix(spike_trains, indices=None, interval=None, max_tau=None): the average over the whole function is computed. :type interval: Pair of floats or None. :param max_tau: Maximum coincidence window size. 
If 0 or `None`, the - coincidence window has no upper bound. + coincidence window has no upper bound. :returns: 2D array with the pair wise time spike synchronization values :math:`SYNC_{ij}` :rtype: np.array + """ dist_func = partial(spike_sync, max_tau=max_tau) return _generic_distance_matrix(spike_trains, dist_func, diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 9401b6e..35d8533 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -1,11 +1,6 @@ -""" spikes.py - -Module containing several function to load and transform spike trains - -Copyright 2014, Mario Mulansky - -Distributed under the BSD License -""" +# Module containing several function to load and transform spike trains +# Copyright 2014, Mario Mulansky +# Distributed under the BSD License import numpy as np from pyspike import SpikeTrain @@ -15,21 +10,18 @@ from pyspike import SpikeTrain # spike_train_from_string ############################################################ def spike_train_from_string(s, edges, sep=' ', is_sorted=False): - """ Converts a string of times into a :class:`pyspike.SpikeTrain`. + """ Converts a string of times into a :class:`.SpikeTrain`. :param s: the string with (ordered) spike times. :param edges: interval defining the edges of the spike train. - Given as a pair of floats (T0, T1) or a single float T1, where T0=0 is - assumed. + Given as a pair of floats (T0, T1) or a single float T1, + where T0=0 is assumed. :param sep: The separator between the time numbers, default=' '. 
:param is_sorted: if True, the spike times are not sorted after loading, if False, spike times are sorted with `np.sort` - :returns: :class:`pyspike.SpikeTrain` + :returns: :class:`.SpikeTrain` """ - if not(is_sorted): - return SpikeTrain(np.sort(np.fromstring(s, sep=sep)), edges) - else: - return SpikeTrain(np.fromstring(s, sep=sep), edges) + return SpikeTrain(np.fromstring(s, sep=sep), edges, is_sorted) ############################################################ @@ -40,7 +32,7 @@ def load_spike_trains_from_txt(file_name, edges, """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated by `separator`. Empty lines as well as lines starting with `comment` are - neglected. The `interval` represents the start and the end of the + neglected. The `edges` represents the start and the end of the spike trains. :param file_name: The name of the text file. @@ -51,7 +43,7 @@ def load_spike_trains_from_txt(file_name, edges, :param separator: The character used to seprate the values in the text file :param comment: Lines starting with this character are ignored. :param sort: If true, the spike times are order via `np.sort`, default=True - :returns: list of spike trains + :returns: list of :class:`.SpikeTrain` """ spike_trains = [] spike_file = open(file_name, 'r') @@ -70,7 +62,7 @@ def load_spike_trains_from_txt(file_name, edges, def merge_spike_trains(spike_trains): """ Merges a number of spike trains into a single spike train. - :param spike_trains: list of arrays of spike times + :param spike_trains: list of :class:`.SpikeTrain` :returns: spike train with the merged spike times """ # get the lengths of the spike trains @@ -110,7 +102,7 @@ def generate_poisson_spikes(rate, interval): to the spike train at the beginning and end of this interval, if they are not yet present. 
:type interval: pair of doubles or double - :returns: Poisson spike train as a :class:`pyspike.SpikeTrain` + :returns: Poisson spike train as a :class:`.SpikeTrain` """ try: T_start = interval[0] -- cgit v1.2.3 From d6462d271aeaf1be635cbc7c4317ae6a3b30b63f Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 12 Jun 2015 14:55:07 +0200 Subject: implement __getitem__ and __len__ for SpikeTrain This allows to use SpikeTrain objects to be used in many applications as if they were arrays with spike times. --- pyspike/SpikeTrain.py | 15 +++++++++++++++ pyspike/spikes.py | 3 ++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/SpikeTrain.py b/pyspike/SpikeTrain.py index 9127b60..4b59a5d 100644 --- a/pyspike/SpikeTrain.py +++ b/pyspike/SpikeTrain.py @@ -32,6 +32,21 @@ class SpikeTrain(object): self.t_start = 0.0 self.t_end = float(edges) + def __getitem__(self, index): + """ Returns the time of the spike given by index. + + :param index: Index of the spike. + :return: spike time. + """ + return self.spikes[index] + + def __len__(self): + """ Returns the number of spikes. + + :return: Number of spikes. + """ + return len(self.spikes) + def sort(self): """ Sorts the spike times of this spike train using `np.sort` """ diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 35d8533..b18d7eb 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -28,7 +28,8 @@ def spike_train_from_string(s, edges, sep=' ', is_sorted=False): # load_spike_trains_txt ############################################################ def load_spike_trains_from_txt(file_name, edges, - separator=' ', comment='#', is_sorted=False): + separator=' ', comment='#', is_sorted=False, + ignore_empty_lines=True): """ Loads a number of spike trains from a text file. Each line of the text file should contain one spike train as a sequence of spike times separated by `separator`. 
Empty lines as well as lines starting with `comment` are -- cgit v1.2.3 From 0d8af2c97d766a4fa514f0232189bc17c31c67a0 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 24 Mar 2016 15:54:48 +0100 Subject: +function for saving spike trains to txt files save_spike_trains_to_txt allows to save spike train data into txt files which can then be loaded via load_spike_trains_from_txt again. --- pyspike/__init__.py | 4 ++-- pyspike/spikes.py | 37 +++++++++++++++++++++++++++++-------- 2 files changed, 31 insertions(+), 10 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 069090b..4d75786 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -23,8 +23,8 @@ from .spike_sync import spike_sync_profile, spike_sync,\ spike_sync_profile_multi, spike_sync_multi, spike_sync_matrix from .psth import psth -from .spikes import load_spike_trains_from_txt, spike_train_from_string, \ - merge_spike_trains, generate_poisson_spikes +from .spikes import load_spike_trains_from_txt, save_spike_trains_to_txt, \ + spike_train_from_string, merge_spike_trains, generate_poisson_spikes # define the __version__ following # http://stackoverflow.com/questions/17583443 diff --git a/pyspike/spikes.py b/pyspike/spikes.py index b18d7eb..966ad69 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -2,6 +2,7 @@ # Copyright 2014, Mario Mulansky # Distributed under the BSD License + import numpy as np from pyspike import SpikeTrain @@ -25,7 +26,7 @@ def spike_train_from_string(s, edges, sep=' ', is_sorted=False): ############################################################ -# load_spike_trains_txt +# load_spike_trains_from_txt ############################################################ def load_spike_trains_from_txt(file_name, edges, separator=' ', comment='#', is_sorted=False, @@ -47,16 +48,36 @@ def load_spike_trains_from_txt(file_name, edges, :returns: list of :class:`.SpikeTrain` """ spike_trains = [] - spike_file = open(file_name, 
'r') - for line in spike_file: - if len(line) > 1 and not line.startswith(comment): - # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line, edges, - separator, is_sorted) - spike_trains.append(spike_train) + with open(file_name, 'r') as spike_file: + for line in spike_file: + if len(line) > 1 and not line.startswith(comment): + # use only the lines with actual data and not commented + spike_train = spike_train_from_string(line, edges, + separator, is_sorted) + spike_trains.append(spike_train) return spike_trains +############################################################ +# save_spike_trains_to_txt +############################################################ +def save_spike_trains_to_txt(spike_trains, file_name, + separator=' ', precision=8): + """ Saves the given spike trains into a file with the given file name. + Each spike train will be stored in one line in the text file with the times + separated by `separator`. + + :param spike_trains: List of :class:`.SpikeTrain` objects + :param file_name: The name of the text file. + """ + # format string to print the spike times with given precision + format_str = "{:0.%de}" % precision + with open(file_name, 'w') as spike_file: + for st in spike_trains: + s = separator.join(map(format_str.format, st.spikes)) + spike_file.write(s+'\n') + + ############################################################ # merge_spike_trains ############################################################ -- cgit v1.2.3 From adab2aa6d573702ca685e8242fd7edccb841ff8c Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 24 Mar 2016 16:27:51 +0100 Subject: add empty spike trains when loading from txt treatment of empty lines was incorrect. now empty spike trains are created from empty lines in the txt file if parameter ignore_empty_lines=False is given. 
--- pyspike/spikes.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 966ad69..271adcb 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -50,11 +50,15 @@ def load_spike_trains_from_txt(file_name, edges, spike_trains = [] with open(file_name, 'r') as spike_file: for line in spike_file: - if len(line) > 1 and not line.startswith(comment): - # use only the lines with actual data and not commented - spike_train = spike_train_from_string(line, edges, - separator, is_sorted) - spike_trains.append(spike_train) + if not line.startswith(comment): # ignore comments + if len(line) > 1: + # ignore empty lines + spike_train = spike_train_from_string(line, edges, + separator, is_sorted) + spike_trains.append(spike_train) + elif not(ignore_empty_lines): + # add empty spike train + spike_trains.append(SpikeTrain([], edges)) return spike_trains -- cgit v1.2.3 From 04d25294ca65246d9397e981767c3d7c737626c3 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 24 Mar 2016 16:37:02 +0100 Subject: quick fix in format string --- pyspike/spikes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 271adcb..ab6d4c4 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -75,7 +75,7 @@ def save_spike_trains_to_txt(spike_trains, file_name, :param file_name: The name of the text file. 
""" # format string to print the spike times with given precision - format_str = "{:0.%de}" % precision + format_str = "{0:.%de}" % precision with open(file_name, 'w') as spike_file: for st in spike_trains: s = separator.join(map(format_str.format, st.spikes)) -- cgit v1.2.3 From 4691d0e77a024fbc73d1098ee557d65f8f2ddc89 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Sat, 18 Jun 2016 16:27:51 -0700 Subject: added function to import time series new function import_spike_trains_from_time_series that loads spike trains from time series. --- pyspike/__init__.py | 1 + pyspike/spikes.py | 25 +++++++++++++++++++++++++ test/test_spikes.py | 19 +++++++++++++++++++ 3 files changed, 45 insertions(+) (limited to 'pyspike/spikes.py') diff --git a/pyspike/__init__.py b/pyspike/__init__.py index 069090b..1e879c4 100644 --- a/pyspike/__init__.py +++ b/pyspike/__init__.py @@ -24,6 +24,7 @@ from .spike_sync import spike_sync_profile, spike_sync,\ from .psth import psth from .spikes import load_spike_trains_from_txt, spike_train_from_string, \ + import_spike_trains_from_time_series, \ merge_spike_trains, generate_poisson_spikes # define the __version__ following diff --git a/pyspike/spikes.py b/pyspike/spikes.py index b18d7eb..1bf474c 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -57,6 +57,31 @@ def load_spike_trains_from_txt(file_name, edges, return spike_trains +def import_spike_trains_from_time_series(file_name, start_time, time_bin, + separator=None, comment='#'): + """ Imports spike trains from time series consisting of 0 and 1 denoting + the absence or presence of a spike. Each line in the data file represents + one spike train. + + :param file_name: The name of the data file containing the time series. + :param edges: A pair (T_start, T_end) of values representing the + start and end time of the spike train measurement + or a single value representing the end time, the + T_start is then assuemd as 0. 
+ :param separator: The character used to separate the values in the text file
11:06:06 +0000 Subject: Make merge_spike_trains work with empty spike trains, and faster 1. Fixes https://github.com/mariomulansky/PySpike/issues/30 2. Code is faster 3. Add test case --- pyspike/spikes.py | 22 ++++------------------ test/test_spikes.py | 10 ++++++++++ 2 files changed, 14 insertions(+), 18 deletions(-) (limited to 'pyspike/spikes.py') diff --git a/pyspike/spikes.py b/pyspike/spikes.py index 486a4a0..cf47043 100644 --- a/pyspike/spikes.py +++ b/pyspike/spikes.py @@ -116,24 +116,10 @@ def merge_spike_trains(spike_trains): :param spike_trains: list of :class:`.SpikeTrain` :returns: spike train with the merged spike times """ - # get the lengths of the spike trains - lens = np.array([len(st.spikes) for st in spike_trains]) - merged_spikes = np.empty(np.sum(lens)) - index = 0 # the index for merged_spikes - indices = np.zeros_like(lens) # indices of the spike trains - index_list = np.arange(len(indices)) # indices of indices of spike trains - # that have not yet reached the end - # list of the possible events in the spike trains - vals = [spike_trains[i].spikes[indices[i]] for i in index_list] - while len(index_list) > 0: - i = np.argmin(vals) # the next spike is the minimum - merged_spikes[index] = vals[i] # put it to the merged spike train - i = index_list[i] - index += 1 # next index of merged spike train - indices[i] += 1 # next index for the chosen spike train - if indices[i] >= lens[i]: # remove spike train index if ended - index_list = index_list[index_list != i] - vals = [spike_trains[n].spikes[indices[n]] for n in index_list] + # concatenating and sorting with numpy is fast, it also means we can handle + # empty spike trains + merged_spikes = np.concatenate([st.spikes for st in spike_trains]) + merged_spikes.sort() return SpikeTrain(merged_spikes, [spike_trains[0].t_start, spike_trains[0].t_end]) diff --git a/test/test_spikes.py b/test/test_spikes.py index bcface2..ee505b5 100644 --- a/test/test_spikes.py +++ b/test/test_spikes.py @@ -85,6 
+85,16 @@ def test_merge_spike_trains(): check_merged_spikes(merged_spikes.spikes, [st.spikes for st in spike_trains]) +def test_merge_empty_spike_trains(): + # first load the data + spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000)) + # take two non-empty trains, and one empty one + empty = spk.SpikeTrain([],[spike_trains[0].t_start,spike_trains[0].t_end]) + merged_spikes = spk.merge_spike_trains([spike_trains[0], empty, spike_trains[1]]) + # test if result is sorted + assert((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all()) + # we don't need to check more, that's done by test_merge_spike_trains + if __name__ == "main": test_load_from_txt() -- cgit v1.2.3