author    Mario Mulansky <mario.mulansky@gmx.net>    2014-10-10 17:04:04 +0200
committer Mario Mulansky <mario.mulansky@gmx.net>    2014-10-10 17:04:04 +0200
commit    a769a03d089ac0c61e2155239a28665c9316e14a (patch)
tree      766347541743aab1baeb07e9d75d008981c553d6
parent    62f792fa52801234d4f9c33800a44b0308e9b8ab (diff)
added load_txt function, some restructuring
-rw-r--r--   Readme.md                      2
-rwxr-xr-x   examples/SPIKY_testdata.txt    3
-rw-r--r--   examples/test_data.py         11
-rw-r--r--   pyspike/__init__.py            5
-rw-r--r--   pyspike/distances.py          25
-rw-r--r--   pyspike/spikes.py             72
-rwxr-xr-x   test/SPIKY_testdata.txt        3
-rw-r--r--   test/test_distance.py         24
-rw-r--r--   test/test_merge_spikes.py     49
-rw-r--r--   test/test_spikes.py           84
10 files changed, 176 insertions, 102 deletions
diff --git a/Readme.md b/Readme.md
index 368eef4..8b84ebd 100644
--- a/Readme.md
+++ b/Readme.md
@@ -1,7 +1,7 @@
# PySpike
PySpike is a Python library for numerical analysis of spike train similarity.
-Its core functionality are the implementation of the bivariate [ISI and SPIKE distance](http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony).
+Its core functionality is the implementation of the bivariate [ISI and SPIKE distance](http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony).
Additionally, it allows the computation of multi-variate spike train distances, averaging, and general spike train processing.
All source code is published under the liberal [MIT License](http://opensource.org/licenses/MIT).
diff --git a/examples/SPIKY_testdata.txt b/examples/SPIKY_testdata.txt
index 8fa3fcf..c8bea67 100755
--- a/examples/SPIKY_testdata.txt
+++ b/examples/SPIKY_testdata.txt
@@ -1,7 +1,10 @@
64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3
65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944
+# test comment
69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1
59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9
+# empty line
+
59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9
66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9
66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2
diff --git a/examples/test_data.py b/examples/test_data.py
index ff7b510..dcd0f20 100644
--- a/examples/test_data.py
+++ b/examples/test_data.py
@@ -7,17 +7,14 @@ import matplotlib.pyplot as plt
import pyspike as spk
-# first load the data
-spike_trains = []
-spike_file = open("SPIKY_testdata.txt", 'r')
-for line in spike_file:
- spike_trains.append(spk.spike_train_from_string(line))
+spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
+ time_interval=(0,4000))
# plot the spike time
for (i,spikes) in enumerate(spike_trains):
plt.plot(spikes, i*np.ones_like(spikes), 'o')
-f = spk.isi_distance(spike_trains[0], spike_trains[1], 4000)
+f = spk.isi_distance(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()
plt.figure()
@@ -27,7 +24,7 @@ print("Average: %.8f" % f.avrg())
print("Absolute average: %.8f" % f.abs_avrg())
-f = spk.spike_distance(spike_trains[0], spike_trains[1], 4000)
+f = spk.spike_distance(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()
print(x)
print(y)
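
For reference, the updated example script pieced together from the hunks above reads roughly as follows; a sketch assuming only the imports shown in the hunk context and the bundled SPIKY_testdata.txt:

    import numpy as np
    import matplotlib.pyplot as plt
    import pyspike as spk

    # time_interval attaches the auxiliary spikes at load time, so the
    # distance functions no longer need an explicit end-time argument
    spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
                                                  time_interval=(0, 4000))

    # plot the spike times
    for (i, spikes) in enumerate(spike_trains):
        plt.plot(spikes, i * np.ones_like(spikes), 'o')

    f = spk.isi_distance(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
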
diff --git a/pyspike/__init__.py b/pyspike/__init__.py
index 21005e9..2703f65 100644
--- a/pyspike/__init__.py
+++ b/pyspike/__init__.py
@@ -1,6 +1,7 @@
__all__ = ["function", "distances", "spikes"]
from function import PieceWiseConstFunc, PieceWiseLinFunc
-from distances import add_auxiliary_spikes, isi_distance, spike_distance, \
+from distances import isi_distance, spike_distance, \
isi_distance_multi, spike_distance_multi, isi_distance_matrix
-from spikes import spike_train_from_string, merge_spike_trains
+from spikes import add_auxiliary_spikes, load_spike_trains_from_txt, \
+ spike_train_from_string, merge_spike_trains
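
The restructuring moves `add_auxiliary_spikes` from `pyspike.distances` to `pyspike.spikes`, but since the function is still re-exported here, package-level access is unchanged. A quick sanity check (a sketch, not part of this commit):

    import pyspike as spk
    from pyspike.spikes import add_auxiliary_spikes  # new home of the function

    # the package-level name still resolves to the same function object
    assert spk.add_auxiliary_spikes is add_auxiliary_spikes
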
diff --git a/pyspike/distances.py b/pyspike/distances.py
index f78c0d4..da603ad 100644
--- a/pyspike/distances.py
+++ b/pyspike/distances.py
@@ -12,31 +12,6 @@ from pyspike import PieceWiseConstFunc, PieceWiseLinFunc
############################################################
-# add_auxiliary_spikes
-############################################################
-def add_auxiliary_spikes( spike_train, T_end , T_start=0.0):
- """ Adds spikes at the beginning (T_start) and end (T_end) of the
- observation interval.
- Args:
- - spike_train: ordered array of spike times
- - T_end: end time of the observation interval
- - T_start: start time of the observation interval (default 0.0)
- Returns:
- - spike train with additional spikes at T_start and T_end.
-
- """
- assert spike_train[0] >= T_start, \
- "Spike train has events before the given start time"
- assert spike_train[-1] <= T_end, \
- "Spike train has events after the given end time"
- if spike_train[0] != T_start:
- spike_train = np.insert(spike_train, 0, T_start)
- if spike_train[-1] != T_end:
- spike_train = np.append(spike_train, T_end)
- return spike_train
-
-
-############################################################
# isi_distance
############################################################
def isi_distance(spikes1, spikes2):
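
With the helper gone from this module, `isi_distance` (and likewise `spike_distance`) no longer takes an end-time argument; auxiliary spikes are attached beforehand. A minimal sketch, with `t1` taken from the test suite below and the `t2` values chosen purely for illustration:

    import numpy as np
    import pyspike as spk

    t1 = spk.add_auxiliary_spikes(np.array([0.2, 0.4, 0.6, 0.7]), (0.0, 1.0))
    t2 = spk.add_auxiliary_spikes(np.array([0.1, 0.3, 0.5, 0.6]), (0.0, 1.0))
    f = spk.isi_distance(t1, t2)  # no trailing end-time argument anymore
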
diff --git a/pyspike/spikes.py b/pyspike/spikes.py
index 70b48ff..502c460 100644
--- a/pyspike/spikes.py
+++ b/pyspike/spikes.py
@@ -7,12 +7,46 @@ Copyright 2014, Mario Mulansky <mario.mulansky@gmx.net>
import numpy as np
+
+############################################################
+# add_auxiliary_spikes
+############################################################
+def add_auxiliary_spikes(spike_train, time_interval):
+ """ Adds spikes at the beginning and end of the given time interval.
+ Args:
+ - spike_train: ordered array of spike times
+ - time_interval: A pair (T_start, T_end) of values representing the start
+ and end time of the spike train measurement, or a single value representing
+ the end time; T_start is then assumed to be 0. Auxiliary spikes will be
+ added to the spike train at the beginning and end of this interval.
+ Returns:
+ - spike train with additional spikes at T_start and T_end.
+
+ """
+ try:
+ T_start = time_interval[0]
+ T_end = time_interval[1]
+ except (TypeError, IndexError): # not a pair, so assume a scalar end time
+ T_start = 0
+ T_end = time_interval
+
+ assert spike_train[0] >= T_start, \
+ "Spike train has events before the given start time"
+ assert spike_train[-1] <= T_end, \
+ "Spike train has events after the given end time"
+ if spike_train[0] != T_start:
+ spike_train = np.insert(spike_train, 0, T_start)
+ if spike_train[-1] != T_end:
+ spike_train = np.append(spike_train, T_end)
+ return spike_train
+
+
############################################################
# spike_train_from_string
############################################################
def spike_train_from_string(s, sep=' '):
""" Converts a string of times into an array of spike times.
- Params:
+ Args:
- s: the string with (ordered) spike times
- sep: The separator between the time numbers.
Returns:
@@ -22,11 +56,45 @@ def spike_train_from_string(s, sep=' '):
############################################################
+# load_spike_trains_from_txt
+############################################################
+def load_spike_trains_from_txt(file_name, time_interval=None,
+ separator=' ', comment='#'):
+ """ Loads a number of spike trains from a text file. Each line of the text
+ file should contain one spike train as a sequence of spike times separated
+ by `separator`. Empty lines as well as lines starting with `comment` are
+ neglected. The `time_interval` represents the start and the end of the spike
+ trains and it is used to add auxiliary spikes at the beginning and end of
+ each spike train. However, if `time_interval == None`, no auxiliary spikes
+ are added, but note that the Spike and ISI distance both require auxiliary
+ spikes.
+ Args:
+ - file_name: The name of the text file.
+ - time_interval: A pair (T_start, T_end) of values representing the start
+ and end time of the spike train measurement, or a single value representing
+ the end time; T_start is then assumed to be 0. Auxiliary spikes will be
+ added to the spike train at the beginning and end of this interval.
+ - separator: The character used to separate the values in the text file.
+ - comment: Lines starting with this character are ignored.
+ """
+ spike_trains = []
+ spike_file = open(file_name, 'r')
+ for line in spike_file:
+ if len(line) > 1 and not line.startswith(comment):
+ # use only lines that contain actual data, skipping comments
+ spike_train = spike_train_from_string(line)
+ if time_interval is not None: # add auxiliary spikes if an interval is given
+ spike_train = add_auxiliary_spikes(spike_train, time_interval)
+ spike_trains.append(spike_train)
+ return spike_trains
+
+
+############################################################
# merge_spike_trains
############################################################
def merge_spike_trains(spike_trains):
""" Merges a number of spike trains into a single spike train.
- Params:
+ Args:
- spike_trains: list of arrays of spike times
Returns:
- array with the merged spike times
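
Taken together, `time_interval` may be either a pair or a single end time. A short sketch of both call forms; the expected outputs follow from the implementation above and from `test_auxiliary_spikes` below:

    import numpy as np
    from pyspike.spikes import add_auxiliary_spikes, load_spike_trains_from_txt

    t = np.array([0.2, 0.4, 0.6, 0.7])
    # pair form: explicit (T_start, T_end)
    print(add_auxiliary_spikes(t, (0.1, 1.0)))  # [0.1 0.2 0.4 0.6 0.7 1.0]
    # scalar form: end time only, T_start falls back to 0
    print(add_auxiliary_spikes(t, 1.0))         # [0.0 0.2 0.4 0.6 0.7 1.0]
    # loading without auxiliary spikes (comments and empty lines are skipped)
    trains = load_spike_trains_from_txt("SPIKY_testdata.txt", time_interval=None)
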
diff --git a/test/SPIKY_testdata.txt b/test/SPIKY_testdata.txt
index 8fa3fcf..c8bea67 100755
--- a/test/SPIKY_testdata.txt
+++ b/test/SPIKY_testdata.txt
@@ -1,7 +1,10 @@
64.886 305.81 696 937.77 1059.7 1322.2 1576.1 1808.1 2121.5 2381.1 2728.6 2966.9 3223.7 3473.7 3644.3 3936.3
65.553 307.49 696.63 948.66 1070.4 1312.2 1712.7 1934.3 2117.6 2356.9 2727.3 2980.6 3226.9 3475.7 3726.4 3944
+# test comment
69.064 319.1 688.32 947.85 1071.8 1300.8 1697.2 1930.6 2139.4 2354.2 2723.7 2963.6 3221.3 3470.1
59.955 313.83 692.23 955.95 1070.4 1319.6 1681.9 1963.5 2151.4 2373.8 2729.4 2971.2 3220.2 3475.5 3632.3 3788.9
+# empty line
+
59.977 306.84 686.09 935.08 1059.9 1325.9 1543.4 1821.9 2150.2 2390.4 2724.5 2969.6 3222.5 3471.5 3576 3913.9
66.415 313.41 688.83 931.43 1051.8 1304.6 1555.6 1820.2 2150.5 2383.1 2723.4 2947.7 3196.6 3443.5 3575 3804.9
66.449 311.02 689.26 947.12 1058.9 1286.6 1708.2 1957.3 2124.8 2375.7 2709.4 2977.6 3191.1 3449.6 3590.4 3831.2
diff --git a/test/test_distance.py b/test/test_distance.py
index c43f0b3..92b99ae 100644
--- a/test/test_distance.py
+++ b/test/test_distance.py
@@ -13,14 +13,6 @@ from numpy.testing import assert_equal, assert_array_almost_equal
import pyspike as spk
-def test_auxiliary_spikes():
- t = np.array([0.2, 0.4, 0.6, 0.7])
- t_aux = spk.add_auxiliary_spikes(t, T_end=1.0, T_start=0.1)
- assert_equal(t_aux, [0.1, 0.2, 0.4, 0.6, 0.7, 1.0])
- t_aux = spk.add_auxiliary_spikes(t_aux, 1.0)
- assert_equal(t_aux, [0.0, 0.1, 0.2, 0.4, 0.6, 0.7, 1.0])
-
-
def test_isi():
# generate two spike trains:
t1 = np.array([0.2, 0.4, 0.6, 0.7])
@@ -31,8 +23,8 @@ def test_isi():
expected_isi = [-0.1/0.3, -0.1/0.3, 0.05/0.2, 0.05/0.2, -0.15/0.35,
-0.25/0.35, -0.05/0.35, 0.2/0.3, 0.25/0.3, 0.25/0.3]
- t1 = spk.add_auxiliary_spikes(t1, 1.0)
- t2 = spk.add_auxiliary_spikes(t2, 1.0)
+ t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0))
+ t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0))
f = spk.isi_distance(t1, t2)
# print("ISI: ", f.y)
@@ -47,8 +39,8 @@ def test_isi():
expected_times = [0.0,0.1,0.2,0.4,0.5,0.6,1.0]
expected_isi = [0.1/0.2, -0.1/0.3, -0.1/0.3, 0.1/0.2, 0.1/0.2, -0.0/0.5]
- t1 = spk.add_auxiliary_spikes(t1, 1.0)
- t2 = spk.add_auxiliary_spikes(t2, 1.0)
+ t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0))
+ t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0))
f = spk.isi_distance(t1, t2)
assert_equal(f.x, expected_times)
@@ -72,8 +64,8 @@ def test_spike():
expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
- t1 = spk.add_auxiliary_spikes(t1, 1.0)
- t2 = spk.add_auxiliary_spikes(t2, 1.0)
+ t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0))
+ t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0))
f = spk.spike_distance(t1, t2)
assert_equal(f.x, expected_times)
@@ -92,8 +84,8 @@ def test_spike():
expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
- t1 = spk.add_auxiliary_spikes(t1, 1.0)
- t2 = spk.add_auxiliary_spikes(t2, 1.0)
+ t1 = spk.add_auxiliary_spikes(t1, (0.0,1.0))
+ t2 = spk.add_auxiliary_spikes(t2, (0.0,1.0))
f = spk.spike_distance(t1, t2)
assert_equal(f.x, expected_times)
diff --git a/test/test_merge_spikes.py b/test/test_merge_spikes.py
deleted file mode 100644
index 3162700..0000000
--- a/test/test_merge_spikes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-""" test_merge_spikes.py
-
-Tests merging spikes
-
-Copyright 2014, Mario Mulansky <mario.mulansky@gmx.net>
-"""
-from __future__ import print_function
-import numpy as np
-
-import pyspike as spk
-
-def check_merged_spikes( merged_spikes, spike_trains ):
- # create a flat array with all spike events
- all_spikes = np.array([])
- for spike_train in spike_trains:
- all_spikes = np.append(all_spikes, spike_train)
- indices = np.zeros_like(all_spikes, dtype='bool')
- # check if we find all the spike events in the original spike trains
- for x in merged_spikes:
- i = np.where(all_spikes == x)[0][0] # the first axis and the first entry
- # change to something impossible so we dont find this event again
- all_spikes[i] = -1.0
- indices[i] = True
- assert( indices.all() )
-
-def test_merge_spike_trains():
-
- # first load the data
- spike_trains = []
- spike_file = open("SPIKY_testdata.txt", 'r')
- for line in spike_file:
- spike_trains.append(spk.spike_train_from_string(line))
-
- spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
- # test if result is sorted
- assert((spikes == np.sort(spikes)).all())
- # check merging
- check_merged_spikes( spikes, [spike_trains[0], spike_trains[1]] )
-
- spikes = spk.merge_spike_trains(spike_trains)
- # test if result is sorted
- assert((spikes == np.sort(spikes)).all())
- # check merging
- check_merged_spikes( spikes, spike_trains )
-
-
-if __name__ == "main":
- test_merge_spike_trains()
-
diff --git a/test/test_spikes.py b/test/test_spikes.py
new file mode 100644
index 0000000..dca580f
--- /dev/null
+++ b/test/test_spikes.py
@@ -0,0 +1,84 @@
+""" test_load.py
+
+Test loading of spike trains from text files
+
+Copyright 2014, Mario Mulansky <mario.mulansky@gmx.net>
+"""
+
+from __future__ import print_function
+import numpy as np
+from numpy.testing import assert_equal
+
+import pyspike as spk
+
+
+def test_auxiliary_spikes():
+ t = np.array([0.2, 0.4, 0.6, 0.7])
+ t_aux = spk.add_auxiliary_spikes(t, time_interval=(0.1, 1.0))
+ assert_equal(t_aux, [0.1, 0.2, 0.4, 0.6, 0.7, 1.0])
+ t_aux = spk.add_auxiliary_spikes(t_aux, time_interval=(0.0, 1.0))
+ assert_equal(t_aux, [0.0, 0.1, 0.2, 0.4, 0.6, 0.7, 1.0])
+
+
+def test_load_from_txt():
+ spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
+ time_interval=(0,4000))
+ assert len(spike_trains) == 40
+
+ # check the first spike train
+ spike_times = [0, 64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1,
+ 1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7,
+ 3644.3, 3936.3, 4000]
+ assert_equal(spike_times, spike_trains[0])
+
+ # check auxiliary spikes
+ for spike_train in spike_trains:
+ assert spike_train[0] == 0.0
+ assert spike_train[-1] == 4000
+
+ # load without adding auxiliary spikes
+ spike_trains2 = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
+ time_interval=None)
+ assert len(spike_trains2) == 40
+ # check that no auxiliary spikes were added
+ for i in xrange(len(spike_trains)):
+ assert len(spike_trains[i]) == len(spike_trains2[i])+2 # spike_trains[i] has the two extra auxiliary spikes
+
+
+def check_merged_spikes(merged_spikes, spike_trains):
+ # create a flat array with all spike events
+ all_spikes = np.array([])
+ for spike_train in spike_trains:
+ all_spikes = np.append(all_spikes, spike_train)
+ indices = np.zeros_like(all_spikes, dtype='bool')
+ # check if we find all the spike events in the original spike trains
+ for x in merged_spikes:
+ i = np.where(all_spikes == x)[0][0] # index of the first matching event
+ # change to something impossible so we don't find this event again
+ all_spikes[i] = -1.0
+ indices[i] = True
+ assert indices.all()
+
+
+def test_merge_spike_trains():
+ # first load the data
+ spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
+ time_interval=(0,4000))
+
+ spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
+ # test if result is sorted
+ assert((spikes == np.sort(spikes)).all())
+ # check merging
+ check_merged_spikes(spikes, [spike_trains[0], spike_trains[1]])
+
+ spikes = spk.merge_spike_trains(spike_trains)
+ # test if result is sorted
+ assert((spikes == np.sort(spikes)).all())
+ # check merging
+ check_merged_spikes(spikes, spike_trains)
+
+if __name__ == "main":
+ test_auxiliary_spikes()
+ test_load_from_txt()
+ test_merge_spike_trains()
+
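
The marking loop in `check_merged_spikes` is a multiset-equality check between the merged train and the concatenation of its inputs; an equivalent, more compact formulation (a sketch, not part of this commit) sorts both sides and compares:

    import numpy as np

    def check_merged_spikes_sorted(merged_spikes, spike_trains):
        # multiset equality: the merged train must contain exactly the
        # events of all input trains, in sorted order
        all_spikes = np.sort(np.concatenate(spike_trains))
        assert np.array_equal(np.sort(merged_spikes), all_spikes)
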