author     Mario Mulansky <mario.mulansky@gmx.net>   2016-06-18 16:27:51 -0700
committer  Mario Mulansky <mario.mulansky@gmx.net>   2016-06-18 16:27:51 -0700
commit     4691d0e77a024fbc73d1098ee557d65f8f2ddc89 (patch)
tree       27340d0c5d921ab8389e078559e02e9303894209
parent     c17cc8602414cec883c412008a4300b2c7ac7f80 (diff)
added function to import time series
New function import_spike_trains_from_time_series loads spike trains from time series of 0s and 1s, one spike train per line.
-rw-r--r--   pyspike/__init__.py    1
-rw-r--r--   pyspike/spikes.py     25
-rw-r--r--   test/test_spikes.py   19
3 files changed, 45 insertions, 0 deletions
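
For orientation, a minimal usage sketch of the new function, mirroring the call added in test/test_spikes.py below; the file name time_series.txt and the parameter values are illustrative only:

    import pyspike as spk

    # Each row of the file is a 0/1 time series; bins are assumed to be
    # 1 time unit wide, starting at t = 0 (illustrative values).
    spike_trains = spk.import_spike_trains_from_time_series("time_series.txt",
                                                            start_time=0,
                                                            time_bin=1)
    print(len(spike_trains), spike_trains[0].spikes[:5])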
diff --git a/pyspike/__init__.py b/pyspike/__init__.py
index 069090b..1e879c4 100644
--- a/pyspike/__init__.py
+++ b/pyspike/__init__.py
@@ -24,6 +24,7 @@ from .spike_sync import spike_sync_profile, spike_sync,\
from .psth import psth
from .spikes import load_spike_trains_from_txt, spike_train_from_string, \
+    import_spike_trains_from_time_series, \
merge_spike_trains, generate_poisson_spikes
# define the __version__ following
diff --git a/pyspike/spikes.py b/pyspike/spikes.py
index b18d7eb..1bf474c 100644
--- a/pyspike/spikes.py
+++ b/pyspike/spikes.py
@@ -57,6 +57,31 @@ def load_spike_trains_from_txt(file_name, edges,
return spike_trains
+def import_spike_trains_from_time_series(file_name, start_time, time_bin,
+                                         separator=None, comment='#'):
+    """ Imports spike trains from time series consisting of 0 and 1 denoting
+    the absence or presence of a spike. Each line in the data file represents
+    one spike train.
+
+    :param file_name: The name of the data file containing the time series.
+    :param start_time: The start time of the spike train measurement; it is
+                       used as T_start of the resulting spike trains.
+    :param time_bin: The length of one time bin in the time series; bin i is
+                     mapped to the spike time start_time + (i+1)*time_bin.
+    :param separator: The character used to separate the values in the text
+                      file.
+    :param comment: Lines starting with this character are ignored.
+
+    """
+    data = np.loadtxt(file_name, comments=comment, delimiter=separator)
+    time_points = start_time + time_bin + np.arange(len(data[0, :]))*time_bin
+    spike_trains = []
+    for time_series in data:
+        spike_trains.append(SpikeTrain(time_points[time_series > 0],
+                                       edges=[start_time,
+                                              time_points[-1]]))
+    return spike_trains
+
+
############################################################
# merge_spike_trains
############################################################
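
As a side note, the boolean indexing in import_spike_trains_from_time_series maps each 1 in a row to the end time of its bin; a standalone sketch of that mapping with made-up values:

    import numpy as np

    # One 0/1 time series with bins of width 1 starting at t = 0.
    start_time, time_bin = 0.0, 1.0
    time_series = np.array([0, 1, 0, 0, 1, 1])

    # Bin i is mapped to the time point start_time + (i+1)*time_bin.
    time_points = start_time + time_bin + np.arange(len(time_series)) * time_bin

    # Keep only the time points of bins that contain a spike -> [2. 5. 6.]
    print(time_points[time_series > 0])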
diff --git a/test/test_spikes.py b/test/test_spikes.py
index 609a819..bcface2 100644
--- a/test/test_spikes.py
+++ b/test/test_spikes.py
@@ -17,6 +17,10 @@ import os
TEST_PATH = os.path.dirname(os.path.realpath(__file__))
TEST_DATA = os.path.join(TEST_PATH, "PySpike_testdata.txt")
+TIME_SERIES_DATA = os.path.join(TEST_PATH, "time_series.txt")
+TIME_SERIES_SPIKES = os.path.join(TEST_PATH, "time_series_spike_trains.txt")
+
+
def test_load_from_txt():
spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))
assert len(spike_trains) == 40
@@ -33,6 +37,21 @@ def test_load_from_txt():
assert spike_train.t_end == 4000
+def test_load_time_series():
+    spike_trains = spk.import_spike_trains_from_time_series(TIME_SERIES_DATA,
+                                                            start_time=0,
+                                                            time_bin=1)
+    assert len(spike_trains) == 40
+    spike_trains_check = spk.load_spike_trains_from_txt(TIME_SERIES_SPIKES,
+                                                        edges=(0, 4000))
+
+    # check spike trains
+    for n in range(len(spike_trains)):
+        assert_equal(spike_trains[n].spikes, spike_trains_check[n].spikes)
+        assert_equal(spike_trains[n].t_start, 0)
+        assert_equal(spike_trains[n].t_end, 4000)
+
+
def check_merged_spikes(merged_spikes, spike_trains):
# create a flat array with all spike events
all_spikes = np.array([])