Diffstat (limited to 'pyspike/spikes.py')
-rw-r--r--  pyspike/spikes.py | 47
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/pyspike/spikes.py b/pyspike/spikes.py
index ab6d4c4..cf47043 100644
--- a/pyspike/spikes.py
+++ b/pyspike/spikes.py
@@ -62,6 +62,31 @@ def load_spike_trains_from_txt(file_name, edges,
     return spike_trains
+def import_spike_trains_from_time_series(file_name, start_time, time_bin,
+                                         separator=None, comment='#'):
+ """ Imports spike trains from time series consisting of 0 and 1 denoting
+ the absence or presence of a spike. Each line in the data file represents
+ one spike train.
+
+ :param file_name: The name of the data file containing the time series.
+ :param edges: A pair (T_start, T_end) of values representing the
+ start and end time of the spike train measurement
+ or a single value representing the end time, the
+ T_start is then assuemd as 0.
+ :param separator: The character used to seprate the values in the text file
+ :param comment: Lines starting with this character are ignored.
+
+ """
+    data = np.loadtxt(file_name, comments=comment, delimiter=separator)
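+    # bin i is mapped to the end of that bin: start_time + (i+1)*time_bin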
+    time_points = start_time + time_bin + np.arange(len(data[0, :]))*time_bin
+    spike_trains = []
+    for time_series in data:
+        spike_trains.append(SpikeTrain(time_points[time_series > 0],
+                                       edges=[start_time,
+                                              time_points[-1]]))
+    return spike_trains
+
+
############################################################
# save_spike_trains_to_txt
############################################################
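
As a usage sketch for the new import (the file name and bin size below are
hypothetical; this assumes a plain-text file with one 0/1 series per line,
e.g. "0 1 0 0 1"):

    from pyspike.spikes import import_spike_trains_from_time_series

    # 'binary_trains.txt' is a hypothetical example file
    spike_trains = import_spike_trains_from_time_series(
        "binary_trains.txt", start_time=0.0, time_bin=0.1)
    # a 1 in the i-th bin becomes a spike at start_time + (i+1)*time_bin
    print(spike_trains[0].spikes)
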
@@ -91,24 +116,10 @@ def merge_spike_trains(spike_trains):
:param spike_trains: list of :class:`.SpikeTrain`
:returns: spike train with the merged spike times
"""
-    # get the lengths of the spike trains
-    lens = np.array([len(st.spikes) for st in spike_trains])
-    merged_spikes = np.empty(np.sum(lens))
-    index = 0                             # the index for merged_spikes
-    indices = np.zeros_like(lens)         # indices of the spike trains
-    index_list = np.arange(len(indices))  # indices of indices of spike trains
-                                          # that have not yet reached the end
-    # list of the possible events in the spike trains
-    vals = [spike_trains[i].spikes[indices[i]] for i in index_list]
-    while len(index_list) > 0:
-        i = np.argmin(vals)             # the next spike is the minimum
-        merged_spikes[index] = vals[i]  # put it to the merged spike train
-        i = index_list[i]
-        index += 1                      # next index of merged spike train
-        indices[i] += 1                 # next index for the chosen spike train
-        if indices[i] >= lens[i]:       # remove spike train index if ended
-            index_list = index_list[index_list != i]
-        vals = [spike_trains[n].spikes[indices[n]] for n in index_list]
+    # concatenating and sorting with numpy is fast and also handles
+    # empty spike trains correctly
+    merged_spikes = np.concatenate([st.spikes for st in spike_trains])
+    merged_spikes.sort()
     return SpikeTrain(merged_spikes, [spike_trains[0].t_start,
                                       spike_trains[0].t_end])
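
For illustration, a minimal sketch of the rewritten merge (spike times chosen
arbitrarily; assumes the package-level SpikeTrain export):

    import numpy as np

    from pyspike import SpikeTrain
    from pyspike.spikes import merge_spike_trains

    st1 = SpikeTrain(np.array([0.1, 0.4, 0.9]), [0.0, 1.0])
    st2 = SpikeTrain(np.array([]), [0.0, 1.0])  # empty trains are handled now
    st3 = SpikeTrain(np.array([0.2, 0.7]), [0.0, 1.0])
    merged = merge_spike_trains([st1, st2, st3])
    # merged.spikes is the sorted union: [0.1 0.2 0.4 0.7 0.9]
    print(merged.spikes)

Note that the merged train inherits its edges from the first input train, so
all inputs are expected to share the same measurement interval.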