From d5995b82d0de1c5b448823b4acd4efe7d5d47eb4 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Thu, 30 Apr 2015 18:23:50 +0200 Subject: new version in docs --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 5427bb1..9b994c2 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -64,9 +64,9 @@ copyright = u'2014-2015, Mario Mulansky' # built documents. # # The short X.Y version. -version = '0.1' +version = '0.2' # The full version, including alpha/beta/rc tags. -release = '0.1' +release = '0.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -- cgit v1.2.3 From 7e3648264eb3f96257909e53939a70f2fa79f1be Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 4 May 2015 11:37:49 +0200 Subject: restructured docs moved tutorial from Readme to separate section. Readme now contains only two basic examples --- Changelog | 4 ++ Readme.rst | 192 ++-------------------------------------------------------- doc/index.rst | 8 ++- 3 files changed, 15 insertions(+), 189 deletions(-) diff --git a/Changelog b/Changelog index 2ac065b..209b138 100644 --- a/Changelog +++ b/Changelog @@ -1,3 +1,7 @@ +PySpike v0.2.1: + * addition of __version__ attribute + * restructured docs, Readme now only contains basic examples + PySpike v0.2: * introduction of SpikeTrain class. Breaking interface change of diff --git a/Readme.rst b/Readme.rst index e80c0f7..1094332 100644 --- a/Readme.rst +++ b/Readme.rst @@ -70,63 +70,11 @@ Then you can run the tests using the `nosetests` test framework: Finally, you should make PySpike's installation folder known to Python to be able to import pyspike in your own projects. Therefore, add your :code:`/path/to/PySpike` to the :code:`$PYTHONPATH` environment variable. -Spike trains ------------- -In PySpike, spike trains are represented by :class:`.SpikeTrain` objects. -A :class:`.SpikeTrain` object consists of the spike times given as `numpy` arrays as well as the edges of the spike train as :code:`[t_start, t_end]`. -The following code creates such a spike train with some arbitrary spike times: - -.. code:: python - - import numpy as np - from pyspike import SpikeTrain - - spike_train = SpikeTrain(np.array([0.1, 0.3, 0.45, 0.6, 0.9], [0.0, 1.0])) - -Loading from text files -....................... - -Typically, spike train data is loaded into PySpike from data files. -The most straight-forward data files are text files where each line represents one spike train given as an sequence of spike times. -An exemplary file with several spike trains is `PySpike_testdata.txt `_. -To quickly obtain spike trains from such files, PySpike provides the function :func:`.load_spike_trains_from_txt`. - -.. code:: python - - import numpy as np - import pyspike as spk - - spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt", - edges=(0, 4000)) - -This function expects the name of the data file as first parameter. -Furthermore, the time interval of the spike train measurement (edges of the spike trains) should be provided as a pair of start- and end-time values. -Furthermore, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function). -As result, :func:`.load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file. 
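A side note on the spike-train construction shown in the removed Readme text above: as committed, the closing parenthesis is misplaced, so the edges end up inside the ``np.array`` call. A minimal sketch of the presumably intended call, with the edges passed as a separate second argument (the test code further below, e.g. ``SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)``, uses the same two-argument form):

.. code:: python

    import numpy as np
    from pyspike import SpikeTrain

    # spike times as a numpy array, edges of the observation
    # interval as a separate [t_start, t_end] pair
    spike_train = SpikeTrain(np.array([0.1, 0.3, 0.45, 0.6, 0.9]),
                             [0.0, 1.0])
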
- - -Computing bivariate distances profiles ---------------------------------------- - -**Important note:** - ------------------------------- - - Spike trains are expected to be *sorted*! - For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted. - Make sure that all your spike trains are sorted, which is ensured if you use the :func:`.load_spike_trains_from_txt` function with the parameter `is_sorted=False` (default). - If in doubt, use :meth:`.SpikeTrain.sort()` to ensure a correctly sorted spike train. - - If you need to copy a spike train, use the :meth:`.SpikeTrain.copy()` method. - Simple assignment `t2 = t1` does not create a copy of the spike train data, but a reference as `numpy.array` is used for storing the data. - ------------------------------- - -ISI-distance -............ +Examples +----------------------------- -The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :class:`.SpikeTrain` s, and plots it with matplotlib: +The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :code:`SpikeTrain` s, and plots it with matplotlib: .. code:: python @@ -141,109 +89,8 @@ The following code loads some exemplary spike trains, computes the dissimilarity print("ISI distance: %.8f" % isi_profile.avrg()) plt.show() -The ISI-profile is a piece-wise constant function, and hence the function :func:`.isi_profile` returns an instance of the :class:`.PieceWiseConstFunc` class. -As shown above, this class allows you to obtain arrays that can be used to plot the function with :code:`plt.plt`, but also to compute the time average, which amounts to the final scalar ISI-distance. -By default, the time average is computed for the whole :class:`.PieceWiseConstFunc` function. -However, it is also possible to obtain the average of a specific interval by providing a pair of floats defining the start and end of the interval. -For the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals: - -.. code:: python - - isi1 = isi_profile.avrg(interval=(0, 1000)) - isi2 = isi_profile.avrg(interval=(1000, 2000)) - isi3 = isi_profile.avrg(interval=[(0, 1000), (2000, 3000)]) - isi4 = isi_profile.avrg(interval=[(1000, 2000), (3000, 4000)]) - -Note, how also multiple intervals can be supplied by giving a list of tuples. - -If you are only interested in the scalar ISI-distance and not the profile, you can simply use: - -.. code:: python - - isi_dist = spk.isi_distance(spike_trains[0], spike_trains[1], interval) - -where :code:`interval` is optional, as above, and if omitted the ISI-distance is computed for the complete spike trains. - - -SPIKE-distance -.............. - -To compute for the spike distance profile you use the function :func:`.spike_profile` instead of :code:`isi_profile` above. -But the general approach is very similar: - -.. code:: python - - import matplotlib.pyplot as plt - import pyspike as spk - - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - edges=(0, 4000)) - spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1]) - x, y = spike_profile.get_plottable_data() - plt.plot(x, y, '--k') - print("SPIKE distance: %.8f" % spike_profile.avrg()) - plt.show() - -This short example computes and plots the SPIKE-profile of the first two spike trains in the file :code:`PySpike_testdata.txt`. 
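For completeness, a minimal sketch (assuming the ``PySpike_testdata.txt`` file used throughout) of how the profile-based and the direct distance computations described above relate: averaging a profile over an interval should give the same value as calling the corresponding distance function with that interval, since the scalar distances are defined as time averages of the profiles.

.. code:: python

    import pyspike as spk

    spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                                  edges=(0, 4000))

    # time average of the ISI-profile over a sub-interval ...
    isi_profile = spk.isi_profile(spike_trains[0], spike_trains[1])
    d_from_profile = isi_profile.avrg(interval=(0, 1000))

    # ... should match the direct distance over the same interval
    d_direct = spk.isi_distance(spike_trains[0], spike_trains[1], (0, 1000))
    assert abs(d_from_profile - d_direct) < 1e-12
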
-In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :class:`.PieceWiseLinFunc` object. -Just like the :class:`.PieceWiseConstFunc` for the ISI-profile, the :class:`.PieceWiseLinFunc` provides a :meth:`.PieceWiseLinFunc.get_plottable_data` member function that returns arrays that can be used directly to plot the function. -Furthermore, the :meth:`.PieceWiseLinFunc.avrg` member function returns the average of the profile defined as the overall SPIKE distance. -As above, you can provide an interval as a pair of floats as well as a sequence of such pairs to :code:`avrg` to specify the averaging interval if required. -Again, you can use - -.. code:: python - - spike_dist = spk.spike_distance(spike_trains[0], spike_trains[1], interval) - -to compute the SPIKE distance directly, if you are not interested in the profile at all. -The parameter :code:`interval` is optional and if neglected the whole spike train is used. - - -SPIKE synchronization -..................... - -**Important note:** - ------------------------------- - - SPIKE-Synchronization measures *similarity*. - That means, a value of zero indicates absence of synchrony, while a value of one denotes the presence of synchrony. - This is exactly opposite to the other two measures: ISI- and SPIKE-distance. - ----------------------- - - -SPIKE synchronization is another approach to measure spike synchrony. -In contrast to the SPIKE- and ISI-distance, it measures similarity instead of dissimilarity, i.e. higher values represent larger synchrony. -Another difference is that the SPIKE synchronization profile is only defined exactly at the spike times, not for the whole interval of the spike trains. -Therefore, it is represented by a :class:`.DiscreteFunction`. - -To compute for the spike synchronization profile, PySpike provides the function :func:`.spike_sync_profile`. -The general handling of the profile, however, is similar to the other profiles above: - -.. code:: python - - import matplotlib.pyplot as plt - import pyspike as spk - - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", - edges=(0, 4000)) - spike_profile = spk.spike_sync_profile(spike_trains[0], spike_trains[1]) - x, y = spike_profile.get_plottable_data() - -For the direct computation of the overall spike synchronization value within some interval, the :func:`.spike_sync` function can be used: - -.. code:: python - - spike_sync = spk.spike_sync(spike_trains[0], spike_trains[1], interval) - - -Computing multivariate profiles and distances ----------------------------------------------- - -To compute the multivariate ISI-profile, SPIKE-profile or SPIKE-Synchronization profile f a set of spike trains, PySpike provides multi-variate version of the profile function. -The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :func:`.isi_profile_multi`, :func:`.spike_profile_multi`, :func:`.spike_sync_profile_multi` functions: +The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :code:`isi_profile_multi`, :code:`spike_profile_multi`, :code:`spike_sync_profile_multi` functions: .. 
code:: python @@ -253,36 +100,7 @@ The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-prof avrg_spike_profile = spk.spike_profile_multi(spike_trains) avrg_spike_sync_profile = spk.spike_sync_profile_multi(spike_trains) -All functions take an optional parameter :code:`indices`, a list of indices that allows to define the spike trains that should be used for the multivariate profile. -As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions: :func:`.isi_distance_multi`, :func:`.spike_distance_multi` and :func:`.spike_sync_multi`, that return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value. -Those functions also accept an :code:`interval` parameter that can be used to specify the begin and end of the averaging interval as a pair of floats, if neglected the complete interval is used. - -Another option to characterize large sets of spike trains are distance matrices. -Each entry in the distance matrix represents a bivariate distance (similarity for SPIKE-Synchronization) of two spike trains. -The distance matrix is symmetric and has zero values (ones) at the diagonal and is computed with the functions :func:`.isi_distance_matrix`, :func:`.spike_distance_matrix` and :func:`.spike_sync_matrix`. -The following example computes and plots the ISI- and SPIKE-distance matrix as well as the SPIKE-Synchronization-matrix, with different intervals. - -.. code:: python - - spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000) - - plt.figure() - isi_distance = spk.isi_distance_matrix(spike_trains) - plt.imshow(isi_distance, interpolation='none') - plt.title("ISI-distance") - - plt.figure() - spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0,1000)) - plt.imshow(spike_distance, interpolation='none') - plt.title("SPIKE-distance") - - plt.figure() - spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000,4000)) - plt.imshow(spike_sync, interpolation='none') - plt.title("SPIKE-Sync") - - plt.show() - +More examples with detailed descriptions can be found in the `tutorial section `_. =============================================================================== diff --git a/doc/index.rst b/doc/index.rst index bc05b60..f34690b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -5,6 +5,11 @@ .. include:: ../Readme.rst +Tutorial +=================================== + +.. include:: tutorial.rst + Reference =================================== @@ -14,9 +19,8 @@ Reference pyspike Indices and tables -================== +=================================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` - -- cgit v1.2.3 From a7df02c0dc064dbb0586a394bb74200c7d6d67df Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 4 May 2015 11:40:25 +0200 Subject: removed class reference from Readme --- Readme.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Readme.rst b/Readme.rst index 1094332..b8858f5 100644 --- a/Readme.rst +++ b/Readme.rst @@ -22,7 +22,7 @@ All source codes are available on `Github Date: Thu, 7 May 2015 23:08:12 +0200 Subject: performance improvements use recursive approach to compute average profile for average multivariate distances, dont compute average multivariate profile, but average distances directly. 
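The gain from this recursive scheme comes from keeping the profile additions balanced: adding two piece-wise functions costs roughly the sum of their sizes, so accumulating all pairwise profiles one after another into a single, ever-growing average is much more expensive than merging them tree-wise. A hypothetical stand-alone sketch of the idea (``merge_profiles`` is not part of PySpike; the actual implementation, ``divide_and_conquer`` in ``pyspike/generic.py`` below, recurses over index pairs and computes each pairwise profile on the fly):

.. code:: python

    def merge_profiles(profiles):
        """Balanced (tree-wise) in-place summation of a list of profiles."""
        if len(profiles) == 1:
            return profiles[0]
        n = len(profiles) // 2
        left = merge_profiles(profiles[:n])
        right = merge_profiles(profiles[n:])
        left.add(right)  # profile objects provide an in-place add() method
        return left

With P pairwise profiles this still performs the same P-1 additions, but most of them act on small intermediate profiles rather than on the full accumulated one, which is presumably where the reported speed-up comes from.
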
--- examples/performance.py | 42 +++++++++++++++++++++++ pyspike/cython/cython_add.pyx | 20 +++++------ pyspike/generic.py | 77 +++++++++++++++++++++++++++++++++++++++---- pyspike/isi_distance.py | 6 ++-- pyspike/spike_distance.py | 6 ++-- test/test_distance.py | 14 ++++++-- 6 files changed, 139 insertions(+), 26 deletions(-) create mode 100644 examples/performance.py diff --git a/examples/performance.py b/examples/performance.py new file mode 100644 index 0000000..469b5ab --- /dev/null +++ b/examples/performance.py @@ -0,0 +1,42 @@ +""" Compute distances of large sets of spike trains for performance tests +""" + +from __future__ import print_function + +import pyspike as spk +from datetime import datetime +import cProfile + +M = 100 # number of spike trains +r = 1.0 # rate of Poisson spike times +T = 1E3 # length of spike trains + +print("%d spike trains with %d spikes" % (M, int(r*T))) + +spike_trains = [] + +t_start = datetime.now() +for i in xrange(M): + spike_trains.append(spk.generate_poisson_spikes(r, T)) +t_end = datetime.now() +runtime = (t_end-t_start).total_seconds() + +print("Spike generation runtime: %.3fs" % runtime) + +print("================ ISI COMPUTATIONS ================") +print(" MULTIVARIATE DISTANCE") +cProfile.run('spk.isi_distance_multi(spike_trains)') +print(" MULTIVARIATE PROFILE") +cProfile.run('spk.isi_profile_multi(spike_trains)') + +print("================ SPIKE COMPUTATIONS ================") +print(" MULTIVARIATE DISTANCE") +cProfile.run('spk.spike_distance_multi(spike_trains)') +print(" MULTIVARIATE PROFILE") +cProfile.run('spk.spike_profile_multi(spike_trains)') + +print("================ SPIKE-SYNC COMPUTATIONS ================") +print(" MULTIVARIATE DISTANCE") +cProfile.run('spk.spike_sync_multi(spike_trains)') +print(" MULTIVARIATE PROFILE") +cProfile.run('spk.spike_sync_profile_multi(spike_trains)') diff --git a/pyspike/cython/cython_add.pyx b/pyspike/cython/cython_add.pyx index ac64005..8da1e53 100644 --- a/pyspike/cython/cython_add.pyx +++ b/pyspike/cython/cython_add.pyx @@ -83,13 +83,9 @@ def add_piece_wise_const_cython(double[:] x1, double[:] y1, else: # both arrays reached the end simultaneously # only the last x-value missing x_new[index+1] = x1[N1-1] - # the last value is again the end of the interval - # x_new[index+1] = x1[-1] - # only use the data that was actually filled - x1 = x_new[:index+2] - y1 = y_new[:index+1] # end nogil - return np.array(x_new[:index+2]), np.array(y_new[:index+1]) + # return np.asarray(x_new[:index+2]), np.asarray(y_new[:index+1]) + return np.asarray(x_new[:index+2]), np.asarray(y_new[:index+1]) ############################################################ @@ -169,9 +165,9 @@ def add_piece_wise_lin_cython(double[:] x1, double[:] y11, double[:] y12, y2_new[index] = y12[N1-2]+y22[N2-2] # only use the data that was actually filled # end nogil - return (np.array(x_new[:index+2]), - np.array(y1_new[:index+1]), - np.array(y2_new[:index+1])) + return (np.asarray(x_new[:index+2]), + np.asarray(y1_new[:index+1]), + np.asarray(y2_new[:index+1])) ############################################################ @@ -230,6 +226,6 @@ def add_discrete_function_cython(double[:] x1, double[:] y1, double[:] mp1, # the last value is again the end of the interval # only use the data that was actually filled - return (np.array(x_new[:index+1]), - np.array(y_new[:index+1]), - np.array(mp_new[:index+1])) + return (np.asarray(x_new[:index+1]), + np.asarray(y_new[:index+1]), + np.asarray(mp_new[:index+1])) diff --git a/pyspike/generic.py 
b/pyspike/generic.py index 4f278d2..41affcb 100644 --- a/pyspike/generic.py +++ b/pyspike/generic.py @@ -31,6 +31,69 @@ def _generic_profile_multi(spike_trains, pair_distance_func, indices=None): Returns: - The averaged multi-variate distance of all pairs """ + + def divide_and_conquer(pairs1, pairs2): + """ recursive calls by splitting the two lists in half. + """ + L1 = len(pairs1) + if L1 > 1: + dist_prof1 = divide_and_conquer(pairs1[:L1/2], pairs1[L1/2:]) + else: + dist_prof1 = pair_distance_func(spike_trains[pairs1[0][0]], + spike_trains[pairs1[0][1]]) + L2 = len(pairs2) + if L2 > 1: + dist_prof2 = divide_and_conquer(pairs2[:L2/2], pairs2[L2/2:]) + else: + dist_prof2 = pair_distance_func(spike_trains[pairs2[0][0]], + spike_trains[pairs2[0][1]]) + dist_prof1.add(dist_prof2) + return dist_prof1 + + if indices is None: + indices = np.arange(len(spike_trains)) + indices = np.array(indices) + # check validity of indices + assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ + "Invalid index list." + # generate a list of possible index pairs + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] + + L = len(pairs) + if L > 1: + # recursive iteration through the list of pairs to get average profile + avrg_dist = divide_and_conquer(pairs[:len(pairs)/2], + pairs[len(pairs)/2:]) + else: + avrg_dist = pair_distance_func(spike_trains[pairs[0][0]], + spike_trains[pairs[0][1]]) + + return avrg_dist, L + + +############################################################ +# _generic_distance_multi +############################################################ +def _generic_distance_multi(spike_trains, pair_distance_func, + indices=None, interval=None): + """ Internal implementation detail, don't call this function directly, + use isi_distance_multi or spike_distance_multi instead. + + Computes the multi-variate distance for a set of spike-trains using the + pair_dist_func to compute pair-wise distances. That is it computes the + average distance of all pairs of spike-trains: + :math:`S(t) = 2/((N(N-1)) sum_{} D_{i,j}`, + where the sum goes over all pairs . 
+ Args: + - spike_trains: list of spike trains + - pair_distance_func: function computing the distance of two spike trains + - indices: list of indices defining which spike trains to use, + if None all given spike trains are used (default=None) + Returns: + - The averaged multi-variate distance of all pairs + """ + if indices is None: indices = np.arange(len(spike_trains)) indices = np.array(indices) @@ -40,13 +103,13 @@ def _generic_profile_multi(spike_trains, pair_distance_func, indices=None): # generate a list of possible index pairs pairs = [(indices[i], j) for i in range(len(indices)) for j in indices[i+1:]] - # start with first pair - (i, j) = pairs[0] - average_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - for (i, j) in pairs[1:]: - current_dist = pair_distance_func(spike_trains[i], spike_trains[j]) - average_dist.add(current_dist) # add to the average - return average_dist, len(pairs) + + avrg_dist = 0.0 + for (i, j) in pairs: + avrg_dist += pair_distance_func(spike_trains[i], spike_trains[j], + interval) + + return avrg_dist/len(pairs) ############################################################ diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index aeab0df..21df561 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -3,7 +3,8 @@ # Distributed under the BSD License from pyspike import PieceWiseConstFunc -from pyspike.generic import _generic_profile_multi, _generic_distance_matrix +from pyspike.generic import _generic_profile_multi, _generic_distance_multi, \ + _generic_distance_matrix ############################################################ @@ -112,7 +113,8 @@ def isi_distance_multi(spike_trains, indices=None, interval=None): :returns: The time-averaged multivariate ISI distance :math:`D_I` :rtype: double """ - return isi_profile_multi(spike_trains, indices).avrg(interval) + return _generic_distance_multi(spike_trains, isi_distance, indices, + interval) ############################################################ diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index cc620d4..75b3b0e 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -3,7 +3,8 @@ # Distributed under the BSD License from pyspike import PieceWiseLinFunc -from pyspike.generic import _generic_profile_multi, _generic_distance_matrix +from pyspike.generic import _generic_profile_multi, _generic_distance_multi, \ + _generic_distance_matrix ############################################################ @@ -117,7 +118,8 @@ def spike_distance_multi(spike_trains, indices=None, interval=None): :returns: The averaged multi-variate spike distance :math:`D_S`. 
:rtype: double """ - return spike_profile_multi(spike_trains, indices).avrg(interval) + return _generic_distance_multi(spike_trains, spike_distance, indices, + interval) ############################################################ diff --git a/test/test_distance.py b/test/test_distance.py index 19da35f..e45ac16 100644 --- a/test/test_distance.py +++ b/test/test_distance.py @@ -196,7 +196,7 @@ def test_spike_sync(): 0.4, decimal=16) -def check_multi_profile(profile_func, profile_func_multi): +def check_multi_profile(profile_func, profile_func_multi, dist_func_multi): # generate spike trains: t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0) t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0) @@ -213,10 +213,14 @@ def check_multi_profile(profile_func, profile_func_multi): f_multi = profile_func_multi(spike_trains, [0, 1]) assert f_multi.almost_equal(f12, decimal=14) + d = dist_func_multi(spike_trains, [0, 1]) + assert_equal(f_multi.avrg(), d) f_multi1 = profile_func_multi(spike_trains, [1, 2, 3]) f_multi2 = profile_func_multi(spike_trains[1:]) assert f_multi1.almost_equal(f_multi2, decimal=14) + d = dist_func_multi(spike_trains, [1, 2, 3]) + assert_almost_equal(f_multi1.avrg(), d, decimal=14) f = copy(f12) f.add(f13) @@ -224,6 +228,8 @@ def check_multi_profile(profile_func, profile_func_multi): f.mul_scalar(1.0/3) f_multi = profile_func_multi(spike_trains, [0, 1, 2]) assert f_multi.almost_equal(f, decimal=14) + d = dist_func_multi(spike_trains, [0, 1, 2]) + assert_almost_equal(f_multi.avrg(), d, decimal=14) f.mul_scalar(3) # revert above normalization f.add(f14) @@ -235,11 +241,13 @@ def check_multi_profile(profile_func, profile_func_multi): def test_multi_isi(): - check_multi_profile(spk.isi_profile, spk.isi_profile_multi) + check_multi_profile(spk.isi_profile, spk.isi_profile_multi, + spk.isi_distance_multi) def test_multi_spike(): - check_multi_profile(spk.spike_profile, spk.spike_profile_multi) + check_multi_profile(spk.spike_profile, spk.spike_profile_multi, + spk.spike_distance_multi) def test_multi_spike_sync(): -- cgit v1.2.3 From 76a4bbcc733bdd24bb61072a341c43a14b7f83d1 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 8 May 2015 11:57:00 +0200 Subject: performance improvement for multivar spike sync dont compute the average profile in the function spike_sync_multi, but rather compute the overall average distance directly --- examples/performance.py | 29 +++++++++++++++++++++++------ pyspike/DiscreteFunc.py | 17 +++++++++-------- pyspike/spike_sync.py | 22 ++++++++++++++++++++-- 3 files changed, 52 insertions(+), 16 deletions(-) diff --git a/examples/performance.py b/examples/performance.py index 469b5ab..94dae25 100644 --- a/examples/performance.py +++ b/examples/performance.py @@ -6,6 +6,7 @@ from __future__ import print_function import pyspike as spk from datetime import datetime import cProfile +import pstats M = 100 # number of spike trains r = 1.0 # rate of Poisson spike times @@ -22,21 +23,37 @@ t_end = datetime.now() runtime = (t_end-t_start).total_seconds() print("Spike generation runtime: %.3fs" % runtime) +print() print("================ ISI COMPUTATIONS ================") print(" MULTIVARIATE DISTANCE") -cProfile.run('spk.isi_distance_multi(spike_trains)') +cProfile.run('spk.isi_distance_multi(spike_trains)', 'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) + print(" MULTIVARIATE PROFILE") -cProfile.run('spk.isi_profile_multi(spike_trains)') +cProfile.run('spk.isi_profile_multi(spike_trains)', 
'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) print("================ SPIKE COMPUTATIONS ================") print(" MULTIVARIATE DISTANCE") -cProfile.run('spk.spike_distance_multi(spike_trains)') +cProfile.run('spk.spike_distance_multi(spike_trains)', 'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) + print(" MULTIVARIATE PROFILE") -cProfile.run('spk.spike_profile_multi(spike_trains)') +cProfile.run('spk.spike_profile_multi(spike_trains)', 'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) print("================ SPIKE-SYNC COMPUTATIONS ================") print(" MULTIVARIATE DISTANCE") -cProfile.run('spk.spike_sync_multi(spike_trains)') +cProfile.run('spk.spike_sync_multi(spike_trains)', 'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) + print(" MULTIVARIATE PROFILE") -cProfile.run('spk.spike_sync_profile_multi(spike_trains)') +cProfile.run('spk.spike_sync_profile_multi(spike_trains)', 'performance.stat') +p = pstats.Stats('performance.stat') +p.strip_dirs().sort_stats('tottime').print_stats(5) diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py index 33b7a81..dfe2cab 100644 --- a/pyspike/DiscreteFunc.py +++ b/pyspike/DiscreteFunc.py @@ -125,15 +125,15 @@ class DiscreteFunc(object): def integral(self, interval=None): """ Returns the integral over the given interval. For the discrete - function, this amounts to the sum over all values divided by the total - multiplicity. + function, this amounts to two values: the sum over all values and the + sum over all multiplicities. :param interval: integration interval given as a pair of floats, or a sequence of pairs in case of multiple intervals, if None the integral over the whole function is computed. :type interval: Pair, sequence of pairs, or None. - :returns: the integral - :rtype: float + :returns: the summed values and the summed multiplicity + :rtype: pair of float """ def get_indices(ival): @@ -147,7 +147,7 @@ class DiscreteFunc(object): if interval is None: # no interval given, integrate over the whole spike train # don't count the first value, which is zero by definition - return 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1]) + return (1.0 * np.sum(self.y[1:-1]), np.sum(self.mp[1:-1])) # check if interval is as sequence assert isinstance(interval, collections.Sequence), \ @@ -156,7 +156,7 @@ class DiscreteFunc(object): if not isinstance(interval[0], collections.Sequence): # find the indices corresponding to the interval start_ind, end_ind = get_indices(interval) - return (np.sum(self.y[start_ind:end_ind]) / + return (np.sum(self.y[start_ind:end_ind]), np.sum(self.mp[start_ind:end_ind])) else: value = 0.0 @@ -166,7 +166,7 @@ class DiscreteFunc(object): start_ind, end_ind = get_indices(ival) value += np.sum(self.y[start_ind:end_ind]) multiplicity += np.sum(self.mp[start_ind:end_ind]) - return value/multiplicity + return (value, multiplicity) def avrg(self, interval=None): """ Computes the average of the interval sequence: @@ -180,7 +180,8 @@ class DiscreteFunc(object): :returns: the average a. :rtype: float """ - return self.integral(interval) + val, mp = self.integral(interval) + return val/mp def add(self, f): """ Adds another `DiscreteFunc` function to this function. 
diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index 9d2e363..0c78228 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -3,6 +3,7 @@ # Copyright 2014-2015, Mario Mulansky # Distributed under the BSD License +import numpy as np from functools import partial from pyspike import DiscreteFunc from pyspike.generic import _generic_profile_multi, _generic_distance_matrix @@ -131,8 +132,25 @@ def spike_sync_multi(spike_trains, indices=None, interval=None, max_tau=None): :rtype: double """ - return spike_sync_profile_multi(spike_trains, indices, - max_tau).avrg(interval) + if indices is None: + indices = np.arange(len(spike_trains)) + indices = np.array(indices) + # check validity of indices + assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \ + "Invalid index list." + # generate a list of possible index pairs + pairs = [(indices[i], j) for i in range(len(indices)) + for j in indices[i+1:]] + + coincidence = 0.0 + mp = 0.0 + for (i, j) in pairs: + profile = spike_sync_profile(spike_trains[i], spike_trains[j]) + summed_vals = profile.integral(interval) + coincidence += summed_vals[0] + mp += summed_vals[1] + + return coincidence/mp ############################################################ -- cgit v1.2.3 From f44d78a5f0b4ab25bb443accdcb9fc1bd8ff57da Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 8 May 2015 12:08:48 +0200 Subject: improved error message --- pyspike/isi_distance.py | 4 ++-- pyspike/spike_distance.py | 4 ++-- pyspike/spike_sync.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index 21df561..8b1e9bf 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -25,9 +25,9 @@ def isi_profile(spike_train1, spike_train2): """ # check whether the spike trains are defined for the same interval assert spike_train1.t_start == spike_train2.t_start, \ - "Given spike trains seems not to have auxiliary spikes!" + "Given spike trains are not defined on the same interval!" assert spike_train1.t_end == spike_train2.t_end, \ - "Given spike trains seems not to have auxiliary spikes!" + "Given spike trains are not defined on the same interval!" # load cython implementation try: diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index 75b3b0e..499ab77 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -25,9 +25,9 @@ def spike_profile(spike_train1, spike_train2): """ # check whether the spike trains are defined for the same interval assert spike_train1.t_start == spike_train2.t_start, \ - "Given spike trains seems not to have auxiliary spikes!" + "Given spike trains are not defined on the same interval!" assert spike_train1.t_end == spike_train2.t_end, \ - "Given spike trains seems not to have auxiliary spikes!" + "Given spike trains are not defined on the same interval!" # cython implementation try: diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index 0c78228..7d429e4 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -30,9 +30,9 @@ def spike_sync_profile(spike_train1, spike_train2, max_tau=None): """ # check whether the spike trains are defined for the same interval assert spike_train1.t_start == spike_train2.t_start, \ - "Given spike trains seems not to have auxiliary spikes!" + "Given spike trains are not defined on the same interval!" assert spike_train1.t_end == spike_train2.t_end, \ - "Given spike trains seems not to have auxiliary spikes!" 
+ "Given spike trains are not defined on the same interval!" # cython implementation try: -- cgit v1.2.3 From 619ffd7105203938a26075c79a77d63960da9922 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 8 May 2015 12:29:47 +0200 Subject: renamed cython_distance module -> cython_profiles --- pyspike/cython/cython_distance.pyx | 423 ------------------------------------- pyspike/cython/cython_profiles.pyx | 423 +++++++++++++++++++++++++++++++++++++ pyspike/isi_distance.py | 10 +- pyspike/spike_distance.py | 16 +- pyspike/spike_sync.py | 15 +- setup.py | 8 +- 6 files changed, 447 insertions(+), 448 deletions(-) delete mode 100644 pyspike/cython/cython_distance.pyx create mode 100644 pyspike/cython/cython_profiles.pyx diff --git a/pyspike/cython/cython_distance.pyx b/pyspike/cython/cython_distance.pyx deleted file mode 100644 index 6ee0181..0000000 --- a/pyspike/cython/cython_distance.pyx +++ /dev/null @@ -1,423 +0,0 @@ -#cython: boundscheck=False -#cython: wraparound=False -#cython: cdivision=True - -""" -cython_distances.pyx - -cython implementation of the isi- and spike-distance - -Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects -improves the performance of spike_distance by a factor of 10! - -Copyright 2014, Mario Mulansky - -Distributed under the BSD License - -""" - -""" -To test whether things can be optimized: remove all yellow stuff -in the html output:: - - cython -a cython_distance.pyx - -which gives:: - - cython_distance.html - -""" - -import numpy as np -cimport numpy as np - -from libc.math cimport fabs -from libc.math cimport fmax -from libc.math cimport fmin - -DTYPE = np.float -ctypedef np.float_t DTYPE_t - - -############################################################ -# isi_distance_cython -############################################################ -def isi_distance_cython(double[:] s1, double[:] s2, - double t_start, double t_end): - - cdef double[:] spike_events - cdef double[:] isi_values - cdef int index1, index2, index - cdef int N1, N2 - cdef double nu1, nu2 - N1 = len(s1) - N2 = len(s2) - - spike_events = np.empty(N1+N2+2) - # the values have one entry less as they are defined at the intervals - isi_values = np.empty(N1+N2+1) - - # first x-value of the profile - spike_events[0] = t_start - - # first interspike interval - check if a spike exists at the start time - if s1[0] > t_start: - # edge correction - nu1 = fmax(s1[0]-t_start, s1[1]-s1[0]) - index1 = -1 - else: - nu1 = s1[1]-s1[0] - index1 = 0 - - if s2[0] > t_start: - # edge correction - nu2 = fmax(s2[0]-t_start, s2[1]-s2[0]) - index2 = -1 - else: - nu2 = s2[1]-s2[0] - index2 = 0 - - isi_values[0] = fabs(nu1-nu2)/fmax(nu1, nu2) - index = 1 - - with nogil: # release the interpreter to allow multithreading - while index1+index2 < N1+N2-2: - # check which spike is next, only if there are spikes left in 1 - # next spike in 1 is earlier, or there are no spikes left in 2 - if (index1 < N1-1) and ((index2 == N2-1) or - (s1[index1+1] < s2[index2+1])): - index1 += 1 - spike_events[index] = s1[index1] - if index1 < N1-1: - nu1 = s1[index1+1]-s1[index1] - else: - # edge correction - nu1 = fmax(t_end-s1[index1], nu1) - elif (index2 < N2-1) and ((index1 == N1-1) or - (s1[index1+1] > s2[index2+1])): - index2 += 1 - spike_events[index] = s2[index2] - if index2 < N2-1: - nu2 = s2[index2+1]-s2[index2] - else: - # edge correction - nu2 = fmax(t_end-s2[index2], nu2) - else: # s1[index1+1] == s2[index2+1] - index1 += 1 - index2 += 1 - spike_events[index] = s1[index1] - if index1 < N1-1: - nu1 = 
s1[index1+1]-s1[index1] - else: - # edge correction - nu1 = fmax(t_end-s1[index1], nu1) - if index2 < N2-1: - nu2 = s2[index2+1]-s2[index2] - else: - # edge correction - nu2 = fmax(t_end-s2[index2], nu2) - # compute the corresponding isi-distance - isi_values[index] = fabs(nu1 - nu2) / fmax(nu1, nu2) - index += 1 - # the last event is the interval end - if spike_events[index-1] == t_end: - index -= 1 - else: - spike_events[index] = t_end - # end nogil - - return spike_events[:index+1], isi_values[:index] - - -############################################################ -# get_min_dist_cython -############################################################ -cdef inline double get_min_dist_cython(double spike_time, - double[:] spike_train, - # use memory view to ensure inlining - # np.ndarray[DTYPE_t,ndim=1] spike_train, - int N, - int start_index, - double t_start, double t_end) nogil: - """ Returns the minimal distance |spike_time - spike_train[i]| - with i>=start_index. - """ - cdef double d, d_temp - # start with the distance to the start time - d = fabs(spike_time - t_start) - if start_index < 0: - start_index = 0 - while start_index < N: - d_temp = fabs(spike_time - spike_train[start_index]) - if d_temp > d: - return d - else: - d = d_temp - start_index += 1 - - # finally, check the distance to end time - d_temp = fabs(t_end - spike_time) - if d_temp > d: - return d - else: - return d_temp - - -############################################################ -# isi_avrg_cython -############################################################ -cdef inline double isi_avrg_cython(double isi1, double isi2) nogil: - return 0.5*(isi1+isi2)*(isi1+isi2) - # alternative definition to obtain ~ 0.5 for Poisson spikes - # return 0.5*(isi1*isi1+isi2*isi2) - - -############################################################ -# spike_distance_cython -############################################################ -def spike_distance_cython(double[:] t1, double[:] t2, - double t_start, double t_end): - - cdef double[:] spike_events - cdef double[:] y_starts - cdef double[:] y_ends - - cdef int N1, N2, index1, index2, index - cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 - cdef double isi1, isi2, s1, s2 - - N1 = len(t1) - N2 = len(t2) - - spike_events = np.empty(N1+N2+2) - - y_starts = np.empty(len(spike_events)-1) - y_ends = np.empty(len(spike_events)-1) - - with nogil: # release the interpreter to allow multithreading - spike_events[0] = t_start - t_p1 = t_start - t_p2 = t_start - if t1[0] > t_start: - # dt_p1 = t2[0]-t_start - t_f1 = t1[0] - dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) - isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) - dt_p1 = dt_f1 - s1 = dt_p1*(t_f1-t_start)/isi1 - index1 = -1 - else: - t_f1 = t1[1] - dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) - dt_p1 = 0.0 - isi1 = t1[1]-t1[0] - s1 = dt_p1 - index1 = 0 - if t2[0] > t_start: - # dt_p1 = t2[0]-t_start - t_f2 = t2[0] - dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) - dt_p2 = dt_f2 - isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) - s2 = dt_p2*(t_f2-t_start)/isi2 - index2 = -1 - else: - t_f2 = t2[1] - dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) - dt_p2 = 0.0 - isi2 = t2[1]-t2[0] - s2 = dt_p2 - index2 = 0 - - y_starts[0] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) - index = 1 - - while index1+index2 < N1+N2-2: - # print(index, index1, index2) - if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): - index1 += 1 - # first calculate the previous interval end value - s1 = 
dt_f1*(t_f1-t_p1) / isi1 - # the previous time now was the following time before: - dt_p1 = dt_f1 - t_p1 = t_f1 # t_p1 contains the current time point - # get the next time - if index1 < N1-1: - t_f1 = t1[index1+1] - else: - t_f1 = t_end - spike_events[index] = t_p1 - s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 - y_ends[index-1] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, - isi2) - # now the next interval start value - if index1 < N1-1: - dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, - t_start, t_end) - isi1 = t_f1-t_p1 - s1 = dt_p1 - else: - dt_f1 = dt_p1 - isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) - # s1 needs adjustment due to change of isi1 - s1 = dt_p1*(t_end-t1[N1-1])/isi1 - # s2 is the same as above, thus we can compute y2 immediately - y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, - isi2) - elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): - index2 += 1 - # first calculate the previous interval end value - s2 = dt_f2*(t_f2-t_p2) / isi2 - # the previous time now was the following time before: - dt_p2 = dt_f2 - t_p2 = t_f2 # t_p2 contains the current time point - # get the next time - if index2 < N2-1: - t_f2 = t2[index2+1] - else: - t_f2 = t_end - spike_events[index] = t_p2 - s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 - y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, - isi2) - # now the next interval start value - if index2 < N2-1: - dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, - t_start, t_end) - isi2 = t_f2-t_p2 - s2 = dt_p2 - else: - dt_f2 = dt_p2 - isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) - # s2 needs adjustment due to change of isi2 - s2 = dt_p2*(t_end-t2[N2-1])/isi2 - # s2 is the same as above, thus we can compute y2 immediately - y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) - else: # t_f1 == t_f2 - generate only one event - index1 += 1 - index2 += 1 - t_p1 = t_f1 - t_p2 = t_f2 - dt_p1 = 0.0 - dt_p2 = 0.0 - spike_events[index] = t_f1 - y_ends[index-1] = 0.0 - y_starts[index] = 0.0 - if index1 < N1-1: - t_f1 = t1[index1+1] - dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, - t_start, t_end) - isi1 = t_f1 - t_p1 - else: - t_f1 = t_end - dt_f1 = dt_p1 - isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) - if index2 < N2-1: - t_f2 = t2[index2+1] - dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, - t_start, t_end) - isi2 = t_f2 - t_p2 - else: - t_f2 = t_end - dt_f2 = dt_p2 - isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) - index += 1 - # the last event is the interval end - if spike_events[index-1] == t_end: - index -= 1 - else: - spike_events[index] = t_end - # the ending value of the last interval - isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) - isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) - s1 = dt_f1*(t_end-t1[N1-1])/isi1 - s2 = dt_f2*(t_end-t2[N2-1])/isi2 - y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) - # end nogil - - # use only the data added above - # could be less than original length due to equal spike times - return spike_events[:index+1], y_starts[:index], y_ends[:index] - - - -############################################################ -# coincidence_python -############################################################ -cdef inline double get_tau(double[:] spikes1, double[:] spikes2, - int i, int j, double max_tau): - cdef double m = 1E100 # some huge number - cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 - cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 - if i < N1 and i > -1: - m = fmin(m, spikes1[i+1]-spikes1[i]) - if j < N2 and j > -1: - m = 
fmin(m, spikes2[j+1]-spikes2[j]) - if i > 0: - m = fmin(m, spikes1[i]-spikes1[i-1]) - if j > 0: - m = fmin(m, spikes2[j]-spikes2[j-1]) - m *= 0.5 - if max_tau > 0.0: - m = fmin(m, max_tau) - return m - - -############################################################ -# coincidence_cython -############################################################ -def coincidence_cython(double[:] spikes1, double[:] spikes2, - double t_start, double t_end, double max_tau): - - cdef int N1 = len(spikes1) - cdef int N2 = len(spikes2) - cdef int i = -1 - cdef int j = -1 - cdef int n = 0 - cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times - cdef double[:] c = np.zeros(N1 + N2 + 2) # coincidences - cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity - cdef double tau - while i + j < N1 + N2 - 2: - if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): - i += 1 - n += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) - st[n] = spikes1[i] - if j > -1 and spikes1[i]-spikes2[j] < tau: - # coincidence between the current spike and the previous spike - # both get marked with 1 - c[n] = 1 - c[n-1] = 1 - elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): - j += 1 - n += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) - st[n] = spikes2[j] - if i > -1 and spikes2[j]-spikes1[i] < tau: - # coincidence between the current spike and the previous spike - # both get marked with 1 - c[n] = 1 - c[n-1] = 1 - else: # spikes1[i+1] = spikes2[j+1] - # advance in both spike trains - j += 1 - i += 1 - n += 1 - # add only one event, but with coincidence 2 and multiplicity 2 - st[n] = spikes1[i] - c[n] = 2 - mp[n] = 2 - - st = st[:n+2] - c = c[:n+2] - mp = mp[:n+2] - - st[0] = t_start - st[len(st)-1] = t_end - c[0] = c[1] - c[len(c)-1] = c[len(c)-2] - mp[0] = mp[1] - mp[len(mp)-1] = mp[len(mp)-2] - - return st, c, mp diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx new file mode 100644 index 0000000..59a8d30 --- /dev/null +++ b/pyspike/cython/cython_profiles.pyx @@ -0,0 +1,423 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_profiles.pyx + +cython implementation of the isi-, spike- and spike-sync profiles + +Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects +improves the performance of spike_distance by a factor of 10! 
+ +Copyright 2014, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_profiles.pyx + +which gives:: + + cython_profiles.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport fabs +from libc.math cimport fmax +from libc.math cimport fmin + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +############################################################ +# isi_profile_cython +############################################################ +def isi_profile_cython(double[:] s1, double[:] s2, + double t_start, double t_end): + + cdef double[:] spike_events + cdef double[:] isi_values + cdef int index1, index2, index + cdef int N1, N2 + cdef double nu1, nu2 + N1 = len(s1) + N2 = len(s2) + + spike_events = np.empty(N1+N2+2) + # the values have one entry less as they are defined at the intervals + isi_values = np.empty(N1+N2+1) + + # first x-value of the profile + spike_events[0] = t_start + + # first interspike interval - check if a spike exists at the start time + if s1[0] > t_start: + # edge correction + nu1 = fmax(s1[0]-t_start, s1[1]-s1[0]) + index1 = -1 + else: + nu1 = s1[1]-s1[0] + index1 = 0 + + if s2[0] > t_start: + # edge correction + nu2 = fmax(s2[0]-t_start, s2[1]-s2[0]) + index2 = -1 + else: + nu2 = s2[1]-s2[0] + index2 = 0 + + isi_values[0] = fabs(nu1-nu2)/fmax(nu1, nu2) + index = 1 + + with nogil: # release the interpreter to allow multithreading + while index1+index2 < N1+N2-2: + # check which spike is next, only if there are spikes left in 1 + # next spike in 1 is earlier, or there are no spikes left in 2 + if (index1 < N1-1) and ((index2 == N2-1) or + (s1[index1+1] < s2[index2+1])): + index1 += 1 + spike_events[index] = s1[index1] + if index1 < N1-1: + nu1 = s1[index1+1]-s1[index1] + else: + # edge correction + nu1 = fmax(t_end-s1[index1], nu1) + elif (index2 < N2-1) and ((index1 == N1-1) or + (s1[index1+1] > s2[index2+1])): + index2 += 1 + spike_events[index] = s2[index2] + if index2 < N2-1: + nu2 = s2[index2+1]-s2[index2] + else: + # edge correction + nu2 = fmax(t_end-s2[index2], nu2) + else: # s1[index1+1] == s2[index2+1] + index1 += 1 + index2 += 1 + spike_events[index] = s1[index1] + if index1 < N1-1: + nu1 = s1[index1+1]-s1[index1] + else: + # edge correction + nu1 = fmax(t_end-s1[index1], nu1) + if index2 < N2-1: + nu2 = s2[index2+1]-s2[index2] + else: + # edge correction + nu2 = fmax(t_end-s2[index2], nu2) + # compute the corresponding isi-distance + isi_values[index] = fabs(nu1 - nu2) / fmax(nu1, nu2) + index += 1 + # the last event is the interval end + if spike_events[index-1] == t_end: + index -= 1 + else: + spike_events[index] = t_end + # end nogil + + return spike_events[:index+1], isi_values[:index] + + +############################################################ +# get_min_dist_cython +############################################################ +cdef inline double get_min_dist_cython(double spike_time, + double[:] spike_train, + # use memory view to ensure inlining + # np.ndarray[DTYPE_t,ndim=1] spike_train, + int N, + int start_index, + double t_start, double t_end) nogil: + """ Returns the minimal distance |spike_time - spike_train[i]| + with i>=start_index. 
+ """ + cdef double d, d_temp + # start with the distance to the start time + d = fabs(spike_time - t_start) + if start_index < 0: + start_index = 0 + while start_index < N: + d_temp = fabs(spike_time - spike_train[start_index]) + if d_temp > d: + return d + else: + d = d_temp + start_index += 1 + + # finally, check the distance to end time + d_temp = fabs(t_end - spike_time) + if d_temp > d: + return d + else: + return d_temp + + +############################################################ +# isi_avrg_cython +############################################################ +cdef inline double isi_avrg_cython(double isi1, double isi2) nogil: + return 0.5*(isi1+isi2)*(isi1+isi2) + # alternative definition to obtain ~ 0.5 for Poisson spikes + # return 0.5*(isi1*isi1+isi2*isi2) + + +############################################################ +# spike_profile_cython +############################################################ +def spike_profile_cython(double[:] t1, double[:] t2, + double t_start, double t_end): + + cdef double[:] spike_events + cdef double[:] y_starts + cdef double[:] y_ends + + cdef int N1, N2, index1, index2, index + cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 + cdef double isi1, isi2, s1, s2 + + N1 = len(t1) + N2 = len(t2) + + spike_events = np.empty(N1+N2+2) + + y_starts = np.empty(len(spike_events)-1) + y_ends = np.empty(len(spike_events)-1) + + with nogil: # release the interpreter to allow multithreading + spike_events[0] = t_start + t_p1 = t_start + t_p2 = t_start + if t1[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f1 = t1[0] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) + dt_p1 = dt_f1 + s1 = dt_p1*(t_f1-t_start)/isi1 + index1 = -1 + else: + t_f1 = t1[1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + dt_p1 = 0.0 + isi1 = t1[1]-t1[0] + s1 = dt_p1 + index1 = 0 + if t2[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f2 = t2[0] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = dt_f2 + isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) + s2 = dt_p2*(t_f2-t_start)/isi2 + index2 = -1 + else: + t_f2 = t2[1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = 0.0 + isi2 = t2[1]-t2[0] + s2 = dt_p2 + index2 = 0 + + y_starts[0] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + index = 1 + + while index1+index2 < N1+N2-2: + # print(index, index1, index2) + if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): + index1 += 1 + # first calculate the previous interval end value + s1 = dt_f1*(t_f1-t_p1) / isi1 + # the previous time now was the following time before: + dt_p1 = dt_f1 + t_p1 = t_f1 # t_p1 contains the current time point + # get the next time + if index1 < N1-1: + t_f1 = t1[index1+1] + else: + t_f1 = t_end + spike_events[index] = t_p1 + s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 + y_ends[index-1] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, + isi2) + # now the next interval start value + if index1 < N1-1: + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1-t_p1 + s1 = dt_p1 + else: + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # s1 needs adjustment due to change of isi1 + s1 = dt_p1*(t_end-t1[N1-1])/isi1 + # s2 is the same as above, thus we can compute y2 immediately + y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, + isi2) + elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): + index2 += 1 + # first calculate the previous interval end value + s2 = dt_f2*(t_f2-t_p2) / 
isi2 + # the previous time now was the following time before: + dt_p2 = dt_f2 + t_p2 = t_f2 # t_p2 contains the current time point + # get the next time + if index2 < N2-1: + t_f2 = t2[index2+1] + else: + t_f2 = t_end + spike_events[index] = t_p2 + s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 + y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, + isi2) + # now the next interval start value + if index2 < N2-1: + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2-t_p2 + s2 = dt_p2 + else: + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + # s2 needs adjustment due to change of isi2 + s2 = dt_p2*(t_end-t2[N2-1])/isi2 + # s2 is the same as above, thus we can compute y2 immediately + y_starts[index] = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + else: # t_f1 == t_f2 - generate only one event + index1 += 1 + index2 += 1 + t_p1 = t_f1 + t_p2 = t_f2 + dt_p1 = 0.0 + dt_p2 = 0.0 + spike_events[index] = t_f1 + y_ends[index-1] = 0.0 + y_starts[index] = 0.0 + if index1 < N1-1: + t_f1 = t1[index1+1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1 - t_p1 + else: + t_f1 = t_end + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + if index2 < N2-1: + t_f2 = t2[index2+1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2 - t_p2 + else: + t_f2 = t_end + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + index += 1 + # the last event is the interval end + if spike_events[index-1] == t_end: + index -= 1 + else: + spike_events[index] = t_end + # the ending value of the last interval + isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + s1 = dt_f1*(t_end-t1[N1-1])/isi1 + s2 = dt_f2*(t_end-t2[N2-1])/isi2 + y_ends[index-1] = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + # end nogil + + # use only the data added above + # could be less than original length due to equal spike times + return spike_events[:index+1], y_starts[:index], y_ends[:index] + + + +############################################################ +# get_tau +############################################################ +cdef inline double get_tau(double[:] spikes1, double[:] spikes2, + int i, int j, double max_tau): + cdef double m = 1E100 # some huge number + cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 + cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 + if i < N1 and i > -1: + m = fmin(m, spikes1[i+1]-spikes1[i]) + if j < N2 and j > -1: + m = fmin(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = fmin(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = fmin(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = fmin(m, max_tau) + return m + + +############################################################ +# coincidence_profile_cython +############################################################ +def coincidence_profile_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef int n = 0 + cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times + cdef double[:] c = np.zeros(N1 + N2 + 2) # coincidences + cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes1[i] + if j > -1 and spikes1[i]-spikes2[j] < 
tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + c[n] = 1 + c[n-1] = 1 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + n += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + st[n] = spikes2[j] + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + c[n] = 1 + c[n-1] = 1 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + n += 1 + # add only one event, but with coincidence 2 and multiplicity 2 + st[n] = spikes1[i] + c[n] = 2 + mp[n] = 2 + + st = st[:n+2] + c = c[:n+2] + mp = mp[:n+2] + + st[0] = t_start + st[len(st)-1] = t_end + c[0] = c[1] + c[len(c)-1] = c[len(c)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + + return st, c, mp diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index 8b1e9bf..164378d 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -31,18 +31,18 @@ def isi_profile(spike_train1, spike_train2): # load cython implementation try: - from cython.cython_distance import isi_distance_cython \ - as isi_distance_impl + from cython.cython_profiles import isi_profile_cython \ + as isi_profile_impl except ImportError: print("Warning: isi_distance_cython not found. Make sure that PySpike \ is installed by running\n 'python setup.py build_ext --inplace'!\n \ Falling back to slow python backend.") # use python backend from cython.python_backend import isi_distance_python \ - as isi_distance_impl + as isi_profile_impl - times, values = isi_distance_impl(spike_train1.spikes, spike_train2.spikes, - spike_train1.t_start, spike_train1.t_end) + times, values = isi_profile_impl(spike_train1.spikes, spike_train2.spikes, + spike_train1.t_start, spike_train1.t_end) return PieceWiseConstFunc(times, values) diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index 499ab77..3567585 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -31,20 +31,20 @@ def spike_profile(spike_train1, spike_train2): # cython implementation try: - from cython.cython_distance import spike_distance_cython \ - as spike_distance_impl + from cython.cython_profiles import spike_profile_cython \ + as spike_profile_impl except ImportError: - print("Warning: spike_distance_cython not found. Make sure that \ + print("Warning: spike_profile_cython not found. Make sure that \ PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \ Falling back to slow python backend.") # use python backend from cython.python_backend import spike_distance_python \ - as spike_distance_impl + as spike_profile_impl - times, y_starts, y_ends = spike_distance_impl(spike_train1.spikes, - spike_train2.spikes, - spike_train1.t_start, - spike_train1.t_end) + times, y_starts, y_ends = spike_profile_impl(spike_train1.spikes, + spike_train2.spikes, + spike_train1.t_start, + spike_train1.t_end) return PieceWiseLinFunc(times, y_starts, y_ends) diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index 7d429e4..107734d 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -36,24 +36,23 @@ def spike_sync_profile(spike_train1, spike_train2, max_tau=None): # cython implementation try: - from cython.cython_distance import coincidence_cython \ - as coincidence_impl + from cython.cython_profiles import coincidence_profile_cython \ + as coincidence_profile_impl except ImportError: print("Warning: spike_distance_cython not found. 
Make sure that \ PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \ Falling back to slow python backend.") # use python backend from cython.python_backend import coincidence_python \ - as coincidence_impl + as coincidence_profile_impl if max_tau is None: max_tau = 0.0 - times, coincidences, multiplicity = coincidence_impl(spike_train1.spikes, - spike_train2.spikes, - spike_train1.t_start, - spike_train1.t_end, - max_tau) + times, coincidences, multiplicity \ + = coincidence_profile_impl(spike_train1.spikes, spike_train2.spikes, + spike_train1.t_start, spike_train1.t_end, + max_tau) return DiscreteFunc(times, coincidences, multiplicity) diff --git a/setup.py b/setup.py index 289d521..d687240 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ else: use_cython = True if os.path.isfile("pyspike/cython/cython_add.c") and \ - os.path.isfile("pyspike/cython/cython_distance.c"): + os.path.isfile("pyspike/cython/cython_profiles.c"): use_c = True else: use_c = False @@ -33,13 +33,13 @@ ext_modules = [] if use_cython: # Cython is available, compile .pyx -> .c ext_modules += [ Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.pyx"]), - Extension("pyspike.cython.cython_distance", ["pyspike/cython/cython_distance.pyx"]), + Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.pyx"]), ] cmdclass.update({'build_ext': build_ext}) elif use_c: # c files are there, compile to binaries ext_modules += [ Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.c"]), - Extension("pyspike.cython.cython_distance", ["pyspike/cython/cython_distance.c"]), + Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.c"]), ] # neither cython nor c files available -> automatic fall-back to python backend @@ -78,7 +78,7 @@ train similarity', 'Programming Language :: Python :: 2.7', ], package_data={ - 'pyspike': ['cython/cython_add.c', 'cython/cython_distance.c'], + 'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c'], 'test': ['Spike_testdata.txt'] } ) -- cgit v1.2.3 From f688dc2e8616f914040746de845646abb158125d Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Fri, 8 May 2015 18:06:59 +0200 Subject: introduce backend for distance function isi- and spike distances over complete intervals are now computed without obtaining the profile first. This gives a more than 2x performance improvement.
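For illustration, a minimal usage sketch of the new code path (illustration only, not part of this patch; the variable names are made up, but isi_distance, isi_profile and load_spike_trains_from_txt are the existing public functions):

    import pyspike as spk

    spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                                  edges=(0, 4000))

    # interval=None: the distance is computed directly by the new
    # cython_distances backend, without constructing the profile
    d_fast = spk.isi_distance(spike_trains[0], spike_trains[1])

    # equivalent result via the profile (the previous, slower code path)
    d_slow = spk.isi_profile(spike_trains[0], spike_trains[1]).avrg()

See examples/performance.py for timing measurements.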
--- examples/performance.py | 8 +- pyspike/cython/cython_distances.pyx | 328 ++++++++++++++++++++++++++++++++++++ pyspike/cython/cython_profiles.pyx | 2 +- pyspike/isi_distance.py | 16 +- pyspike/spike_distance.py | 17 +- setup.py | 5 +- 6 files changed, 371 insertions(+), 5 deletions(-) create mode 100644 pyspike/cython/cython_distances.pyx diff --git a/examples/performance.py b/examples/performance.py index 94dae25..1c31e8f 100644 --- a/examples/performance.py +++ b/examples/performance.py @@ -1,4 +1,10 @@ -""" Compute distances of large sets of spike trains for performance tests +""" +Compute distances of large sets of spike trains for performance tests + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + """ from __future__ import print_function diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx new file mode 100644 index 0000000..65c2872 --- /dev/null +++ b/pyspike/cython/cython_distances.pyx @@ -0,0 +1,328 @@ +#cython: boundscheck=False +#cython: wraparound=False +#cython: cdivision=True + +""" +cython_distances.pyx + +cython implementation of the isi-, spike- and spike-sync distances + +Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects +improves the performance of spike_distance by a factor of 10! + +Copyright 2015, Mario Mulansky + +Distributed under the BSD License + +""" + +""" +To test whether things can be optimized: remove all yellow stuff +in the html output:: + + cython -a cython_distances.pyx + +which gives:: + + cython_distances.html + +""" + +import numpy as np +cimport numpy as np + +from libc.math cimport fabs +from libc.math cimport fmax +from libc.math cimport fmin + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + + +############################################################ +# isi_distance_cython +############################################################ +def isi_distance_cython(double[:] s1, double[:] s2, + double t_start, double t_end): + + cdef double isi_value + cdef int index1, index2, index + cdef int N1, N2 + cdef double nu1, nu2 + cdef double last_t, curr_t, curr_isi + isi_value = 0.0 + N1 = len(s1) + N2 = len(s2) + + # first interspike interval - check if a spike exists at the start time + if s1[0] > t_start: + # edge correction + nu1 = fmax(s1[0]-t_start, s1[1]-s1[0]) + index1 = -1 + else: + nu1 = s1[1]-s1[0] + index1 = 0 + + if s2[0] > t_start: + # edge correction + nu2 = fmax(s2[0]-t_start, s2[1]-s2[0]) + index2 = -1 + else: + nu2 = s2[1]-s2[0] + index2 = 0 + + last_t = t_start + curr_isi = fabs(nu1-nu2)/fmax(nu1, nu2) + index = 1 + + with nogil: # release the interpreter to allow multithreading + while index1+index2 < N1+N2-2: + # check which spike is next, only if there are spikes left in 1 + # next spike in 1 is earlier, or there are no spikes left in 2 + if (index1 < N1-1) and ((index2 == N2-1) or + (s1[index1+1] < s2[index2+1])): + index1 += 1 + curr_t = s1[index1] + if index1 < N1-1: + nu1 = s1[index1+1]-s1[index1] + else: + # edge correction + nu1 = fmax(t_end-s1[index1], nu1) + elif (index2 < N2-1) and ((index1 == N1-1) or + (s1[index1+1] > s2[index2+1])): + index2 += 1 + curr_t = s2[index2] + if index2 < N2-1: + nu2 = s2[index2+1]-s2[index2] + else: + # edge correction + nu2 = fmax(t_end-s2[index2], nu2) + else: # s1[index1+1] == s2[index2+1] + index1 += 1 + index2 += 1 + curr_t = s1[index1] + if index1 < N1-1: + nu1 = s1[index1+1]-s1[index1] + else: + # edge correction + nu1 = fmax(t_end-s1[index1], nu1) + if index2 < N2-1: + nu2 = s2[index2+1]-s2[index2] + 
else: + # edge correction + nu2 = fmax(t_end-s2[index2], nu2) + # compute the corresponding isi-distance + isi_value += curr_isi * (curr_t - last_t) + curr_isi = fabs(nu1 - nu2) / fmax(nu1, nu2) + last_t = curr_t + index += 1 + + isi_value += curr_isi * (t_end - last_t) + # end nogil + + return isi_value / (t_end-t_start) + + +############################################################ +# get_min_dist_cython +############################################################ +cdef inline double get_min_dist_cython(double spike_time, + double[:] spike_train, + # use memory view to ensure inlining + # np.ndarray[DTYPE_t,ndim=1] spike_train, + int N, + int start_index, + double t_start, double t_end) nogil: + """ Returns the minimal distance |spike_time - spike_train[i]| + with i>=start_index. + """ + cdef double d, d_temp + # start with the distance to the start time + d = fabs(spike_time - t_start) + if start_index < 0: + start_index = 0 + while start_index < N: + d_temp = fabs(spike_time - spike_train[start_index]) + if d_temp > d: + return d + else: + d = d_temp + start_index += 1 + + # finally, check the distance to end time + d_temp = fabs(t_end - spike_time) + if d_temp > d: + return d + else: + return d_temp + + +############################################################ +# isi_avrg_cython +############################################################ +cdef inline double isi_avrg_cython(double isi1, double isi2) nogil: + return 0.5*(isi1+isi2)*(isi1+isi2) + # alternative definition to obtain ~ 0.5 for Poisson spikes + # return 0.5*(isi1*isi1+isi2*isi2) + + +############################################################ +# spike_distance_cython +############################################################ +def spike_distance_cython(double[:] t1, double[:] t2, + double t_start, double t_end): + + cdef int N1, N2, index1, index2, index + cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2 + cdef double isi1, isi2, s1, s2 + cdef double y_start, y_end, t_last, t_current, spike_value + + spike_value = 0.0 + + N1 = len(t1) + N2 = len(t2) + + with nogil: # release the interpreter to allow multithreading + t_last = t_start + t_p1 = t_start + t_p2 = t_start + if t1[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f1 = t1[0] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + isi1 = fmax(t_f1-t_start, t1[1]-t1[0]) + dt_p1 = dt_f1 + s1 = dt_p1*(t_f1-t_start)/isi1 + index1 = -1 + else: + t_f1 = t1[1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end) + dt_p1 = 0.0 + isi1 = t1[1]-t1[0] + s1 = dt_p1 + index1 = 0 + if t2[0] > t_start: + # dt_p1 = t2[0]-t_start + t_f2 = t2[0] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = dt_f2 + isi2 = fmax(t_f2-t_start, t2[1]-t2[0]) + s2 = dt_p2*(t_f2-t_start)/isi2 + index2 = -1 + else: + t_f2 = t2[1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end) + dt_p2 = 0.0 + isi2 = t2[1]-t2[0] + s2 = dt_p2 + index2 = 0 + + y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + index = 1 + + while index1+index2 < N1+N2-2: + # print(index, index1, index2) + if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1): + index1 += 1 + # first calculate the previous interval end value + s1 = dt_f1*(t_f1-t_p1) / isi1 + # the previous time now was the following time before: + dt_p1 = dt_f1 + t_p1 = t_f1 # t_p1 contains the current time point + # get the next time + if index1 < N1-1: + t_f1 = t1[index1+1] + else: + t_f1 = t_end + t_curr = t_p1 + s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2 + y_end = (s1*isi2 + 
s2*isi1)/isi_avrg_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index1 < N1-1: + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1-t_p1 + s1 = dt_p1 + else: + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # s1 needs adjustment due to change of isi1 + s1 = dt_p1*(t_end-t1[N1-1])/isi1 + # s2 is the same as above, thus we can compute y2 immediately + y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1): + index2 += 1 + # first calculate the previous interval end value + s2 = dt_f2*(t_f2-t_p2) / isi2 + # the previous time now was the following time before: + dt_p2 = dt_f2 + t_p2 = t_f2 # t_p2 contains the current time point + # get the next time + if index2 < N2-1: + t_f2 = t2[index2+1] + else: + t_f2 = t_end + t_curr = t_p2 + s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1 + y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + + # now the next interval start value + if index2 < N2-1: + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2-t_p2 + s2 = dt_p2 + else: + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + # s2 needs adjustment due to change of isi2 + s2 = dt_p2*(t_end-t2[N2-1])/isi2 + # s1 is the same as above, thus we can compute y2 immediately + y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2) + else: # t_f1 == t_f2 - generate only one event + index1 += 1 + index2 += 1 + t_p1 = t_f1 + t_p2 = t_f2 + dt_p1 = 0.0 + dt_p2 = 0.0 + t_curr = t_f1 + y_end = 0.0 + spike_value += 0.5*(y_start + y_end) * (t_curr - t_last) + y_start = 0.0 + if index1 < N1-1: + t_f1 = t1[index1+1] + dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2, + t_start, t_end) + isi1 = t_f1 - t_p1 + else: + t_f1 = t_end + dt_f1 = dt_p1 + isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + if index2 < N2-1: + t_f2 = t2[index2+1] + dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1, + t_start, t_end) + isi2 = t_f2 - t_p2 + else: + t_f2 = t_end + dt_f2 = dt_p2 + isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + index += 1 + t_last = t_curr + # isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2]) + # isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2]) + s1 = dt_f1*(t_end-t1[N1-1])/isi1 + s2 = dt_f2*(t_end-t2[N2-1])/isi2 + y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2) + spike_value += 0.5*(y_start + y_end) * (t_end - t_last) + # end nogil + + # use only the data added above + # could be less than original length due to equal spike times + return spike_value / (t_end-t_start) diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index 59a8d30..3690091 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -10,7 +10,7 @@ cython implementation of the isi-, spike- and spike-sync profiles Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects improves the performance of spike_distance by a factor of 10! -Copyright 2014, Mario Mulansky +Copyright 2014-2015, Mario Mulansky Distributed under the BSD License diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index 164378d..2a1ed3a 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -66,7 +66,21 @@ def isi_distance(spike_train1, spike_train2, interval=None): :returns: The isi-distance :math:`D_I`. 
:rtype: double """ - return isi_profile(spike_train1, spike_train2).avrg(interval) + + if interval is None: + # distance over the whole interval is requested: use specific function + # for optimal performance + try: + from cython.cython_distances import isi_distance_cython \ + as isi_distance_impl + return isi_distance_impl(spike_train1.spikes, spike_train2.spikes, + spike_train1.t_start, spike_train1.t_end) + except ImportError: + # Cython backend not available: fall back to profile averaging + return isi_profile(spike_train1, spike_train2).avrg(interval) + else: + # some specific interval is provided: use profile + return isi_profile(spike_train1, spike_train2).avrg(interval) ############################################################ diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index 3567585..d727fa2 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -68,7 +68,22 @@ def spike_distance(spike_train1, spike_train2, interval=None): :rtype: double """ - return spike_profile(spike_train1, spike_train2).avrg(interval) + if interval is None: + # distance over the whole interval is requested: use specific function + # for optimal performance + try: + from cython.cython_distances import spike_distance_cython \ + as spike_distance_impl + return spike_distance_impl(spike_train1.spikes, + spike_train2.spikes, + spike_train1.t_start, + spike_train1.t_end) + except ImportError: + # Cython backend not available: fall back to average profile + return spike_profile(spike_train1, spike_train2).avrg(interval) + else: + # some specific interval is provided: compute the whole profile + return spike_profile(spike_train1, spike_train2).avrg(interval) ############################################################ diff --git a/setup.py b/setup.py index d687240..7902066 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,8 @@ else: use_cython = True if os.path.isfile("pyspike/cython/cython_add.c") and \ - os.path.isfile("pyspike/cython/cython_profiles.c"): + os.path.isfile("pyspike/cython/cython_profiles.c") and \ + os.path.isfile("pyspike/cython/cython_distances.c"): use_c = True else: use_c = False @@ -34,12 +35,14 @@ if use_cython: # Cython is available, compile .pyx -> .c ext_modules += [ Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.pyx"]), Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.pyx"]), + Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.pyx"]), ] cmdclass.update({'build_ext': build_ext}) elif use_c: # c files are there, compile to binaries ext_modules += [ Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.c"]), Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.c"]), + Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.c"]), ] # neither cython nor c files available -> automatic fall-back to python backend -- cgit v1.2.3 From ad7961c9f73d77b87ee23349169618d128418a51 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 12:06:19 +0200 Subject: performance improvement for spike sync Additional cython implementation for overall spike sync values. It is not necessary to compute the profile anymore if only the spike sync value is required. 3x performance gain. 
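Analogous to the distance backends above, a minimal usage sketch (illustration only, not part of this patch; variable names are made up):

    import pyspike as spk

    spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                                  edges=(0, 4000))

    # interval=None: coincidences and multiplicities are summed directly
    # in cython, without constructing the profile
    s_fast = spk.spike_sync(spike_trains[0], spike_trains[1])

    # equivalent result obtained from the full profile (slower)
    s_slow = spk.spike_sync_profile(spike_trains[0],
                                    spike_trains[1]).avrg()

Both yield the same SPIKE-Synchronization value; the first avoids building the profile altogether.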
--- pyspike/cython/cython_distances.pyx | 64 +++++++++++++++++++++++++++++++++++++ pyspike/spike_sync.py | 46 ++++++++++++++++++++++---- 2 files changed, 104 insertions(+), 6 deletions(-) diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx index 65c2872..bf90638 100644 --- a/pyspike/cython/cython_distances.pyx +++ b/pyspike/cython/cython_distances.pyx @@ -326,3 +326,67 @@ def spike_distance_cython(double[:] t1, double[:] t2, # use only the data added above # could be less than original length due to equal spike times return spike_value / (t_end-t_start) + + + +############################################################ +# get_tau +############################################################ +cdef inline double get_tau(double[:] spikes1, double[:] spikes2, + int i, int j, double max_tau): + cdef double m = 1E100 # some huge number + cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 + cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 + if i < N1 and i > -1: + m = fmin(m, spikes1[i+1]-spikes1[i]) + if j < N2 and j > -1: + m = fmin(m, spikes2[j+1]-spikes2[j]) + if i > 0: + m = fmin(m, spikes1[i]-spikes1[i-1]) + if j > 0: + m = fmin(m, spikes2[j]-spikes2[j-1]) + m *= 0.5 + if max_tau > 0.0: + m = fmin(m, max_tau) + return m + + +############################################################ +# coincidence_value_cython +############################################################ +def coincidence_value_cython(double[:] spikes1, double[:] spikes2, + double t_start, double t_end, double max_tau): + + cdef int N1 = len(spikes1) + cdef int N2 = len(spikes2) + cdef int i = -1 + cdef int j = -1 + cdef double coinc = 0.0 + cdef double mp = 0.0 + cdef double tau + while i + j < N1 + N2 - 2: + if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): + i += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + if j > -1 and spikes1[i]-spikes2[j] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + coinc += 2 + elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): + j += 1 + mp += 1 + tau = get_tau(spikes1, spikes2, i, j, max_tau) + if i > -1 and spikes2[j]-spikes1[i] < tau: + # coincidence between the current spike and the previous spike + # both get marked with 1 + coinc += 2 + else: # spikes1[i+1] = spikes2[j+1] + # advance in both spike trains + j += 1 + i += 1 + # add only one event, but with coincidence 2 and multiplicity 2 + mp += 2 + coinc += 2 + + return coinc, mp diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py index 107734d..40d98d2 100644 --- a/pyspike/spike_sync.py +++ b/pyspike/spike_sync.py @@ -57,6 +57,40 @@ Falling back to slow python backend.") return DiscreteFunc(times, coincidences, multiplicity) +############################################################ +# _spike_sync_values +############################################################ +def _spike_sync_values(spike_train1, spike_train2, interval, max_tau): + """" Internal function. Computes the summed coincidences and multiplicity + for spike synchronization of the two given spike trains. + + Do not call this function directly, use `spike_sync` or `spike_sync_multi` + instead. 
+ """ + if interval is None: + # distance over the whole interval is requested: use specific function + # for optimal performance + try: + from cython.cython_distances import coincidence_value_cython \ + as coincidence_value_impl + if max_tau is None: + max_tau = 0.0 + c, mp = coincidence_value_impl(spike_train1.spikes, + spike_train2.spikes, + spike_train1.t_start, + spike_train1.t_end, + max_tau) + return c, mp + except ImportError: + # Cython backend not available: fall back to profile averaging + return spike_sync_profile(spike_train1, spike_train2, + max_tau).integral(interval) + else: + # some specific interval is provided: use profile + return spike_sync_profile(spike_train1, spike_train2, + max_tau).integral(interval) + + ############################################################ # spike_sync ############################################################ @@ -80,8 +114,8 @@ def spike_sync(spike_train1, spike_train2, interval=None, max_tau=None): :rtype: `double` """ - return spike_sync_profile(spike_train1, spike_train2, - max_tau).avrg(interval) + c, mp = _spike_sync_values(spike_train1, spike_train2, interval, max_tau) + return 1.0*c/mp ############################################################ @@ -144,10 +178,10 @@ def spike_sync_multi(spike_trains, indices=None, interval=None, max_tau=None): coincidence = 0.0 mp = 0.0 for (i, j) in pairs: - profile = spike_sync_profile(spike_trains[i], spike_trains[j]) - summed_vals = profile.integral(interval) - coincidence += summed_vals[0] - mp += summed_vals[1] + c, m = _spike_sync_values(spike_trains[i], spike_trains[j], + interval, max_tau) + coincidence += c + mp += m return coincidence/mp -- cgit v1.2.3 From f982bb4870de078b77f0c9fa3230128f8dfe640c Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 12:10:30 +0200 Subject: +tutorial, reference, contributors order --- Contributors.txt | 6 +- Readme.rst | 8 +-- doc/tutorial.rst | 213 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 220 insertions(+), 7 deletions(-) create mode 100644 doc/tutorial.rst diff --git a/Contributors.txt b/Contributors.txt index 0d4afee..83563c7 100644 --- a/Contributors.txt +++ b/Contributors.txt @@ -3,10 +3,10 @@ Python/C Programming: Scientific Methods: - Thomas Kreuz -- Nebojsa D. Bozanic -- Mario Mulansky -- Conor Houghton - Daniel Chicharro +- Conor Houghton +- Nebojsa Bozanic +- Mario Mulansky The work on PySpike was supported by the European Comission through the Marie diff --git a/Readme.rst b/Readme.rst index b8858f5..daacbea 100644 --- a/Readme.rst +++ b/Readme.rst @@ -17,7 +17,7 @@ All source codes are available on `Github `_ -.. [#] Kreuz T, Mulansky M and Bozanic N, *SPIKY: A graphical user interface for monitoring spike train synchrony*, tbp (2015) +.. [#] Kreuz T, Mulansky M and Bozanic N, *SPIKY: A graphical user interface for monitoring spike train synchrony*, J Neurophysiol, in press (2015) Important Changelog ----------------------------- @@ -114,10 +114,10 @@ Curie Initial Training Network* `Neural Engineering Transformative Technologies **Scientific Methods:** - Thomas Kreuz - - Nebojsa D. Bozanic - - Mario Mulansky - - Conor Houghton - Daniel Chicharro + - Conor Houghton + - Nebojsa Bozanic + - Mario Mulansky .. _ISI: http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony#ISI-distance .. 
_SPIKE: http://www.scholarpedia.org/article/SPIKE-distance diff --git a/doc/tutorial.rst b/doc/tutorial.rst new file mode 100644 index 0000000..f7fc20b --- /dev/null +++ b/doc/tutorial.rst @@ -0,0 +1,213 @@ +Spike trains +------------ + +In PySpike, spike trains are represented by :class:`.SpikeTrain` objects. +A :class:`.SpikeTrain` object consists of the spike times given as a `numpy` array as well as the edges of the spike train as :code:`[t_start, t_end]`. +The following code creates such a spike train with some arbitrary spike times: + +.. code:: python + + import numpy as np + from pyspike import SpikeTrain + + spike_train = SpikeTrain(np.array([0.1, 0.3, 0.45, 0.6, 0.9]), [0.0, 1.0]) + +Loading from text files +....................... + +Typically, spike train data is loaded into PySpike from data files. +The most straightforward data files are text files where each line represents one spike train given as a sequence of spike times. +An exemplary file with several spike trains is `PySpike_testdata.txt `_. +To quickly obtain spike trains from such files, PySpike provides the function :func:`.load_spike_trains_from_txt`. + +.. code:: python + + import numpy as np + import pyspike as spk + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + edges=(0, 4000)) + +This function expects the name of the data file as its first parameter. +Furthermore, the time interval of the spike train measurement (edges of the spike trains) should be provided as a pair of start- and end-time values. +Additionally, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function). +As a result, :func:`.load_spike_trains_from_txt` returns a *list* of :class:`.SpikeTrain` objects containing the spike trains from the text file. + + +Computing bivariate distance profiles +--------------------------------------- + +**Important note:** + +------------------------------ + + Spike trains are expected to be *sorted*! + For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted. + Make sure that all your spike trains are sorted, which is ensured if you use the :func:`.load_spike_trains_from_txt` function with the parameter `is_sorted=False` (default). + If in doubt, use :meth:`.SpikeTrain.sort()` to ensure a correctly sorted spike train. + + If you need to copy a spike train, use the :meth:`.SpikeTrain.copy()` method. + Simple assignment `t2 = t1` does not create a copy of the spike train data, but a reference, as `numpy.array` is used for storing the data. + +------------------------------ + +ISI-distance +............ + +The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :class:`.SpikeTrain` s, and plots it with matplotlib: + +.. code:: python + + import matplotlib.pyplot as plt + import pyspike as spk + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + edges=(0, 4000)) + isi_profile = spk.isi_profile(spike_trains[0], spike_trains[1]) + x, y = isi_profile.get_plottable_data() + plt.plot(x, y, '--k') + print("ISI distance: %.8f" % isi_profile.avrg()) + plt.show() + +The ISI-profile is a piece-wise constant function, and hence the function :func:`.isi_profile` returns an instance of the :class:`.PieceWiseConstFunc` class.
+As shown above, this class allows you to obtain arrays that can be used to plot the function with :code:`plt.plot`, but also to compute the time average, which amounts to the final scalar ISI-distance. +By default, the time average is computed for the whole :class:`.PieceWiseConstFunc` function. +However, it is also possible to obtain the average of a specific interval by providing a pair of floats defining the start and end of the interval. +For the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals: + +.. code:: python + + isi1 = isi_profile.avrg(interval=(0, 1000)) + isi2 = isi_profile.avrg(interval=(1000, 2000)) + isi3 = isi_profile.avrg(interval=[(0, 1000), (2000, 3000)]) + isi4 = isi_profile.avrg(interval=[(1000, 2000), (3000, 4000)]) + +Note that multiple intervals can also be supplied by giving a list of tuples. + +If you are only interested in the scalar ISI-distance and not the profile, you can simply use: + +.. code:: python + + isi_dist = spk.isi_distance(spike_trains[0], spike_trains[1], interval) + +where :code:`interval` is optional, as above, and if omitted the ISI-distance is computed for the complete spike trains. + + +SPIKE-distance +.............. + +To compute the SPIKE-distance profile, you use the function :func:`.spike_profile` instead of :code:`isi_profile` above. +But the general approach is very similar: + +.. code:: python + + import matplotlib.pyplot as plt + import pyspike as spk + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + edges=(0, 4000)) + spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1]) + x, y = spike_profile.get_plottable_data() + plt.plot(x, y, '--k') + print("SPIKE distance: %.8f" % spike_profile.avrg()) + plt.show() + +This short example computes and plots the SPIKE-profile of the first two spike trains in the file :code:`PySpike_testdata.txt`. +In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :class:`.PieceWiseLinFunc` object. +Just like the :class:`.PieceWiseConstFunc` for the ISI-profile, the :class:`.PieceWiseLinFunc` provides a :meth:`.PieceWiseLinFunc.get_plottable_data` member function that returns arrays that can be used directly to plot the function. +Furthermore, the :meth:`.PieceWiseLinFunc.avrg` member function returns the average of the profile, defined as the overall SPIKE distance. +As above, you can provide an interval as a pair of floats as well as a sequence of such pairs to :code:`avrg` to specify the averaging interval if required. + +Again, you can use + +.. code:: python + + spike_dist = spk.spike_distance(spike_trains[0], spike_trains[1], interval) + +to compute the SPIKE distance directly, if you are not interested in the profile at all. +The parameter :code:`interval` is optional and if omitted the whole spike train is used. + + +SPIKE synchronization +..................... + +**Important note:** + +------------------------------ + + SPIKE-Synchronization measures *similarity*. + That means a value of zero indicates absence of synchrony, while a value of one denotes the presence of synchrony. + This is exactly the opposite of the other two measures: ISI- and SPIKE-distance. + +------------------------------ + + +SPIKE synchronization is another approach to measure spike synchrony. +In contrast to the SPIKE- and ISI-distance, it measures similarity instead of dissimilarity, i.e. higher values represent larger synchrony.
+Another difference is that the SPIKE synchronization profile is only defined exactly at the spike times, not for the whole interval of the spike trains. +Therefore, it is represented by a :class:`.DiscreteFunc`. + +To compute the spike synchronization profile, PySpike provides the function :func:`.spike_sync_profile`. +The general handling of the profile, however, is similar to the other profiles above: + +.. code:: python + + import matplotlib.pyplot as plt + import pyspike as spk + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + edges=(0, 4000)) + spike_profile = spk.spike_sync_profile(spike_trains[0], spike_trains[1]) + x, y = spike_profile.get_plottable_data() + +For the direct computation of the overall spike synchronization value within some interval, the :func:`.spike_sync` function can be used: + +.. code:: python + + spike_sync = spk.spike_sync(spike_trains[0], spike_trains[1], interval) + + +Computing multivariate profiles and distances +---------------------------------------------- + +To compute the multivariate ISI-profile, SPIKE-profile or SPIKE-Synchronization profile of a set of spike trains, PySpike provides multivariate versions of the profile functions. +The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :func:`.isi_profile_multi`, :func:`.spike_profile_multi`, :func:`.spike_sync_profile_multi` functions: + +.. code:: python + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", + edges=(0, 4000)) + avrg_isi_profile = spk.isi_profile_multi(spike_trains) + avrg_spike_profile = spk.spike_profile_multi(spike_trains) + avrg_spike_sync_profile = spk.spike_sync_profile_multi(spike_trains) + +All functions take an optional parameter :code:`indices`, a list of indices that allows you to define the spike trains that should be used for the multivariate profile. +As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions :func:`.isi_distance_multi`, :func:`.spike_distance_multi` and :func:`.spike_sync_multi`, which return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value. +Those functions also accept an :code:`interval` parameter that can be used to specify the begin and end of the averaging interval as a pair of floats; if omitted, the complete interval is used. + +Another option for characterizing large sets of spike trains is distance matrices. +Each entry in the distance matrix represents a bivariate distance (similarity for SPIKE-Synchronization) of two spike trains. +The distance matrix is symmetric and has zero values (ones for SPIKE-Synchronization) at the diagonal and is computed with the functions :func:`.isi_distance_matrix`, :func:`.spike_distance_matrix` and :func:`.spike_sync_matrix`. +The following example computes and plots the ISI- and SPIKE-distance matrix as well as the SPIKE-Synchronization-matrix, with different intervals. + +.. 
code:: python + + spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", edges=(0, 4000)) + + plt.figure() + isi_distance = spk.isi_distance_matrix(spike_trains) + plt.imshow(isi_distance, interpolation='none') + plt.title("ISI-distance") + + plt.figure() + spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0,1000)) + plt.imshow(spike_distance, interpolation='none') + plt.title("SPIKE-distance") + + plt.figure() + spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000,4000)) + plt.imshow(spike_sync, interpolation='none') + plt.title("SPIKE-Sync") + + plt.show() + -- cgit v1.2.3 From 97a1a5ace20b641fef78ed7cb2338ece04e07cac Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 12:25:27 +0200 Subject: updated changelog --- Changelog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Changelog b/Changelog index 209b138..beb2ce5 100644 --- a/Changelog +++ b/Changelog @@ -1,6 +1,9 @@ PySpike v0.2.1: * addition of __version__ attribute * restructured docs, Readme now only contains basic examples + * performance improvements by introducing specific distance functions + * performance improvements for multivariate profiles by using recursive + approach PySpike v0.2: -- cgit v1.2.3 From 017893e782d1747e1f031131077a40a48f882e86 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 14:32:48 +0200 Subject: treatment of empty spike trains in isi functions --- pyspike/SpikeTrain.py | 10 ++++++++ pyspike/isi_distance.py | 7 ++++-- test/test_empty.py | 62 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 test/test_empty.py diff --git a/pyspike/SpikeTrain.py b/pyspike/SpikeTrain.py index a02b7ab..9127b60 100644 --- a/pyspike/SpikeTrain.py +++ b/pyspike/SpikeTrain.py @@ -48,3 +48,13 @@ class SpikeTrain(object): """ return SpikeTrain(self.spikes.copy(), [self.t_start, self.t_end]) + + def get_spikes_non_empty(self): + """Returns the spikes of this spike train with auxiliary spikes in case + of empty spike trains.
+ """ + if len(self.spikes) < 2: + return np.unique(np.insert([self.t_start, self.t_end], 1, + self.spikes)) + else: + return self.spikes diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py index 2a1ed3a..5ea555d 100644 --- a/pyspike/isi_distance.py +++ b/pyspike/isi_distance.py @@ -41,7 +41,8 @@ Falling back to slow python backend.") from cython.python_backend import isi_distance_python \ as isi_profile_impl - times, values = isi_profile_impl(spike_train1.spikes, spike_train2.spikes, + times, values = isi_profile_impl(spike_train1.get_spikes_non_empty(), + spike_train2.get_spikes_non_empty(), spike_train1.t_start, spike_train1.t_end) return PieceWiseConstFunc(times, values) @@ -73,7 +74,9 @@ def isi_distance(spike_train1, spike_train2, interval=None): try: from cython.cython_distances import isi_distance_cython \ as isi_distance_impl - return isi_distance_impl(spike_train1.spikes, spike_train2.spikes, + + return isi_distance_impl(spike_train1.get_spikes_non_empty(), + spike_train2.get_spikes_non_empty(), spike_train1.t_start, spike_train1.t_end) except ImportError: # Cython backend not available: fall back to profile averaging diff --git a/test/test_empty.py b/test/test_empty.py new file mode 100644 index 0000000..22982c7 --- /dev/null +++ b/test/test_empty.py @@ -0,0 +1,62 @@ +""" test_empty.py + +Tests the distance measure for empty spike trains + +Copyright 2014, Mario Mulansky + +Distributed under the BSD License + +""" + +from __future__ import print_function +import numpy as np +from copy import copy +from numpy.testing import assert_equal, assert_almost_equal, \ + assert_array_equal, assert_array_almost_equal + +import pyspike as spk +from pyspike import SpikeTrain + + +def test_get_non_empty(): + st = SpikeTrain([], edges=(0.0, 1.0)) + spikes = st.get_spikes_non_empty() + assert_array_equal(spikes, [0.0, 1.0]) + + st = SpikeTrain([0.5, ], edges=(0.0, 1.0)) + spikes = st.get_spikes_non_empty() + assert_array_equal(spikes, [0.0, 0.5, 1.0]) + + +def test_isi_empty(): + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([], edges=(0.0, 1.0)) + d = spk.isi_distance(st1, st2) + assert_equal(d, 0.0) + prof = spk.isi_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 1.0]) + assert_array_equal(prof.y, [0.0, ]) + + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.isi_distance(st1, st2) + assert_equal(d, 0.6*0.4+0.4*0.6) + prof = spk.isi_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 0.4, 1.0]) + assert_array_equal(prof.y, [0.6, 0.4]) + + st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.isi_distance(st1, st2) + assert_almost_equal(d, 0.2/0.6*0.4 + 0.0 + 0.2/0.6*0.4, decimal=15) + prof = spk.isi_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15) + assert_array_almost_equal(prof.y, [0.2/0.6, 0.0, 0.2/0.6], decimal=15) + + +if __name__ == "__main__": + test_get_non_empty() + test_isi_empty() -- cgit v1.2.3 From b6521869cad89eae119391557bfa57e818dc9894 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 15:54:19 +0200 Subject: treatment of empty spike trains in spike distance --- pyspike/spike_distance.py | 13 +++++++------ test/test_empty.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py index 
d727fa2..ac2d260 100644 --- a/pyspike/spike_distance.py +++ b/pyspike/spike_distance.py @@ -41,10 +41,11 @@ Falling back to slow python backend.") from cython.python_backend import spike_distance_python \ as spike_profile_impl - times, y_starts, y_ends = spike_profile_impl(spike_train1.spikes, - spike_train2.spikes, - spike_train1.t_start, - spike_train1.t_end) + times, y_starts, y_ends = spike_profile_impl( + spike_train1.get_spikes_non_empty(), + spike_train2.get_spikes_non_empty(), + spike_train1.t_start, spike_train1.t_end) + return PieceWiseLinFunc(times, y_starts, y_ends) @@ -74,8 +75,8 @@ def spike_distance(spike_train1, spike_train2, interval=None): try: from cython.cython_distances import spike_distance_cython \ as spike_distance_impl - return spike_distance_impl(spike_train1.spikes, - spike_train2.spikes, + return spike_distance_impl(spike_train1.get_spikes_non_empty(), + spike_train2.get_spikes_non_empty(), spike_train1.t_start, spike_train1.t_end) except ImportError: diff --git a/test/test_empty.py b/test/test_empty.py index 22982c7..e08bd1a 100644 --- a/test/test_empty.py +++ b/test/test_empty.py @@ -10,7 +10,6 @@ Distributed under the BSD License from __future__ import print_function import numpy as np -from copy import copy from numpy.testing import assert_equal, assert_almost_equal, \ assert_array_equal, assert_array_almost_equal @@ -57,6 +56,53 @@ def test_isi_empty(): assert_array_almost_equal(prof.y, [0.2/0.6, 0.0, 0.2/0.6], decimal=15) +def test_spike_empty(): + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([], edges=(0.0, 1.0)) + d = spk.spike_distance(st1, st2) + assert_equal(d, 0.0) + prof = spk.spike_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 1.0]) + assert_array_equal(prof.y1, [0.0, ]) + assert_array_equal(prof.y2, [0.0, ]) + + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.spike_distance(st1, st2) + assert_almost_equal(d, 0.4*0.4*1.0/(0.4+1.0)**2 + 0.6*0.4*1.0/(0.6+1.0)**2, + decimal=15) + prof = spk.spike_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 0.4, 1.0]) + assert_array_almost_equal(prof.y1, [0.0, 2*0.4*1.0/(0.6+1.0)**2], + decimal=15) + assert_array_almost_equal(prof.y2, [2*0.4*1.0/(0.4+1.0)**2, 0.0], + decimal=15) + + st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.spike_distance(st1, st2) + s1 = np.array([0.0, 0.4*0.2/0.6, 0.2, 0.0]) + s2 = np.array([0.0, 0.2, 0.2*0.4/0.6, 0.0]) + isi1 = np.array([0.6, 0.6, 0.4]) + isi2 = np.array([0.4, 0.6, 0.6]) + expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2) + expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2) + expected_times = np.array([0.0, 0.4, 0.6, 1.0]) + expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) * + (expected_y1+expected_y2)/2) + expected_spike_val /= (expected_times[-1]-expected_times[0]) + + assert_almost_equal(d, expected_spike_val, decimal=15) + prof = spk.spike_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15) + assert_array_almost_equal(prof.y1, expected_y1, decimal=15) + assert_array_almost_equal(prof.y2, expected_y2, decimal=15) + + if __name__ == "__main__": test_get_non_empty() test_isi_empty() + test_spike_empty() -- cgit v1.2.3 From 3b10b416940ae674df6d9dff8cdddc31085d8cf5 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 17:03:29 +0200 Subject: updated copyright year 
--- test/test_empty.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_empty.py b/test/test_empty.py index e08bd1a..42c3716 100644 --- a/test/test_empty.py +++ b/test/test_empty.py @@ -2,7 +2,7 @@ Tests the distance measure for empty spike trains -Copyright 2014, Mario Mulansky +Copyright 2015, Mario Mulansky Distributed under the BSD License -- cgit v1.2.3 From bec2529367f1bdd5dac6d6fbaec560a30feec3c7 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 17:41:08 +0200 Subject: treatment of empty spike trains in spike sync --- pyspike/DiscreteFunc.py | 4 ++++ pyspike/cython/cython_distances.pyx | 13 +++++++++---- pyspike/cython/cython_profiles.pyx | 9 +++++---- pyspike/cython/python_backend.py | 2 +- test/test_empty.py | 39 +++++++++++++++++++++++++++++++++++++ 5 files changed, 58 insertions(+), 9 deletions(-) diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py index dfe2cab..6ade87e 100644 --- a/pyspike/DiscreteFunc.py +++ b/pyspike/DiscreteFunc.py @@ -136,6 +136,10 @@ class DiscreteFunc(object): :rtype: pair of float """ + if len(self.y) <= 2: + # no actual values in the profile, return spike sync of 0 + return 0.0, 1.0 + def get_indices(ival): """ Retuns the indeces surrounding the given interval""" start_ind = np.searchsorted(self.x, ival[0], side='right') diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx index bf90638..16780f2 100644 --- a/pyspike/cython/cython_distances.pyx +++ b/pyspike/cython/cython_distances.pyx @@ -333,8 +333,8 @@ def spike_distance_cython(double[:] t1, double[:] t2, # get_tau ############################################################ cdef inline double get_tau(double[:] spikes1, double[:] spikes2, - int i, int j, double max_tau): - cdef double m = 1E100 # some huge number + int i, int j, double interval, double max_tau): + cdef double m = interval # use interval length as initial tau cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 if i < N1 and i > -1: @@ -363,12 +363,13 @@ def coincidence_value_cython(double[:] spikes1, double[:] spikes2, cdef int j = -1 cdef double coinc = 0.0 cdef double mp = 0.0 + cdef double interval = t_end - t_start cdef double tau while i + j < N1 + N2 - 2: if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): i += 1 mp += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) if j > -1 and spikes1[i]-spikes2[j] < tau: # coincidence between the current spike and the previous spike # both get marked with 1 @@ -376,7 +377,7 @@ def coincidence_value_cython(double[:] spikes1, double[:] spikes2, elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): j += 1 mp += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) if i > -1 and spikes2[j]-spikes1[i] < tau: # coincidence between the current spike and the previous spike # both get marked with 1 @@ -389,4 +390,8 @@ def coincidence_value_cython(double[:] spikes1, double[:] spikes2, mp += 2 coinc += 2 + if coinc == 0 and mp == 0: + # empty spike trains -> set mp to one to avoid 0/0 + mp = 1 + return coinc, mp diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index 3690091..d937a02 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -345,8 +345,8 @@ def spike_profile_cython(double[:] t1, double[:] t2, # get_tau 
############################################################ cdef inline double get_tau(double[:] spikes1, double[:] spikes2, - int i, int j, double max_tau): - cdef double m = 1E100 # some huge number + int i, int j, double interval, double max_tau): + cdef double m = interval # use interval as initial tau cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1 cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1 if i < N1 and i > -1: @@ -377,12 +377,13 @@ def coincidence_profile_cython(double[:] spikes1, double[:] spikes2, cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times cdef double[:] c = np.zeros(N1 + N2 + 2) # coincidences cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity + cdef double interval = t_end - t_start cdef double tau while i + j < N1 + N2 - 2: if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]): i += 1 n += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) st[n] = spikes1[i] if j > -1 and spikes1[i]-spikes2[j] < tau: # coincidence between the current spike and the previous spike @@ -392,7 +393,7 @@ def coincidence_profile_cython(double[:] spikes1, double[:] spikes2, elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]): j += 1 n += 1 - tau = get_tau(spikes1, spikes2, i, j, max_tau) + tau = get_tau(spikes1, spikes2, i, j, interval, max_tau) st[n] = spikes2[j] if i > -1 and spikes2[j]-spikes1[i] < tau: # coincidence between the current spike and the previous spike diff --git a/pyspike/cython/python_backend.py b/pyspike/cython/python_backend.py index 1fd8c42..830dc69 100644 --- a/pyspike/cython/python_backend.py +++ b/pyspike/cython/python_backend.py @@ -340,7 +340,7 @@ def cumulative_sync_python(spikes1, spikes2): def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau): def get_tau(spikes1, spikes2, i, j, max_tau): - m = 1E100 # some huge number + m = t_end - t_start # use interval as initial tau if i < len(spikes1)-1 and i > -1: m = min(m, spikes1[i+1]-spikes1[i]) if j < len(spikes2)-1 and j > -1: diff --git a/test/test_empty.py b/test/test_empty.py index 42c3716..b31d8a4 100644 --- a/test/test_empty.py +++ b/test/test_empty.py @@ -102,7 +102,46 @@ def test_spike_empty(): assert_array_almost_equal(prof.y2, expected_y2, decimal=15) +def test_spike_sync_empty(): + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([], edges=(0.0, 1.0)) + d = spk.spike_sync(st1, st2) + assert_equal(d, 0.0) + prof = spk.spike_sync_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 1.0]) + assert_array_equal(prof.y, [0.0, 0.0]) + + st1 = SpikeTrain([], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.spike_sync(st1, st2) + assert_equal(d, 0.0) + prof = spk.spike_sync_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_equal(prof.x, [0.0, 0.4, 1.0]) + assert_array_equal(prof.y, [0.0, 0.0, 0.0]) + + st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) + d = spk.spike_sync(st1, st2) + assert_almost_equal(d, 1.0, decimal=15) + prof = spk.spike_sync_profile(st1, st2) + assert_equal(d, prof.avrg()) + assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15) + assert_array_almost_equal(prof.y, [1.0, 1.0, 1.0, 1.0], decimal=15) + + st1 = SpikeTrain([0.2, ], edges=(0.0, 1.0)) + st2 = SpikeTrain([0.8, ], edges=(0.0, 1.0)) + d = spk.spike_sync(st1, st2) + assert_almost_equal(d, 0.0, decimal=15) + prof = spk.spike_sync_profile(st1, st2) + assert_equal(d, prof.avrg()) + 
assert_array_almost_equal(prof.x, [0.0, 0.2, 0.8, 1.0], decimal=15) + assert_array_almost_equal(prof.y, [0.0, 0.0, 0.0, 0.0], decimal=15) + + if __name__ == "__main__": test_get_non_empty() test_isi_empty() test_spike_empty() + test_spike_sync_empty() -- cgit v1.2.3 From a35402c208bd0ad31e5e60b6ddc55a3470e7bdde Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Mon, 11 May 2015 17:54:02 +0200 Subject: bugfix: spike_sync=1 for empty spike trains --- pyspike/DiscreteFunc.py | 4 ++-- pyspike/cython/cython_distances.pyx | 3 ++- pyspike/cython/cython_profiles.pyx | 12 ++++++++---- pyspike/cython/python_backend.py | 12 ++++++++---- test/test_empty.py | 4 ++-- 5 files changed, 22 insertions(+), 13 deletions(-) diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py index 6ade87e..17153ee 100644 --- a/pyspike/DiscreteFunc.py +++ b/pyspike/DiscreteFunc.py @@ -137,8 +137,8 @@ class DiscreteFunc(object): """ if len(self.y) <= 2: - # no actual values in the profile, return spike sync of 0 - return 0.0, 1.0 + # no actual values in the profile, return spike sync of 1 + return 1.0, 1.0 def get_indices(ival): """ Retuns the indeces surrounding the given interval""" diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx index 16780f2..c4f2349 100644 --- a/pyspike/cython/cython_distances.pyx +++ b/pyspike/cython/cython_distances.pyx @@ -391,7 +391,8 @@ def coincidence_value_cython(double[:] spikes1, double[:] spikes2, coinc += 2 if coinc == 0 and mp == 0: - # empty spike trains -> set mp to one to avoid 0/0 + # empty spike trains -> spike sync = 1 by definition + coinc = 1 mp = 1 return coinc, mp diff --git a/pyspike/cython/cython_profiles.pyx b/pyspike/cython/cython_profiles.pyx index d937a02..f9893eb 100644 --- a/pyspike/cython/cython_profiles.pyx +++ b/pyspike/cython/cython_profiles.pyx @@ -416,9 +416,13 @@ def coincidence_profile_cython(double[:] spikes1, double[:] spikes2, st[0] = t_start st[len(st)-1] = t_end - c[0] = c[1] - c[len(c)-1] = c[len(c)-2] - mp[0] = mp[1] - mp[len(mp)-1] = mp[len(mp)-2] + if N1 + N2 > 0: + c[0] = c[1] + c[len(c)-1] = c[len(c)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + else: + c[0] = 1 + c[1] = 1 return st, c, mp diff --git a/pyspike/cython/python_backend.py b/pyspike/cython/python_backend.py index 830dc69..69a420f 100644 --- a/pyspike/cython/python_backend.py +++ b/pyspike/cython/python_backend.py @@ -399,10 +399,14 @@ def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau): st[0] = t_start st[len(st)-1] = t_end - c[0] = c[1] - c[len(c)-1] = c[len(c)-2] - mp[0] = mp[1] - mp[len(mp)-1] = mp[len(mp)-2] + if N1 + N2 > 0: + c[0] = c[1] + c[len(c)-1] = c[len(c)-2] + mp[0] = mp[1] + mp[len(mp)-1] = mp[len(mp)-2] + else: + c[0] = 1 + c[1] = 1 return st, c, mp diff --git a/test/test_empty.py b/test/test_empty.py index b31d8a4..48be25d 100644 --- a/test/test_empty.py +++ b/test/test_empty.py @@ -106,11 +106,11 @@ def test_spike_sync_empty(): st1 = SpikeTrain([], edges=(0.0, 1.0)) st2 = SpikeTrain([], edges=(0.0, 1.0)) d = spk.spike_sync(st1, st2) - assert_equal(d, 0.0) + assert_equal(d, 1.0) prof = spk.spike_sync_profile(st1, st2) assert_equal(d, prof.avrg()) assert_array_equal(prof.x, [0.0, 1.0]) - assert_array_equal(prof.y, [0.0, 0.0]) + assert_array_equal(prof.y, [1.0, 1.0]) st1 = SpikeTrain([], edges=(0.0, 1.0)) st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0)) -- cgit v1.2.3 From 6f418a5a837939b132967bcdb3ff0ede6d899bd2 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Tue, 12 May 2015 18:45:03 +0200 Subject: +functions to 
obtain values of the pwc/pwl profile Added __call__ operators to PieceWiseConst and PieceWiseLin class for obtaining function values at certain points in time. --- pyspike/PieceWiseConstFunc.py | 22 ++++++++++++++++++++++ pyspike/PieceWiseLinFunc.py | 30 ++++++++++++++++++++++++++++++ test/test_function.py | 32 +++++++++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py index 41998ef..cf64e58 100644 --- a/pyspike/PieceWiseConstFunc.py +++ b/pyspike/PieceWiseConstFunc.py @@ -26,6 +26,28 @@ class PieceWiseConstFunc(object): self.x = np.array(x) self.y = np.array(y) + def __call__(self, t): + """ Returns the function value for the given time t. If t is a list of + times, the corresponding list of values is returned. + + :param: time t, or list of times + :returns: function value(s) at that time(s). + """ + assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \ + "Invalid time: " + str(t) + + ind = np.searchsorted(self.x, t, side='right') + # correct the cases t == x[0], t == x[-1] + try: + ind[ind == 0] = 1 + ind[ind == len(self.x)] = len(self.x)-1 + except TypeError: + if ind == 0: + ind = 1 + if ind == len(self.x): + ind = len(self.x)-1 + return self.y[ind-1] + def copy(self): """ Returns a copy of itself diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py index f2442be..b9787eb 100644 --- a/pyspike/PieceWiseLinFunc.py +++ b/pyspike/PieceWiseLinFunc.py @@ -29,6 +29,36 @@ class PieceWiseLinFunc: self.y1 = np.array(y1) self.y2 = np.array(y2) + def __call__(self, t): + """ Returns the function value for the given time t. If t is a list of + times, the corresponding list of values is returned. + + :param: time t, or list of times + :returns: function value(s) at that time(s). 
+ """ + def intermediate_value(x0, x1, y0, y1, x): + """ computes the intermediate value of a linear function """ + return y0 + (y1-y0)*(x-x0)/(x1-x0) + + assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \ + "Invalid time: " + str(t) + + ind = np.searchsorted(self.x, t, side='right') + # correct the cases t == x[0], t == x[-1] + try: + ind[ind == 0] = 1 + ind[ind == len(self.x)] = len(self.x)-1 + except TypeError: + if ind == 0: + ind = 1 + if ind == len(self.x): + ind = len(self.x)-1 + return intermediate_value(self.x[ind-1], + self.x[ind], + self.y1[ind-1], + self.y2[ind-1], + t) + def copy(self): """ Returns a copy of itself diff --git a/test/test_function.py b/test/test_function.py index d81b03a..c56a295 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -10,7 +10,8 @@ Distributed under the BSD License from __future__ import print_function import numpy as np from copy import copy -from numpy.testing import assert_almost_equal, assert_array_almost_equal +from numpy.testing import assert_equal, assert_almost_equal, \ + assert_array_equal, assert_array_almost_equal import pyspike as spk @@ -20,6 +21,17 @@ def test_pwc(): x = [0.0, 1.0, 2.0, 2.5, 4.0] y = [1.0, -0.5, 1.5, 0.75] f = spk.PieceWiseConstFunc(x, y) + + # function values + assert_equal(f(0.0), 1.0) + assert_equal(f(0.5), 1.0) + assert_equal(f(2.25), 1.5) + assert_equal(f(3.5), 0.75) + assert_equal(f(4.0), 0.75) + + assert_array_equal(f([0.0, 0.5, 2.25, 3.5, 4.0]), + [1.0, 1.0, 1.5, 0.75, 0.75]) + xp, yp = f.get_plottable_data() xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] @@ -38,11 +50,17 @@ def test_pwc(): assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16) a = f.avrg([1.0, 4.0]) assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.5*0.75)/3.0, decimal=16) + a = f.avrg([0.0, 2.2]) + assert_almost_equal(a, (1.0*1.0-0.5*1.0+0.2*1.5)/2.2, decimal=15) # averaging over multiple intervals a = f.avrg([(0.5, 1.5), (1.5, 3.5)]) assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) + # averaging over multiple intervals + a = f.avrg([(0.5, 1.5), (2.2, 3.5)]) + assert_almost_equal(a, (0.5*1.0-0.5*0.5+0.3*1.5+1.0*0.75)/2.3, decimal=15) + def test_pwc_add(): # some random data @@ -105,6 +123,18 @@ def test_pwl(): y1 = [1.0, -0.5, 1.5, 0.75] y2 = [1.5, -0.4, 1.5, 0.25] f = spk.PieceWiseLinFunc(x, y1, y2) + + # function values + assert_equal(f(0.0), 1.0) + assert_equal(f(0.5), 1.25) + assert_equal(f(2.25), 1.5) + assert_equal(f(2.5), 0.75) + assert_equal(f(3.5), 0.75-0.5*1.0/1.5) + assert_equal(f(4.0), 0.25) + + assert_array_equal(f([0.0, 0.5, 2.25, 2.5, 3.5, 4.0]), + [1.0, 1.25, 1.5, 0.75, 0.75-0.5*1.0/1.5, 0.25]) + xp, yp = f.get_plottable_data() xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0] -- cgit v1.2.3 From 40e6372c0813cb8f34abd0ffee353bde7a96f158 Mon Sep 17 00:00:00 2001 From: Mario Mulansky Date: Wed, 13 May 2015 11:26:38 +0200 Subject: new profiles example --- examples/profiles.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 examples/profiles.py diff --git a/examples/profiles.py b/examples/profiles.py new file mode 100644 index 0000000..05494bd --- /dev/null +++ b/examples/profiles.py @@ -0,0 +1,66 @@ +""" profiles.py + +Simple example showing some functionality of distance profiles. 
From f3e21dcc82d48f0980b0107f1e5cf320a8b213f3 Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Wed, 13 May 2015 11:28:07 +0200
Subject: updated Changelog

---
 Changelog | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Changelog b/Changelog
index beb2ce5..798403e 100644
--- a/Changelog
+++ b/Changelog
@@ -4,6 +4,9 @@ PySpike v0.2.1:
   * performance improvements by introducing specific distance functions
   * performance improvements for multivariate profiles by using recursive
   approach
+  * new example: performance.py
+  * consistent treatment of empty spike trains
+  * new example: profiles.py
 
 
 PySpike v0.2:
-- cgit v1.2.3

From 8841138b74242ed9eb77c972c76e9a617778a79a Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Wed, 13 May 2015 18:19:02 +0200
Subject: pwc function now returns intermediate value at exact spike times

---
 pyspike/PieceWiseConstFunc.py | 35 +++++++++++++++++++++++++++--------
 test/test_function.py         |  6 ++++--
 2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index cf64e58..dea1a56 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -37,16 +37,35 @@
             "Invalid time: " + str(t)
 
         ind = np.searchsorted(self.x, t, side='right')
-        # correct the cases t == x[0], t == x[-1]
-        try:
+        if isinstance(t, collections.Sequence):
+            # t is a sequence of values
+            # correct the cases t == x[0], t == x[-1]
             ind[ind == 0] = 1
             ind[ind == len(self.x)] = len(self.x)-1
-        except TypeError:
-            if ind == 0:
-                ind = 1
-            if ind == len(self.x):
-                ind = len(self.x)-1
-        return self.y[ind-1]
+            value = self.y[ind-1]
+            # correct the values at exact spike times: there the value should
+            # be the average of the left and right limit
+            # obtain the 'left' side indices for t
+            ind_l = np.searchsorted(self.x, t, side='left')
+            # if left and right side indices differ, the time t has to appear
+            # in self.x
+            ind_at_spike = ind[np.logical_and(np.logical_and(ind != ind_l,
+                                                             ind > 1),
+                                              ind < len(self.x))]
+            value[ind_at_spike] = 0.5 * (self.y[ind_at_spike-1] +
+                                         self.y[ind_at_spike-2])
+            return value
+        else:
+            # specific check for interval edges
+            if t == self.x[0]:
+                return self.y[0]
+            if t == self.x[-1]:
+                return self.y[-1]
+            # check if we are on any other exact spike time
+            if sum(self.x == t) > 0:
+                # use the middle of the left and right ISI value
+                return 0.5 * (self.y[ind-1] + self.y[ind-2])
+            return self.y[ind-1]
 
     def copy(self):
         """ Returns a copy of itself

diff --git a/test/test_function.py b/test/test_function.py
index c56a295..8ad4b17 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -25,12 +25,14 @@ def test_pwc():
     # function values
     assert_equal(f(0.0), 1.0)
     assert_equal(f(0.5), 1.0)
+    assert_equal(f(1.0), 0.25)
     assert_equal(f(2.25), 1.5)
+    assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75)
     assert_equal(f(4.0), 0.75)
 
-    assert_array_equal(f([0.0, 0.5, 2.25, 3.5, 4.0]),
-                       [1.0, 1.0, 1.5, 0.75, 0.75])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.0, 0.25, 1.5, 2.25/2, 0.75, 0.75])
 
     xp, yp = f.get_plottable_data()
-- cgit v1.2.3
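With this change, evaluating a piece-wise constant profile exactly at one of its discontinuities returns the average of the left and right limit. A small sketch using the test profile from above:

.. code:: python

    import pyspike as spk

    f = spk.PieceWiseConstFunc([0.0, 1.0, 2.0, 2.5, 4.0],
                               [1.0, -0.5, 1.5, 0.75])

    # t = 1.0 is an exact spike time: the left limit is 1.0 and the
    # right limit is -0.5, so the average 0.25 is returned
    print(f(1.0))           # 0.25
    # the interval edges still return the first/last segment value
    print(f(0.0), f(4.0))   # 1.0 0.75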
From a61a14295e28e6e95fa510693a11ae8c78a552ab Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Sun, 17 May 2015 17:50:39 +0200
Subject: return correct values at exact spike times

The pwc and pwl function objects return the average of the left and right
limit as the function value at exact spike times.
---
 pyspike/PieceWiseConstFunc.py | 15 ++++++++-----
 pyspike/PieceWiseLinFunc.py   | 51 +++++++++++++++++++++++++++++++++----------
 test/test_function.py         | 13 ++++++-----
 3 files changed, 56 insertions(+), 23 deletions(-)

diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index dea1a56..6d7a845 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -49,13 +49,16 @@ class PieceWiseConstFunc(object):
             ind_l = np.searchsorted(self.x, t, side='left')
             # if left and right side indices differ, the time t has to appear
             # in self.x
-            ind_at_spike = ind[np.logical_and(np.logical_and(ind != ind_l,
-                                                             ind > 1),
-                                              ind < len(self.x))]
-            value[ind_at_spike] = 0.5 * (self.y[ind_at_spike-1] +
-                                         self.y[ind_at_spike-2])
+            ind_at_spike = np.logical_and(np.logical_and(ind != ind_l,
+                                                         ind > 1),
+                                          ind < len(self.x))
+            # get the corresponding indices for the resulting value array
+            val_ind = np.arange(len(ind))[ind_at_spike]
+            # and for the arrays self.x and self.y
+            xy_ind = ind[ind_at_spike]
+            value[val_ind] = 0.5 * (self.y[xy_ind-1] + self.y[xy_ind-2])
             return value
-        else:
+        else:  # t is a single value
             # specific check for interval edges
             if t == self.x[0]:
                 return self.y[0]

diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py
index b9787eb..03c2da2 100644
--- a/pyspike/PieceWiseLinFunc.py
+++ b/pyspike/PieceWiseLinFunc.py
@@ -44,20 +44,47 @@
             "Invalid time: " + str(t)
 
         ind = np.searchsorted(self.x, t, side='right')
-        # correct the cases t == x[0], t == x[-1]
-        try:
+        if isinstance(t, collections.Sequence):
+            # t is a sequence of values
+            # correct the cases t == x[0], t == x[-1]
             ind[ind == 0] = 1
             ind[ind == len(self.x)] = len(self.x)-1
-        except TypeError:
-            if ind == 0:
-                ind = 1
-            if ind == len(self.x):
-                ind = len(self.x)-1
-        return intermediate_value(self.x[ind-1],
-                                  self.x[ind],
-                                  self.y1[ind-1],
-                                  self.y2[ind-1],
-                                  t)
+            value = intermediate_value(self.x[ind-1],
+                                       self.x[ind],
+                                       self.y1[ind-1],
+                                       self.y2[ind-1],
+                                       t)
+            # correct the values at exact spike times: there the value should
+            # be the average of the left and right limit
+            # obtain the 'left' side indices for t
+            ind_l = np.searchsorted(self.x, t, side='left')
+            # if left and right side indices differ, the time t has to appear
+            # in self.x
+            ind_at_spike = np.logical_and(np.logical_and(ind != ind_l,
+                                                         ind > 1),
+                                          ind < len(self.x))
+            # get the corresponding indices for the resulting value array
+            val_ind = np.arange(len(ind))[ind_at_spike]
+            # and for the values in self.x, y1, y2
+            xy_ind = ind[ind_at_spike]
+            # the values are defined as the average of the left and right limit
+            value[val_ind] = 0.5 * (self.y1[xy_ind-1] + self.y2[xy_ind-2])
+            return value
+        else:  # t is a single value
+            # specific check for interval edges
+            if t == self.x[0]:
+                return self.y1[0]
+            if t == self.x[-1]:
+                return self.y2[-1]
+            # check if we are on any other exact spike time
+            if sum(self.x == t) > 0:
+                # use the middle of the left and right SPIKE value
+                return 0.5 * (self.y1[ind-1] + self.y2[ind-2])
+            return intermediate_value(self.x[ind-1],
+                                      self.x[ind],
+                                      self.y1[ind-1],
+                                      self.y2[ind-1],
+                                      t)
 
     def copy(self):
         """ Returns a copy of itself

diff --git a/test/test_function.py b/test/test_function.py
index 8ad4b17..92d378d 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -26,13 +26,14 @@ def test_pwc():
     assert_equal(f(0.0), 1.0)
     assert_equal(f(0.5), 1.0)
     assert_equal(f(1.0), 0.25)
+    assert_equal(f(2.0), 0.5)
     assert_equal(f(2.25), 1.5)
     assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75)
     assert_equal(f(4.0), 0.75)
 
-    assert_array_equal(f([0.0, 0.5, 1.0, 2.25, 2.5, 3.5, 4.0]),
-                       [1.0, 1.0, 0.25, 1.5, 2.25/2, 0.75, 0.75])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.0, 0.25, 0.5, 1.5, 2.25/2, 0.75, 0.75])
 
     xp, yp = f.get_plottable_data()
@@ -129,13 +130,15 @@ def test_pwl():
     # function values
     assert_equal(f(0.0), 1.0)
     assert_equal(f(0.5), 1.25)
+    assert_equal(f(1.0), 0.5)
+    assert_equal(f(2.0), 1.1/2)
     assert_equal(f(2.25), 1.5)
-    assert_equal(f(2.5), 0.75)
+    assert_equal(f(2.5), 2.25/2)
     assert_equal(f(3.5), 0.75-0.5*1.0/1.5)
     assert_equal(f(4.0), 0.25)
 
-    assert_array_equal(f([0.0, 0.5, 2.25, 2.5, 3.5, 4.0]),
-                       [1.0, 1.25, 1.5, 0.75, 0.75-0.5*1.0/1.5, 0.25])
+    assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+                       [1.0, 1.25, 0.5, 0.55, 1.5, 2.25/2, 0.75-0.5/1.5, 0.25])
 
     xp, yp = f.get_plottable_data()
-- cgit v1.2.3
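The same convention now holds for piece-wise linear profiles: within a segment the value is interpolated linearly, while exactly at a spike time the average of the left and right limit is returned. A sketch with the test data from above:

.. code:: python

    import pyspike as spk

    f = spk.PieceWiseLinFunc([0.0, 1.0, 2.0, 2.5, 4.0],
                             [1.0, -0.5, 1.5, 0.75],  # values at segment starts
                             [1.5, -0.4, 1.5, 0.25])  # values at segment ends

    print(f(0.5))  # 1.25, linear interpolation between 1.0 and 1.5
    print(f(2.0))  # 0.55, average of left limit -0.4 and right limit 1.5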
From c8f524db6e1add464aa3596ea46534c21096589e Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Mon, 18 May 2015 15:08:24 +0200
Subject: cosmetics

---
 pyspike/PieceWiseConstFunc.py | 2 ++
 pyspike/PieceWiseLinFunc.py   | 1 +
 2 files changed, 3 insertions(+)

diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index 6d7a845..2705443 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -37,6 +37,7 @@ class PieceWiseConstFunc(object):
             "Invalid time: " + str(t)
 
         ind = np.searchsorted(self.x, t, side='right')
+
         if isinstance(t, collections.Sequence):
             # t is a sequence of values
             # correct the cases t == x[0], t == x[-1]
@@ -56,6 +57,7 @@ class PieceWiseConstFunc(object):
             val_ind = np.arange(len(ind))[ind_at_spike]
             # and for the arrays self.x and self.y
             xy_ind = ind[ind_at_spike]
+            # use the middle of the left and right ISI value
             value[val_ind] = 0.5 * (self.y[xy_ind-1] + self.y[xy_ind-2])
             return value
         else:  # t is a single value

diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py
index 03c2da2..c0dd475 100644
--- a/pyspike/PieceWiseLinFunc.py
+++ b/pyspike/PieceWiseLinFunc.py
@@ -44,6 +44,7 @@ class PieceWiseLinFunc:
             "Invalid time: " + str(t)
 
         ind = np.searchsorted(self.x, t, side='right')
+
         if isinstance(t, collections.Sequence):
             # t is a sequence of values
             # correct the cases t == x[0], t == x[-1]
-- cgit v1.2.3
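Note that the :code:`isinstance(t, collections.Sequence)` check above distinguishes plain Python sequences from scalars; a `numpy` array is not a :code:`collections.Sequence`, so vectorized evaluation is best done with a list or tuple of times (an observation based on the check above, not a documented API guarantee):

.. code:: python

    import pyspike as spk

    f = spk.PieceWiseConstFunc([0.0, 1.0, 2.0, 2.5, 4.0],
                               [1.0, -0.5, 1.5, 0.75])

    # a list of times takes the vectorized branch of __call__
    print(f([0.5, 1.0, 2.25]))  # [1.0, 0.25, 1.5]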
From 81fc2c20e7360714f218e9bba729ec4387f59aef Mon Sep 17 00:00:00 2001
From: Mario Mulansky
Date: Mon, 18 May 2015 15:15:36 +0200
Subject: set version 0.3

---
 Changelog   | 4 +++-
 doc/conf.py | 4 ++--
 setup.py    | 8 ++++----
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/Changelog b/Changelog
index 798403e..519dd3b 100644
--- a/Changelog
+++ b/Changelog
@@ -1,4 +1,4 @@
-PySpike v0.2.1:
+PySpike v0.3:
   * addition of __version__ attribute
   * restructured docs, Readme now only contains basic examples
   * performance improvements by introducing specific distance functions
@@ -7,6 +7,8 @@ PySpike v0.2.1:
   * new example: performance.py
   * consistent treatment of empty spike trains
   * new example: profiles.py
+  * additional functionality: pwc and pwl function classes can now return
+  function values at given time(s)
 
 
 PySpike v0.2:

diff --git a/doc/conf.py b/doc/conf.py
index 9b994c2..8011ea9 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -64,9 +64,9 @@ copyright = u'2014-2015, Mario Mulansky'
 # built documents.
 #
 # The short X.Y version.
-version = '0.2'
+version = '0.3'
 # The full version, including alpha/beta/rc tags.
-release = '0.2.0'
+release = '0.3.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff --git a/setup.py b/setup.py
index 7902066..d853cdf 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ to compile cython files:
 python setup.py build_ext --inplace
 
-Copyright 2014, Mario Mulansky
+Copyright 2014-2015, Mario Mulansky
 
 Distributed under the BSD License
 
@@ -49,7 +49,7 @@ elif use_c:  # c files are there, compile to binaries
 setup(
     name='pyspike',
     packages=find_packages(exclude=['doc']),
-    version='0.1.3',
+    version='0.3.0',
     cmdclass=cmdclass,
     ext_modules=ext_modules,
     include_dirs=[numpy.get_include()],
@@ -59,7 +59,6 @@ train similarity',
     author_email='mario.mulansky@gmx.net',
     license='BSD',
     url='https://github.com/mariomulansky/PySpike',
-    # download_url='https://github.com/mariomulansky/PySpike/tarball/0.1',
     install_requires=['numpy'],
     keywords=['data analysis', 'spike', 'neuroscience'],  # arbitrary keywords
     classifiers=[
@@ -81,7 +80,8 @@ train similarity',
         'Programming Language :: Python :: 2.7',
     ],
     package_data={
-        'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c'],
+        'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c',
+                    'cython/cython_distances.c'],
         'test': ['Spike_testdata.txt']
     }
 )
-- cgit v1.2.3
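The Changelog above also mentions the new :code:`__version__` attribute introduced in this release; assuming it mirrors the release string set here, the installed version can be checked with:

.. code:: python

    import pyspike as spk

    print(spk.__version__)  # e.g. '0.3.0'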