summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMario Mulansky <mario.mulansky@gmx.net>2015-05-18 15:29:41 +0200
committerMario Mulansky <mario.mulansky@gmx.net>2015-05-18 15:29:41 +0200
commitd985f3a8de6ae840c8a127653b3d9affb1a8aa40 (patch)
treefc583b16d030b6ba67cf09895fd269bd297ad660
parenta718911ba2aac9302465c0522cc18b4470b99f77 (diff)
parent2b957ac5d7c964b6fe0e99bb078a396732331869 (diff)
Merge branch 'develop'0.3.0
-rw-r--r--Changelog12
-rw-r--r--Contributors.txt6
-rw-r--r--Readme.rst202
-rw-r--r--doc/conf.py4
-rw-r--r--doc/index.rst8
-rw-r--r--doc/tutorial.rst213
-rw-r--r--examples/performance.py65
-rw-r--r--examples/profiles.py66
-rw-r--r--pyspike/DiscreteFunc.py21
-rw-r--r--pyspike/PieceWiseConstFunc.py46
-rw-r--r--pyspike/PieceWiseLinFunc.py58
-rw-r--r--pyspike/SpikeTrain.py10
-rw-r--r--pyspike/cython/cython_add.pyx20
-rw-r--r--pyspike/cython/cython_distances.pyx398
-rw-r--r--pyspike/cython/cython_profiles.pyx (renamed from pyspike/cython/cython_distance.pyx)51
-rw-r--r--pyspike/cython/python_backend.py14
-rw-r--r--pyspike/generic.py77
-rw-r--r--pyspike/isi_distance.py39
-rw-r--r--pyspike/spike_distance.py44
-rw-r--r--pyspike/spike_sync.py79
-rw-r--r--setup.py15
-rw-r--r--test/test_distance.py14
-rw-r--r--test/test_empty.py147
-rw-r--r--test/test_function.py37
24 files changed, 1345 insertions, 301 deletions
diff --git a/Changelog b/Changelog
index 2ac065b..519dd3b 100644
--- a/Changelog
+++ b/Changelog
@@ -1,3 +1,15 @@
+PySpike v0.3:
+ * addition of __version__ attribute
+ * restructured docs, Readme now only contains basic examples
+ * performance improvements by introducing specific distance functions
+ * performance improvements for multivariate profiles by using recursive
+ approach
+ * new example: performance.py
+ * consistent treatment of empty spike trains
+ * new example: profiles.py
+ * additional functionality: pwc and pwl function classes can now return
+ function values at given time(s)
+
PySpike v0.2:
* introduction of SpikeTrain class. Breaking interface change of
diff --git a/Contributors.txt b/Contributors.txt
index 0d4afee..83563c7 100644
--- a/Contributors.txt
+++ b/Contributors.txt
@@ -3,10 +3,10 @@ Python/C Programming:
Scientific Methods:
- Thomas Kreuz
-- Nebojsa D. Bozanic
-- Mario Mulansky
-- Conor Houghton
- Daniel Chicharro
+- Conor Houghton
+- Nebojsa Bozanic
+- Mario Mulansky
The work on PySpike was supported by the European Comission through the Marie
diff --git a/Readme.rst b/Readme.rst
index f50b12b..69a86e8 100644
--- a/Readme.rst
+++ b/Readme.rst
@@ -19,12 +19,12 @@ All source codes are available on `Github <https://github.com/mariomulansky/PySp
.. [#] Kreuz T, Chicharro D, Houghton C, Andrzejak RG, Mormann F, *Monitoring spike train synchrony.* J Neurophysiol 109, 1457 (2013) `[pdf] <http://wwwold.fi.isc.cnr.it/users/thomas.kreuz/images/Kreuz_JNeurophysiol_2013_SPIKE-distance.pdf>`_
-.. [#] Kreuz T, Mulansky M and Bozanic N, *SPIKY: A graphical user interface for monitoring spike train synchrony*, tbp (2015)
+.. [#] Kreuz T, Mulansky M and Bozanic N, *SPIKY: A graphical user interface for monitoring spike train synchrony*, J Neurophysiol, in press (2015)
Important Changelog
-----------------------------
-With version 0.2.0, the :class:`.SpikeTrain` class has been introduced to represent spike trains.
+With version 0.2.0, the :code:`SpikeTrain` class has been introduced to represent spike trains.
This is a breaking change in the function interfaces.
Hence, programs written for older versions of PySpike (0.1.x) will not run with newer versions.
@@ -72,63 +72,11 @@ Then you can run the tests using the `nosetests` test framework:
Finally, you should make PySpike's installation folder known to Python to be able to import pyspike in your own projects.
Therefore, add your :code:`/path/to/PySpike` to the :code:`$PYTHONPATH` environment variable.
-Spike trains
-------------
-In PySpike, spike trains are represented by :class:`.SpikeTrain` objects.
-A :class:`.SpikeTrain` object consists of the spike times given as `numpy` arrays as well as the edges of the spike train as :code:`[t_start, t_end]`.
-The following code creates such a spike train with some arbitrary spike times:
-
-.. code:: python
-
- import numpy as np
- from pyspike import SpikeTrain
-
- spike_train = SpikeTrain(np.array([0.1, 0.3, 0.45, 0.6, 0.9], [0.0, 1.0]))
-
-Loading from text files
-.......................
-
-Typically, spike train data is loaded into PySpike from data files.
-The most straight-forward data files are text files where each line represents one spike train given as an sequence of spike times.
-An exemplary file with several spike trains is `PySpike_testdata.txt <https://github.com/mariomulansky/PySpike/blob/master/examples/PySpike_testdata.txt>`_.
-To quickly obtain spike trains from such files, PySpike provides the function :func:`.load_spike_trains_from_txt`.
-
-.. code:: python
-
- import numpy as np
- import pyspike as spk
-
- spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
- edges=(0, 4000))
-
-This function expects the name of the data file as first parameter.
-Furthermore, the time interval of the spike train measurement (edges of the spike trains) should be provided as a pair of start- and end-time values.
-Furthermore, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function).
-As result, :func:`.load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file.
-
-
-Computing bivariate distances profiles
----------------------------------------
-
-**Important note:**
-
-------------------------------
-
- Spike trains are expected to be *sorted*!
- For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted.
- Make sure that all your spike trains are sorted, which is ensured if you use the :func:`.load_spike_trains_from_txt` function with the parameter `is_sorted=False` (default).
- If in doubt, use :meth:`.SpikeTrain.sort()` to ensure a correctly sorted spike train.
-
- If you need to copy a spike train, use the :meth:`.SpikeTrain.copy()` method.
- Simple assignment `t2 = t1` does not create a copy of the spike train data, but a reference as `numpy.array` is used for storing the data.
-
-------------------------------
-
-ISI-distance
-............
+Examples
+-----------------------------
-The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :class:`.SpikeTrain` s, and plots it with matplotlib:
+The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :code:`SpikeTrain` s, and plots it with matplotlib:
.. code:: python
@@ -143,109 +91,8 @@ The following code loads some exemplary spike trains, computes the dissimilarity
print("ISI distance: %.8f" % isi_profile.avrg())
plt.show()
-The ISI-profile is a piece-wise constant function, and hence the function :func:`.isi_profile` returns an instance of the :class:`.PieceWiseConstFunc` class.
-As shown above, this class allows you to obtain arrays that can be used to plot the function with :code:`plt.plt`, but also to compute the time average, which amounts to the final scalar ISI-distance.
-By default, the time average is computed for the whole :class:`.PieceWiseConstFunc` function.
-However, it is also possible to obtain the average of a specific interval by providing a pair of floats defining the start and end of the interval.
-For the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals:
-
-.. code:: python
-
- isi1 = isi_profile.avrg(interval=(0, 1000))
- isi2 = isi_profile.avrg(interval=(1000, 2000))
- isi3 = isi_profile.avrg(interval=[(0, 1000), (2000, 3000)])
- isi4 = isi_profile.avrg(interval=[(1000, 2000), (3000, 4000)])
-
-Note, how also multiple intervals can be supplied by giving a list of tuples.
-
-If you are only interested in the scalar ISI-distance and not the profile, you can simply use:
-
-.. code:: python
-
- isi_dist = spk.isi_distance(spike_trains[0], spike_trains[1], interval)
-
-where :code:`interval` is optional, as above, and if omitted the ISI-distance is computed for the complete spike trains.
-
-
-SPIKE-distance
-..............
-
-To compute for the spike distance profile you use the function :func:`.spike_profile` instead of :code:`isi_profile` above.
-But the general approach is very similar:
-
-.. code:: python
-
- import matplotlib.pyplot as plt
- import pyspike as spk
-
- spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
- edges=(0, 4000))
- spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1])
- x, y = spike_profile.get_plottable_data()
- plt.plot(x, y, '--k')
- print("SPIKE distance: %.8f" % spike_profile.avrg())
- plt.show()
-
-This short example computes and plots the SPIKE-profile of the first two spike trains in the file :code:`PySpike_testdata.txt`.
-In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :class:`.PieceWiseLinFunc` object.
-Just like the :class:`.PieceWiseConstFunc` for the ISI-profile, the :class:`.PieceWiseLinFunc` provides a :meth:`.PieceWiseLinFunc.get_plottable_data` member function that returns arrays that can be used directly to plot the function.
-Furthermore, the :meth:`.PieceWiseLinFunc.avrg` member function returns the average of the profile defined as the overall SPIKE distance.
-As above, you can provide an interval as a pair of floats as well as a sequence of such pairs to :code:`avrg` to specify the averaging interval if required.
-
-Again, you can use
-
-.. code:: python
-
- spike_dist = spk.spike_distance(spike_trains[0], spike_trains[1], interval)
-
-to compute the SPIKE distance directly, if you are not interested in the profile at all.
-The parameter :code:`interval` is optional and if neglected the whole spike train is used.
-
-
-SPIKE synchronization
-.....................
-
-**Important note:**
-
-------------------------------
- SPIKE-Synchronization measures *similarity*.
- That means, a value of zero indicates absence of synchrony, while a value of one denotes the presence of synchrony.
- This is exactly opposite to the other two measures: ISI- and SPIKE-distance.
-
-----------------------
-
-
-SPIKE synchronization is another approach to measure spike synchrony.
-In contrast to the SPIKE- and ISI-distance, it measures similarity instead of dissimilarity, i.e. higher values represent larger synchrony.
-Another difference is that the SPIKE synchronization profile is only defined exactly at the spike times, not for the whole interval of the spike trains.
-Therefore, it is represented by a :class:`.DiscreteFunction`.
-
-To compute for the spike synchronization profile, PySpike provides the function :func:`.spike_sync_profile`.
-The general handling of the profile, however, is similar to the other profiles above:
-
-.. code:: python
-
- import matplotlib.pyplot as plt
- import pyspike as spk
-
- spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
- edges=(0, 4000))
- spike_profile = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
- x, y = spike_profile.get_plottable_data()
-
-For the direct computation of the overall spike synchronization value within some interval, the :func:`.spike_sync` function can be used:
-
-.. code:: python
-
- spike_sync = spk.spike_sync(spike_trains[0], spike_trains[1], interval)
-
-
-Computing multivariate profiles and distances
-----------------------------------------------
-
-To compute the multivariate ISI-profile, SPIKE-profile or SPIKE-Synchronization profile f a set of spike trains, PySpike provides multi-variate version of the profile function.
-The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :func:`.isi_profile_multi`, :func:`.spike_profile_multi`, :func:`.spike_sync_profile_multi` functions:
+The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :code:`isi_profile_multi`, :code:`spike_profile_multi`, :code:`spike_sync_profile_multi` functions:
.. code:: python
@@ -255,36 +102,7 @@ The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-prof
avrg_spike_profile = spk.spike_profile_multi(spike_trains)
avrg_spike_sync_profile = spk.spike_sync_profile_multi(spike_trains)
-All functions take an optional parameter :code:`indices`, a list of indices that allows to define the spike trains that should be used for the multivariate profile.
-As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions: :func:`.isi_distance_multi`, :func:`.spike_distance_multi` and :func:`.spike_sync_multi`, that return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value.
-Those functions also accept an :code:`interval` parameter that can be used to specify the begin and end of the averaging interval as a pair of floats, if neglected the complete interval is used.
-
-Another option to characterize large sets of spike trains are distance matrices.
-Each entry in the distance matrix represents a bivariate distance (similarity for SPIKE-Synchronization) of two spike trains.
-The distance matrix is symmetric and has zero values (ones) at the diagonal and is computed with the functions :func:`.isi_distance_matrix`, :func:`.spike_distance_matrix` and :func:`.spike_sync_matrix`.
-The following example computes and plots the ISI- and SPIKE-distance matrix as well as the SPIKE-Synchronization-matrix, with different intervals.
-
-.. code:: python
-
- spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)
-
- plt.figure()
- isi_distance = spk.isi_distance_matrix(spike_trains)
- plt.imshow(isi_distance, interpolation='none')
- plt.title("ISI-distance")
-
- plt.figure()
- spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0,1000))
- plt.imshow(spike_distance, interpolation='none')
- plt.title("SPIKE-distance")
-
- plt.figure()
- spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000,4000))
- plt.imshow(spike_sync, interpolation='none')
- plt.title("SPIKE-Sync")
-
- plt.show()
-
+More examples with detailed descriptions can be found in the `tutorial section <http://mariomulansky.github.io/PySpike/#tutorial>`_.
===============================================================================
@@ -298,10 +116,10 @@ Curie Initial Training Network* `Neural Engineering Transformative Technologies
**Scientific Methods:**
- Thomas Kreuz
- - Nebojsa D. Bozanic
- - Mario Mulansky
- - Conor Houghton
- Daniel Chicharro
+ - Conor Houghton
+ - Nebojsa Bozanic
+ - Mario Mulansky
.. _ISI: http://www.scholarpedia.org/article/Measures_of_spike_train_synchrony#ISI-distance
.. _SPIKE: http://www.scholarpedia.org/article/SPIKE-distance
diff --git a/doc/conf.py b/doc/conf.py
index 5427bb1..8011ea9 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -64,9 +64,9 @@ copyright = u'2014-2015, Mario Mulansky'
# built documents.
#
# The short X.Y version.
-version = '0.1'
+version = '0.3'
# The full version, including alpha/beta/rc tags.
-release = '0.1'
+release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/index.rst b/doc/index.rst
index bc05b60..f34690b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -5,6 +5,11 @@
.. include:: ../Readme.rst
+Tutorial
+===================================
+
+.. include:: tutorial.rst
+
Reference
===================================
@@ -14,9 +19,8 @@ Reference
pyspike
Indices and tables
-==================
+===================================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
-
diff --git a/doc/tutorial.rst b/doc/tutorial.rst
new file mode 100644
index 0000000..f7fc20b
--- /dev/null
+++ b/doc/tutorial.rst
@@ -0,0 +1,213 @@
+Spike trains
+------------
+
+In PySpike, spike trains are represented by :class:`.SpikeTrain` objects.
+A :class:`.SpikeTrain` object consists of the spike times given as `numpy` arrays as well as the edges of the spike train as :code:`[t_start, t_end]`.
+The following code creates such a spike train with some arbitrary spike times:
+
+.. code:: python
+
+ import numpy as np
+ from pyspike import SpikeTrain
+
+ spike_train = SpikeTrain(np.array([0.1, 0.3, 0.45, 0.6, 0.9]), [0.0, 1.0])
+
+Loading from text files
+.......................
+
+Typically, spike train data is loaded into PySpike from data files.
+The most straight-forward data files are text files where each line represents one spike train given as a sequence of spike times.
+An exemplary file with several spike trains is `PySpike_testdata.txt <https://github.com/mariomulansky/PySpike/blob/master/examples/PySpike_testdata.txt>`_.
+To quickly obtain spike trains from such files, PySpike provides the function :func:`.load_spike_trains_from_txt`.
+
+.. code:: python
+
+ import numpy as np
+ import pyspike as spk
+
+ spike_trains = spk.load_spike_trains_from_txt("SPIKY_testdata.txt",
+ edges=(0, 4000))
+
+This function expects the name of the data file as first parameter.
+Furthermore, the time interval of the spike train measurement (edges of the spike trains) should be provided as a pair of start- and end-time values.
+Furthermore, the spike trains are sorted via :code:`np.sort` (disable this feature by providing :code:`is_sorted=True` as a parameter to the load function).
+As result, :func:`.load_spike_trains_from_txt` returns a *list of arrays* containing the spike trains in the text file.
+
+
+Computing bivariate distances profiles
+---------------------------------------
+
+**Important note:**
+
+------------------------------
+
+ Spike trains are expected to be *sorted*!
+ For performance reasons, the PySpike distance functions do not check if the spike trains provided are indeed sorted.
+ Make sure that all your spike trains are sorted, which is ensured if you use the :func:`.load_spike_trains_from_txt` function with the parameter `is_sorted=False` (default).
+ If in doubt, use :meth:`.SpikeTrain.sort()` to ensure a correctly sorted spike train.
+
+ If you need to copy a spike train, use the :meth:`.SpikeTrain.copy()` method.
+ Simple assignment `t2 = t1` does not create a copy of the spike train data, but a reference as `numpy.array` is used for storing the data.
+
+------------------------------
+
+ISI-distance
+............
+
+The following code loads some exemplary spike trains, computes the dissimilarity profile of the ISI-distance of the first two :class:`.SpikeTrain` s, and plots it with matplotlib:
+
+.. code:: python
+
+ import matplotlib.pyplot as plt
+ import pyspike as spk
+
+ spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
+ edges=(0, 4000))
+ isi_profile = spk.isi_profile(spike_trains[0], spike_trains[1])
+ x, y = isi_profile.get_plottable_data()
+ plt.plot(x, y, '--k')
+ print("ISI distance: %.8f" % isi_profile.avrg())
+ plt.show()
+
+The ISI-profile is a piece-wise constant function, and hence the function :func:`.isi_profile` returns an instance of the :class:`.PieceWiseConstFunc` class.
+As shown above, this class allows you to obtain arrays that can be used to plot the function with :code:`plt.plt`, but also to compute the time average, which amounts to the final scalar ISI-distance.
+By default, the time average is computed for the whole :class:`.PieceWiseConstFunc` function.
+However, it is also possible to obtain the average of a specific interval by providing a pair of floats defining the start and end of the interval.
+For the above example, the following code computes the ISI-distances obtained from averaging the ISI-profile over four different intervals:
+
+.. code:: python
+
+ isi1 = isi_profile.avrg(interval=(0, 1000))
+ isi2 = isi_profile.avrg(interval=(1000, 2000))
+ isi3 = isi_profile.avrg(interval=[(0, 1000), (2000, 3000)])
+ isi4 = isi_profile.avrg(interval=[(1000, 2000), (3000, 4000)])
+
+Note, how also multiple intervals can be supplied by giving a list of tuples.
+
+If you are only interested in the scalar ISI-distance and not the profile, you can simply use:
+
+.. code:: python
+
+ isi_dist = spk.isi_distance(spike_trains[0], spike_trains[1], interval)
+
+where :code:`interval` is optional, as above, and if omitted the ISI-distance is computed for the complete spike trains.
+
+
+SPIKE-distance
+..............
+
+To compute the spike distance profile, you use the function :func:`.spike_profile` instead of :code:`isi_profile` above.
+But the general approach is very similar:
+
+.. code:: python
+
+ import matplotlib.pyplot as plt
+ import pyspike as spk
+
+ spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
+ edges=(0, 4000))
+ spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1])
+ x, y = spike_profile.get_plottable_data()
+ plt.plot(x, y, '--k')
+ print("SPIKE distance: %.8f" % spike_profile.avrg())
+ plt.show()
+
+This short example computes and plots the SPIKE-profile of the first two spike trains in the file :code:`PySpike_testdata.txt`.
+In contrast to the ISI-profile, a SPIKE-profile is a piece-wise *linear* function and is therefore represented by a :class:`.PieceWiseLinFunc` object.
+Just like the :class:`.PieceWiseConstFunc` for the ISI-profile, the :class:`.PieceWiseLinFunc` provides a :meth:`.PieceWiseLinFunc.get_plottable_data` member function that returns arrays that can be used directly to plot the function.
+Furthermore, the :meth:`.PieceWiseLinFunc.avrg` member function returns the average of the profile defined as the overall SPIKE distance.
+As above, you can provide an interval as a pair of floats as well as a sequence of such pairs to :code:`avrg` to specify the averaging interval if required.
+
+Again, you can use
+
+.. code:: python
+
+ spike_dist = spk.spike_distance(spike_trains[0], spike_trains[1], interval)
+
+to compute the SPIKE distance directly, if you are not interested in the profile at all.
+The parameter :code:`interval` is optional and if neglected the whole spike train is used.
+
+
+SPIKE synchronization
+.....................
+
+**Important note:**
+
+------------------------------
+
+ SPIKE-Synchronization measures *similarity*.
+ That means, a value of zero indicates absence of synchrony, while a value of one denotes the presence of synchrony.
+ This is exactly opposite to the other two measures: ISI- and SPIKE-distance.
+
+----------------------
+
+
+SPIKE synchronization is another approach to measure spike synchrony.
+In contrast to the SPIKE- and ISI-distance, it measures similarity instead of dissimilarity, i.e. higher values represent larger synchrony.
+Another difference is that the SPIKE synchronization profile is only defined exactly at the spike times, not for the whole interval of the spike trains.
+Therefore, it is represented by a :class:`.DiscreteFunction`.
+
+To compute the spike synchronization profile, PySpike provides the function :func:`.spike_sync_profile`.
+The general handling of the profile, however, is similar to the other profiles above:
+
+.. code:: python
+
+ import matplotlib.pyplot as plt
+ import pyspike as spk
+
+ spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
+ edges=(0, 4000))
+ spike_profile = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
+ x, y = spike_profile.get_plottable_data()
+
+For the direct computation of the overall spike synchronization value within some interval, the :func:`.spike_sync` function can be used:
+
+.. code:: python
+
+ spike_sync = spk.spike_sync(spike_trains[0], spike_trains[1], interval)
+
+
+Computing multivariate profiles and distances
+----------------------------------------------
+
+To compute the multivariate ISI-profile, SPIKE-profile or SPIKE-Synchronization profile of a set of spike trains, PySpike provides multi-variate versions of the profile functions.
+The following example computes the multivariate ISI-, SPIKE- and SPIKE-Sync-profile for a list of spike trains using the :func:`.isi_profile_multi`, :func:`.spike_profile_multi`, :func:`.spike_sync_profile_multi` functions:
+
+.. code:: python
+
+ spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
+ edges=(0, 4000))
+ avrg_isi_profile = spk.isi_profile_multi(spike_trains)
+ avrg_spike_profile = spk.spike_profile_multi(spike_trains)
+ avrg_spike_sync_profile = spk.spike_sync_profile_multi(spike_trains)
+
+All functions take an optional parameter :code:`indices`, a list of indices that allows to define the spike trains that should be used for the multivariate profile.
+As before, if you are only interested in the distance values, and not in the profile, PySpike offers the functions: :func:`.isi_distance_multi`, :func:`.spike_distance_multi` and :func:`.spike_sync_multi`, that return the scalar overall multivariate ISI- and SPIKE-distance as well as the SPIKE-Synchronization value.
+Those functions also accept an :code:`interval` parameter that can be used to specify the begin and end of the averaging interval as a pair of floats, if neglected the complete interval is used.
+
+Another option to characterize large sets of spike trains are distance matrices.
+Each entry in the distance matrix represents a bivariate distance (similarity for SPIKE-Synchronization) of two spike trains.
+The distance matrix is symmetric and has zero values (ones) at the diagonal and is computed with the functions :func:`.isi_distance_matrix`, :func:`.spike_distance_matrix` and :func:`.spike_sync_matrix`.
+The following example computes and plots the ISI- and SPIKE-distance matrix as well as the SPIKE-Synchronization-matrix, with different intervals.
+
+.. code:: python
+
+ spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)
+
+ plt.figure()
+ isi_distance = spk.isi_distance_matrix(spike_trains)
+ plt.imshow(isi_distance, interpolation='none')
+ plt.title("ISI-distance")
+
+ plt.figure()
+ spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0,1000))
+ plt.imshow(spike_distance, interpolation='none')
+ plt.title("SPIKE-distance")
+
+ plt.figure()
+ spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000,4000))
+ plt.imshow(spike_sync, interpolation='none')
+ plt.title("SPIKE-Sync")
+
+ plt.show()
+
diff --git a/examples/performance.py b/examples/performance.py
new file mode 100644
index 0000000..1c31e8f
--- /dev/null
+++ b/examples/performance.py
@@ -0,0 +1,65 @@
+"""
+Compute distances of large sets of spike trains for performance tests
+
+Copyright 2015, Mario Mulansky <mario.mulansky@gmx.net>
+
+Distributed under the BSD License
+
+"""
+
+from __future__ import print_function
+
+import pyspike as spk
+from datetime import datetime
+import cProfile
+import pstats
+
+M = 100 # number of spike trains
+r = 1.0 # rate of Poisson spike times
+T = 1E3 # length of spike trains
+
+print("%d spike trains with %d spikes" % (M, int(r*T)))
+
+spike_trains = []
+
+t_start = datetime.now()
+for i in xrange(M):
+ spike_trains.append(spk.generate_poisson_spikes(r, T))
+t_end = datetime.now()
+runtime = (t_end-t_start).total_seconds()
+
+print("Spike generation runtime: %.3fs" % runtime)
+print()
+
+print("================ ISI COMPUTATIONS ================")
+print(" MULTIVARIATE DISTANCE")
+cProfile.run('spk.isi_distance_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
+
+print(" MULTIVARIATE PROFILE")
+cProfile.run('spk.isi_profile_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
+
+print("================ SPIKE COMPUTATIONS ================")
+print(" MULTIVARIATE DISTANCE")
+cProfile.run('spk.spike_distance_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
+
+print(" MULTIVARIATE PROFILE")
+cProfile.run('spk.spike_profile_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
+
+print("================ SPIKE-SYNC COMPUTATIONS ================")
+print(" MULTIVARIATE DISTANCE")
+cProfile.run('spk.spike_sync_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
+
+print(" MULTIVARIATE PROFILE")
+cProfile.run('spk.spike_sync_profile_multi(spike_trains)', 'performance.stat')
+p = pstats.Stats('performance.stat')
+p.strip_dirs().sort_stats('tottime').print_stats(5)
diff --git a/examples/profiles.py b/examples/profiles.py
new file mode 100644
index 0000000..05494bd
--- /dev/null
+++ b/examples/profiles.py
@@ -0,0 +1,66 @@
+""" profiles.py
+
+Simple example showing some functionality of distance profiles.
+
+Copyright 2015, Mario Mulansky <mario.mulansky@gmx.net>
+
+Distributed under the BSD License
+"""
+
+
+from __future__ import print_function
+
+import pyspike as spk
+
+spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
+ edges=(0, 4000))
+
+##### ISI PROFILES #######
+
+# compute the ISI profile of the first two spike trains
+f = spk.isi_profile(spike_trains[0], spike_trains[1])
+
+# ISI values at certain points
+t = 1200
+print("ISI value at t =", t, ":", f(t))
+t = [900, 1100, 2000, 3100]
+print("ISI value at t =", t, ":", f(t))
+print("Average ISI distance:", f.avrg())
+print()
+
+# compute the multivariate ISI profile
+f = spk.isi_profile_multi(spike_trains)
+
+t = 1200
+print("Multivariate ISI value at t =", t, ":", f(t))
+t = [900, 1100, 2000, 3100]
+print("Multivariate ISI value at t =", t, ":", f(t))
+print("Average multivariate ISI distance:", f.avrg())
+print()
+print()
+
+# for plotting, use the get_plottable_data() member function, see plot.py
+
+
+##### SPIKE PROFILES #######
+
+# compute the SPIKE profile of the first two spike trains
+f = spk.spike_profile(spike_trains[0], spike_trains[1])
+
+# SPIKE distance values at certain points
+t = 1200
+print("SPIKE value at t =", t, ":", f(t))
+t = [900, 1100, 2000, 3100]
+print("SPIKE value at t =", t, ":", f(t))
+print("Average SPIKE distance:", f.avrg())
+print()
+
+# compute the multivariate SPIKE profile
+f = spk.spike_profile_multi(spike_trains)
+
+# SPIKE values at certain points
+t = 1200
+print("Multivariate SPIKE value at t =", t, ":", f(t))
+t = [900, 1100, 2000, 3100]
+print("Multivariate SPIKE value at t =", t, ":", f(t))
+print("Average multivariate SPIKE distance:", f.avrg())
diff --git a/pyspike/DiscreteFunc.py b/pyspike/DiscreteFunc.py
index 33b7a81..17153ee 100644
--- a/pyspike/DiscreteFunc.py
+++ b/pyspike/DiscreteFunc.py
@@ -125,17 +125,21 @@ class DiscreteFunc(object):
def integral(self, interval=None):
""" Returns the integral over the given interval. For the discrete
- function, this amounts to the sum over all values divided by the total
- multiplicity.
+ function, this amounts to two values: the sum over all values and the
+ sum over all multiplicities.
:param interval: integration interval given as a pair of floats, or a
sequence of pairs in case of multiple intervals, if
None the integral over the whole function is computed.
:type interval: Pair, sequence of pairs, or None.
- :returns: the integral
- :rtype: float
+ :returns: the summed values and the summed multiplicity
+ :rtype: pair of float
"""
+ if len(self.y) <= 2:
+ # no actual values in the profile, return spike sync of 1
+ return 1.0, 1.0
+
def get_indices(ival):
""" Retuns the indeces surrounding the given interval"""
start_ind = np.searchsorted(self.x, ival[0], side='right')
@@ -147,7 +151,7 @@ class DiscreteFunc(object):
if interval is None:
# no interval given, integrate over the whole spike train
# don't count the first value, which is zero by definition
- return 1.0 * np.sum(self.y[1:-1]) / np.sum(self.mp[1:-1])
+ return (1.0 * np.sum(self.y[1:-1]), np.sum(self.mp[1:-1]))
# check if interval is as sequence
assert isinstance(interval, collections.Sequence), \
@@ -156,7 +160,7 @@ class DiscreteFunc(object):
if not isinstance(interval[0], collections.Sequence):
# find the indices corresponding to the interval
start_ind, end_ind = get_indices(interval)
- return (np.sum(self.y[start_ind:end_ind]) /
+ return (np.sum(self.y[start_ind:end_ind]),
np.sum(self.mp[start_ind:end_ind]))
else:
value = 0.0
@@ -166,7 +170,7 @@ class DiscreteFunc(object):
start_ind, end_ind = get_indices(ival)
value += np.sum(self.y[start_ind:end_ind])
multiplicity += np.sum(self.mp[start_ind:end_ind])
- return value/multiplicity
+ return (value, multiplicity)
def avrg(self, interval=None):
""" Computes the average of the interval sequence:
@@ -180,7 +184,8 @@ class DiscreteFunc(object):
:returns: the average a.
:rtype: float
"""
- return self.integral(interval)
+ val, mp = self.integral(interval)
+ return val/mp
def add(self, f):
""" Adds another `DiscreteFunc` function to this function.
diff --git a/pyspike/PieceWiseConstFunc.py b/pyspike/PieceWiseConstFunc.py
index 41998ef..2705443 100644
--- a/pyspike/PieceWiseConstFunc.py
+++ b/pyspike/PieceWiseConstFunc.py
@@ -26,6 +26,52 @@ class PieceWiseConstFunc(object):
self.x = np.array(x)
self.y = np.array(y)
+ def __call__(self, t):
+ """ Returns the function value for the given time t. If t is a list of
+ times, the corresponding list of values is returned.
+
+        :param t: time t, or list of times
+ :returns: function value(s) at that time(s).
+ """
+ assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \
+ "Invalid time: " + str(t)
+
+ ind = np.searchsorted(self.x, t, side='right')
+
+ if isinstance(t, collections.Sequence):
+ # t is a sequence of values
+ # correct the cases t == x[0], t == x[-1]
+ ind[ind == 0] = 1
+ ind[ind == len(self.x)] = len(self.x)-1
+ value = self.y[ind-1]
+            # correct the values at exact spike times: there the value should
+            # be at the half of the step
+ # obtain the 'left' side indices for t
+ ind_l = np.searchsorted(self.x, t, side='left')
+ # if left and right side indices differ, the time t has to appear
+ # in self.x
+ ind_at_spike = np.logical_and(np.logical_and(ind != ind_l,
+ ind > 1),
+ ind < len(self.x))
+ # get the corresponding indices for the resulting value array
+ val_ind = np.arange(len(ind))[ind_at_spike]
+ # and for the arrays self.x, y1, y2
+ xy_ind = ind[ind_at_spike]
+ # use the middle of the left and right ISI value
+ value[val_ind] = 0.5 * (self.y[xy_ind-1] + self.y[xy_ind-2])
+ return value
+ else: # t is a single value
+ # specific check for interval edges
+ if t == self.x[0]:
+ return self.y[0]
+ if t == self.x[-1]:
+ return self.y[-1]
+ # check if we are on any other exact spike time
+ if sum(self.x == t) > 0:
+ # use the middle of the left and right ISI value
+ return 0.5 * (self.y[ind-1] + self.y[ind-2])
+ return self.y[ind-1]
+
def copy(self):
""" Returns a copy of itself
diff --git a/pyspike/PieceWiseLinFunc.py b/pyspike/PieceWiseLinFunc.py
index f2442be..c0dd475 100644
--- a/pyspike/PieceWiseLinFunc.py
+++ b/pyspike/PieceWiseLinFunc.py
@@ -29,6 +29,64 @@ class PieceWiseLinFunc:
self.y1 = np.array(y1)
self.y2 = np.array(y2)
+ def __call__(self, t):
+ """ Returns the function value for the given time t. If t is a list of
+ times, the corresponding list of values is returned.
+
+        :param t: time t, or list of times
+ :returns: function value(s) at that time(s).
+ """
+ def intermediate_value(x0, x1, y0, y1, x):
+ """ computes the intermediate value of a linear function """
+ return y0 + (y1-y0)*(x-x0)/(x1-x0)
+
+ assert np.all(t >= self.x[0]) and np.all(t <= self.x[-1]), \
+ "Invalid time: " + str(t)
+
+ ind = np.searchsorted(self.x, t, side='right')
+
+ if isinstance(t, collections.Sequence):
+ # t is a sequence of values
+ # correct the cases t == x[0], t == x[-1]
+ ind[ind == 0] = 1
+ ind[ind == len(self.x)] = len(self.x)-1
+ value = intermediate_value(self.x[ind-1],
+ self.x[ind],
+ self.y1[ind-1],
+ self.y2[ind-1],
+ t)
+            # correct the values at exact spike times: there the value should
+            # be at the half of the step
+ # obtain the 'left' side indices for t
+ ind_l = np.searchsorted(self.x, t, side='left')
+ # if left and right side indices differ, the time t has to appear
+ # in self.x
+ ind_at_spike = np.logical_and(np.logical_and(ind != ind_l,
+ ind > 1),
+ ind < len(self.x))
+ # get the corresponding indices for the resulting value array
+ val_ind = np.arange(len(ind))[ind_at_spike]
+ # and for the values in self.x, y1, y2
+ xy_ind = ind[ind_at_spike]
+ # the values are defined as the average of the left and right limit
+ value[val_ind] = 0.5 * (self.y1[xy_ind-1] + self.y2[xy_ind-2])
+ return value
+ else: # t is a single value
+ # specific check for interval edges
+ if t == self.x[0]:
+ return self.y1[0]
+ if t == self.x[-1]:
+ return self.y2[-1]
+ # check if we are on any other exact spike time
+ if sum(self.x == t) > 0:
+ # use the middle of the left and right Spike value
+ return 0.5 * (self.y1[ind-1] + self.y2[ind-2])
+ return intermediate_value(self.x[ind-1],
+ self.x[ind],
+ self.y1[ind-1],
+ self.y2[ind-1],
+ t)
+
def copy(self):
""" Returns a copy of itself
diff --git a/pyspike/SpikeTrain.py b/pyspike/SpikeTrain.py
index a02b7ab..9127b60 100644
--- a/pyspike/SpikeTrain.py
+++ b/pyspike/SpikeTrain.py
@@ -48,3 +48,13 @@ class SpikeTrain(object):
"""
return SpikeTrain(self.spikes.copy(), [self.t_start, self.t_end])
+
+ def get_spikes_non_empty(self):
+ """Returns the spikes of this spike train with auxiliary spikes in case
+ of empty spike trains.
+ """
+ if len(self.spikes) < 2:
+ return np.unique(np.insert([self.t_start, self.t_end], 1,
+ self.spikes))
+ else:
+ return self.spikes
diff --git a/pyspike/cython/cython_add.pyx b/pyspike/cython/cython_add.pyx
index ac64005..8da1e53 100644
--- a/pyspike/cython/cython_add.pyx
+++ b/pyspike/cython/cython_add.pyx
@@ -83,13 +83,9 @@ def add_piece_wise_const_cython(double[:] x1, double[:] y1,
else: # both arrays reached the end simultaneously
# only the last x-value missing
x_new[index+1] = x1[N1-1]
- # the last value is again the end of the interval
- # x_new[index+1] = x1[-1]
- # only use the data that was actually filled
- x1 = x_new[:index+2]
- y1 = y_new[:index+1]
# end nogil
- return np.array(x_new[:index+2]), np.array(y_new[:index+1])
+ # return np.asarray(x_new[:index+2]), np.asarray(y_new[:index+1])
+ return np.asarray(x_new[:index+2]), np.asarray(y_new[:index+1])
############################################################
@@ -169,9 +165,9 @@ def add_piece_wise_lin_cython(double[:] x1, double[:] y11, double[:] y12,
y2_new[index] = y12[N1-2]+y22[N2-2]
# only use the data that was actually filled
# end nogil
- return (np.array(x_new[:index+2]),
- np.array(y1_new[:index+1]),
- np.array(y2_new[:index+1]))
+ return (np.asarray(x_new[:index+2]),
+ np.asarray(y1_new[:index+1]),
+ np.asarray(y2_new[:index+1]))
############################################################
@@ -230,6 +226,6 @@ def add_discrete_function_cython(double[:] x1, double[:] y1, double[:] mp1,
# the last value is again the end of the interval
# only use the data that was actually filled
- return (np.array(x_new[:index+1]),
- np.array(y_new[:index+1]),
- np.array(mp_new[:index+1]))
+ return (np.asarray(x_new[:index+1]),
+ np.asarray(y_new[:index+1]),
+ np.asarray(mp_new[:index+1]))
diff --git a/pyspike/cython/cython_distances.pyx b/pyspike/cython/cython_distances.pyx
new file mode 100644
index 0000000..c4f2349
--- /dev/null
+++ b/pyspike/cython/cython_distances.pyx
@@ -0,0 +1,398 @@
+#cython: boundscheck=False
+#cython: wraparound=False
+#cython: cdivision=True
+
+"""
+cython_distances.pyx
+
+cython implementation of the isi-, spike- and spike-sync distances
+
+Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects
+improves the performance of spike_distance by a factor of 10!
+
+Copyright 2015, Mario Mulansky <mario.mulansky@gmx.net>
+
+Distributed under the BSD License
+
+"""
+
+"""
+To test whether things can be optimized: remove all yellow stuff
+in the html output::
+
+ cython -a cython_distances.pyx
+
+which gives::
+
+ cython_distances.html
+
+"""
+
+import numpy as np
+cimport numpy as np
+
+from libc.math cimport fabs
+from libc.math cimport fmax
+from libc.math cimport fmin
+
+DTYPE = np.float
+ctypedef np.float_t DTYPE_t
+
+
+############################################################
+# isi_distance_cython
+############################################################
+def isi_distance_cython(double[:] s1, double[:] s2,
+ double t_start, double t_end):
+
+ cdef double isi_value
+ cdef int index1, index2, index
+ cdef int N1, N2
+ cdef double nu1, nu2
+ cdef double last_t, curr_t, curr_isi
+ isi_value = 0.0
+ N1 = len(s1)
+ N2 = len(s2)
+
+ # first interspike interval - check if a spike exists at the start time
+ if s1[0] > t_start:
+ # edge correction
+ nu1 = fmax(s1[0]-t_start, s1[1]-s1[0])
+ index1 = -1
+ else:
+ nu1 = s1[1]-s1[0]
+ index1 = 0
+
+ if s2[0] > t_start:
+ # edge correction
+ nu2 = fmax(s2[0]-t_start, s2[1]-s2[0])
+ index2 = -1
+ else:
+ nu2 = s2[1]-s2[0]
+ index2 = 0
+
+ last_t = t_start
+ curr_isi = fabs(nu1-nu2)/fmax(nu1, nu2)
+ index = 1
+
+ with nogil: # release the interpreter to allow multithreading
+ while index1+index2 < N1+N2-2:
+ # check which spike is next, only if there are spikes left in 1
+ # next spike in 1 is earlier, or there are no spikes left in 2
+ if (index1 < N1-1) and ((index2 == N2-1) or
+ (s1[index1+1] < s2[index2+1])):
+ index1 += 1
+ curr_t = s1[index1]
+ if index1 < N1-1:
+ nu1 = s1[index1+1]-s1[index1]
+ else:
+ # edge correction
+ nu1 = fmax(t_end-s1[index1], nu1)
+ elif (index2 < N2-1) and ((index1 == N1-1) or
+ (s1[index1+1] > s2[index2+1])):
+ index2 += 1
+ curr_t = s2[index2]
+ if index2 < N2-1:
+ nu2 = s2[index2+1]-s2[index2]
+ else:
+ # edge correction
+ nu2 = fmax(t_end-s2[index2], nu2)
+ else: # s1[index1+1] == s2[index2+1]
+ index1 += 1
+ index2 += 1
+ curr_t = s1[index1]
+ if index1 < N1-1:
+ nu1 = s1[index1+1]-s1[index1]
+ else:
+ # edge correction
+ nu1 = fmax(t_end-s1[index1], nu1)
+ if index2 < N2-1:
+ nu2 = s2[index2+1]-s2[index2]
+ else:
+ # edge correction
+ nu2 = fmax(t_end-s2[index2], nu2)
+ # compute the corresponding isi-distance
+ isi_value += curr_isi * (curr_t - last_t)
+ curr_isi = fabs(nu1 - nu2) / fmax(nu1, nu2)
+ last_t = curr_t
+ index += 1
+
+ isi_value += curr_isi * (t_end - last_t)
+ # end nogil
+
+ return isi_value / (t_end-t_start)
+
+
+############################################################
+# get_min_dist_cython
+############################################################
+cdef inline double get_min_dist_cython(double spike_time,
+ double[:] spike_train,
+ # use memory view to ensure inlining
+ # np.ndarray[DTYPE_t,ndim=1] spike_train,
+ int N,
+ int start_index,
+ double t_start, double t_end) nogil:
+ """ Returns the minimal distance |spike_time - spike_train[i]|
+ with i>=start_index.
+ """
+ cdef double d, d_temp
+ # start with the distance to the start time
+ d = fabs(spike_time - t_start)
+ if start_index < 0:
+ start_index = 0
+ while start_index < N:
+ d_temp = fabs(spike_time - spike_train[start_index])
+ if d_temp > d:
+ return d
+ else:
+ d = d_temp
+ start_index += 1
+
+ # finally, check the distance to end time
+ d_temp = fabs(t_end - spike_time)
+ if d_temp > d:
+ return d
+ else:
+ return d_temp
+
+
+############################################################
+# isi_avrg_cython
+############################################################
+cdef inline double isi_avrg_cython(double isi1, double isi2) nogil:
+ return 0.5*(isi1+isi2)*(isi1+isi2)
+ # alternative definition to obtain <S> ~ 0.5 for Poisson spikes
+ # return 0.5*(isi1*isi1+isi2*isi2)
+
+
+############################################################
+# spike_distance_cython
+############################################################
+def spike_distance_cython(double[:] t1, double[:] t2,
+ double t_start, double t_end):
+
+ cdef int N1, N2, index1, index2, index
+ cdef double t_p1, t_f1, t_p2, t_f2, dt_p1, dt_p2, dt_f1, dt_f2
+ cdef double isi1, isi2, s1, s2
+ cdef double y_start, y_end, t_last, t_current, spike_value
+
+ spike_value = 0.0
+
+ N1 = len(t1)
+ N2 = len(t2)
+
+ with nogil: # release the interpreter to allow multithreading
+ t_last = t_start
+ t_p1 = t_start
+ t_p2 = t_start
+ if t1[0] > t_start:
+ # dt_p1 = t2[0]-t_start
+ t_f1 = t1[0]
+ dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end)
+ isi1 = fmax(t_f1-t_start, t1[1]-t1[0])
+ dt_p1 = dt_f1
+ s1 = dt_p1*(t_f1-t_start)/isi1
+ index1 = -1
+ else:
+ t_f1 = t1[1]
+ dt_f1 = get_min_dist_cython(t_f1, t2, N2, 0, t_start, t_end)
+ dt_p1 = 0.0
+ isi1 = t1[1]-t1[0]
+ s1 = dt_p1
+ index1 = 0
+ if t2[0] > t_start:
+ # dt_p1 = t2[0]-t_start
+ t_f2 = t2[0]
+ dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end)
+ dt_p2 = dt_f2
+ isi2 = fmax(t_f2-t_start, t2[1]-t2[0])
+ s2 = dt_p2*(t_f2-t_start)/isi2
+ index2 = -1
+ else:
+ t_f2 = t2[1]
+ dt_f2 = get_min_dist_cython(t_f2, t1, N1, 0, t_start, t_end)
+ dt_p2 = 0.0
+ isi2 = t2[1]-t2[0]
+ s2 = dt_p2
+ index2 = 0
+
+ y_start = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2)
+ index = 1
+
+ while index1+index2 < N1+N2-2:
+ # print(index, index1, index2)
+ if (index1 < N1-1) and (t_f1 < t_f2 or index2 == N2-1):
+ index1 += 1
+ # first calculate the previous interval end value
+ s1 = dt_f1*(t_f1-t_p1) / isi1
+ # the previous time now was the following time before:
+ dt_p1 = dt_f1
+ t_p1 = t_f1 # t_p1 contains the current time point
+ # get the next time
+ if index1 < N1-1:
+ t_f1 = t1[index1+1]
+ else:
+ t_f1 = t_end
+ t_curr = t_p1
+ s2 = (dt_p2*(t_f2-t_p1) + dt_f2*(t_p1-t_p2)) / isi2
+ y_end = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2)
+
+ spike_value += 0.5*(y_start + y_end) * (t_curr - t_last)
+
+ # now the next interval start value
+ if index1 < N1-1:
+ dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2,
+ t_start, t_end)
+ isi1 = t_f1-t_p1
+ s1 = dt_p1
+ else:
+ dt_f1 = dt_p1
+ isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2])
+ # s1 needs adjustment due to change of isi1
+ s1 = dt_p1*(t_end-t1[N1-1])/isi1
+ # s2 is the same as above, thus we can compute y2 immediately
+ y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2)
+ elif (index2 < N2-1) and (t_f1 > t_f2 or index1 == N1-1):
+ index2 += 1
+ # first calculate the previous interval end value
+ s2 = dt_f2*(t_f2-t_p2) / isi2
+ # the previous time now was the following time before:
+ dt_p2 = dt_f2
+ t_p2 = t_f2 # t_p2 contains the current time point
+ # get the next time
+ if index2 < N2-1:
+ t_f2 = t2[index2+1]
+ else:
+ t_f2 = t_end
+ t_curr = t_p2
+ s1 = (dt_p1*(t_f1-t_p2) + dt_f1*(t_p2-t_p1)) / isi1
+ y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2)
+
+ spike_value += 0.5*(y_start + y_end) * (t_curr - t_last)
+
+ # now the next interval start value
+ if index2 < N2-1:
+ dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1,
+ t_start, t_end)
+ isi2 = t_f2-t_p2
+ s2 = dt_p2
+ else:
+ dt_f2 = dt_p2
+ isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2])
+ # s2 needs adjustment due to change of isi2
+ s2 = dt_p2*(t_end-t2[N2-1])/isi2
+ # s1 is the same as above, thus we can compute y2 immediately
+ y_start = (s1*isi2 + s2*isi1)/isi_avrg_cython(isi1, isi2)
+ else: # t_f1 == t_f2 - generate only one event
+ index1 += 1
+ index2 += 1
+ t_p1 = t_f1
+ t_p2 = t_f2
+ dt_p1 = 0.0
+ dt_p2 = 0.0
+ t_curr = t_f1
+ y_end = 0.0
+ spike_value += 0.5*(y_start + y_end) * (t_curr - t_last)
+ y_start = 0.0
+ if index1 < N1-1:
+ t_f1 = t1[index1+1]
+ dt_f1 = get_min_dist_cython(t_f1, t2, N2, index2,
+ t_start, t_end)
+ isi1 = t_f1 - t_p1
+ else:
+ t_f1 = t_end
+ dt_f1 = dt_p1
+ isi1 = fmax(t_end-t1[N1-1], t1[N1-1]-t1[N1-2])
+ if index2 < N2-1:
+ t_f2 = t2[index2+1]
+ dt_f2 = get_min_dist_cython(t_f2, t1, N1, index1,
+ t_start, t_end)
+ isi2 = t_f2 - t_p2
+ else:
+ t_f2 = t_end
+ dt_f2 = dt_p2
+ isi2 = fmax(t_end-t2[N2-1], t2[N2-1]-t2[N2-2])
+ index += 1
+ t_last = t_curr
+ # isi1 = max(t_end-t1[N1-1], t1[N1-1]-t1[N1-2])
+ # isi2 = max(t_end-t2[N2-1], t2[N2-1]-t2[N2-2])
+ s1 = dt_f1*(t_end-t1[N1-1])/isi1
+ s2 = dt_f2*(t_end-t2[N2-1])/isi2
+ y_end = (s1*isi2 + s2*isi1) / isi_avrg_cython(isi1, isi2)
+ spike_value += 0.5*(y_start + y_end) * (t_end - t_last)
+ # end nogil
+
+    # normalize the accumulated weighted dissimilarity by the total
+    # interval length to obtain the time-averaged spike distance
+ return spike_value / (t_end-t_start)
+
+
+
+############################################################
+# get_tau
+############################################################
+cdef inline double get_tau(double[:] spikes1, double[:] spikes2,
+ int i, int j, double interval, double max_tau):
+ cdef double m = interval # use interval length as initial tau
+ cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1
+ cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1
+ if i < N1 and i > -1:
+ m = fmin(m, spikes1[i+1]-spikes1[i])
+ if j < N2 and j > -1:
+ m = fmin(m, spikes2[j+1]-spikes2[j])
+ if i > 0:
+ m = fmin(m, spikes1[i]-spikes1[i-1])
+ if j > 0:
+ m = fmin(m, spikes2[j]-spikes2[j-1])
+ m *= 0.5
+ if max_tau > 0.0:
+ m = fmin(m, max_tau)
+ return m
+
+
+############################################################
+# coincidence_value_cython
+############################################################
+def coincidence_value_cython(double[:] spikes1, double[:] spikes2,
+ double t_start, double t_end, double max_tau):
+
+ cdef int N1 = len(spikes1)
+ cdef int N2 = len(spikes2)
+ cdef int i = -1
+ cdef int j = -1
+ cdef double coinc = 0.0
+ cdef double mp = 0.0
+ cdef double interval = t_end - t_start
+ cdef double tau
+ while i + j < N1 + N2 - 2:
+ if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]):
+ i += 1
+ mp += 1
+ tau = get_tau(spikes1, spikes2, i, j, interval, max_tau)
+ if j > -1 and spikes1[i]-spikes2[j] < tau:
+ # coincidence between the current spike and the previous spike
+ # both get marked with 1
+ coinc += 2
+ elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]):
+ j += 1
+ mp += 1
+ tau = get_tau(spikes1, spikes2, i, j, interval, max_tau)
+ if i > -1 and spikes2[j]-spikes1[i] < tau:
+ # coincidence between the current spike and the previous spike
+ # both get marked with 1
+ coinc += 2
+        else:                # spikes1[i+1] == spikes2[j+1]
+ # advance in both spike trains
+ j += 1
+ i += 1
+ # add only one event, but with coincidence 2 and multiplicity 2
+ mp += 2
+ coinc += 2
+
+ if coinc == 0 and mp == 0:
+ # empty spike trains -> spike sync = 1 by definition
+ coinc = 1
+ mp = 1
+
+ return coinc, mp
diff --git a/pyspike/cython/cython_distance.pyx b/pyspike/cython/cython_profiles.pyx
index 6ee0181..f9893eb 100644
--- a/pyspike/cython/cython_distance.pyx
+++ b/pyspike/cython/cython_profiles.pyx
@@ -3,14 +3,14 @@
#cython: cdivision=True
"""
-cython_distances.pyx
+cython_profiles.pyx
-cython implementation of the isi- and spike-distance
+cython implementation of the isi-, spike- and spike-sync profiles
Note: using cython memoryviews (e.g. double[:]) instead of ndarray objects
improves the performance of spike_distance by a factor of 10!
-Copyright 2014, Mario Mulansky <mario.mulansky@gmx.net>
+Copyright 2014-2015, Mario Mulansky <mario.mulansky@gmx.net>
Distributed under the BSD License
@@ -20,11 +20,11 @@ Distributed under the BSD License
To test whether things can be optimized: remove all yellow stuff
in the html output::
- cython -a cython_distance.pyx
+ cython -a cython_profiles.pyx
which gives::
- cython_distance.html
+ cython_profiles.html
"""
@@ -40,10 +40,10 @@ ctypedef np.float_t DTYPE_t
############################################################
-# isi_distance_cython
+# isi_profile_cython
############################################################
-def isi_distance_cython(double[:] s1, double[:] s2,
- double t_start, double t_end):
+def isi_profile_cython(double[:] s1, double[:] s2,
+ double t_start, double t_end):
cdef double[:] spike_events
cdef double[:] isi_values
@@ -173,10 +173,10 @@ cdef inline double isi_avrg_cython(double isi1, double isi2) nogil:
############################################################
-# spike_distance_cython
+# spike_profile_cython
############################################################
-def spike_distance_cython(double[:] t1, double[:] t2,
- double t_start, double t_end):
+def spike_profile_cython(double[:] t1, double[:] t2,
+ double t_start, double t_end):
cdef double[:] spike_events
cdef double[:] y_starts
@@ -342,11 +342,11 @@ def spike_distance_cython(double[:] t1, double[:] t2,
############################################################
-# coincidence_python
+# get_tau
############################################################
cdef inline double get_tau(double[:] spikes1, double[:] spikes2,
- int i, int j, double max_tau):
- cdef double m = 1E100 # some huge number
+ int i, int j, double interval, double max_tau):
+ cdef double m = interval # use interval as initial tau
cdef int N1 = spikes1.shape[0]-1 # len(spikes1)-1
cdef int N2 = spikes2.shape[0]-1 # len(spikes2)-1
if i < N1 and i > -1:
@@ -364,10 +364,10 @@ cdef inline double get_tau(double[:] spikes1, double[:] spikes2,
############################################################
-# coincidence_cython
+# coincidence_profile_cython
############################################################
-def coincidence_cython(double[:] spikes1, double[:] spikes2,
- double t_start, double t_end, double max_tau):
+def coincidence_profile_cython(double[:] spikes1, double[:] spikes2,
+ double t_start, double t_end, double max_tau):
cdef int N1 = len(spikes1)
cdef int N2 = len(spikes2)
@@ -377,12 +377,13 @@ def coincidence_cython(double[:] spikes1, double[:] spikes2,
cdef double[:] st = np.zeros(N1 + N2 + 2) # spike times
cdef double[:] c = np.zeros(N1 + N2 + 2) # coincidences
cdef double[:] mp = np.ones(N1 + N2 + 2) # multiplicity
+ cdef double interval = t_end - t_start
cdef double tau
while i + j < N1 + N2 - 2:
if (i < N1-1) and (j == N2-1 or spikes1[i+1] < spikes2[j+1]):
i += 1
n += 1
- tau = get_tau(spikes1, spikes2, i, j, max_tau)
+ tau = get_tau(spikes1, spikes2, i, j, interval, max_tau)
st[n] = spikes1[i]
if j > -1 and spikes1[i]-spikes2[j] < tau:
# coincidence between the current spike and the previous spike
@@ -392,7 +393,7 @@ def coincidence_cython(double[:] spikes1, double[:] spikes2,
elif (j < N2-1) and (i == N1-1 or spikes1[i+1] > spikes2[j+1]):
j += 1
n += 1
- tau = get_tau(spikes1, spikes2, i, j, max_tau)
+ tau = get_tau(spikes1, spikes2, i, j, interval, max_tau)
st[n] = spikes2[j]
if i > -1 and spikes2[j]-spikes1[i] < tau:
# coincidence between the current spike and the previous spike
@@ -415,9 +416,13 @@ def coincidence_cython(double[:] spikes1, double[:] spikes2,
st[0] = t_start
st[len(st)-1] = t_end
- c[0] = c[1]
- c[len(c)-1] = c[len(c)-2]
- mp[0] = mp[1]
- mp[len(mp)-1] = mp[len(mp)-2]
+ if N1 + N2 > 0:
+ c[0] = c[1]
+ c[len(c)-1] = c[len(c)-2]
+ mp[0] = mp[1]
+ mp[len(mp)-1] = mp[len(mp)-2]
+ else:
+ c[0] = 1
+ c[1] = 1
return st, c, mp
diff --git a/pyspike/cython/python_backend.py b/pyspike/cython/python_backend.py
index 1fd8c42..69a420f 100644
--- a/pyspike/cython/python_backend.py
+++ b/pyspike/cython/python_backend.py
@@ -340,7 +340,7 @@ def cumulative_sync_python(spikes1, spikes2):
def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
def get_tau(spikes1, spikes2, i, j, max_tau):
- m = 1E100 # some huge number
+ m = t_end - t_start # use interval as initial tau
if i < len(spikes1)-1 and i > -1:
m = min(m, spikes1[i+1]-spikes1[i])
if j < len(spikes2)-1 and j > -1:
@@ -399,10 +399,14 @@ def coincidence_python(spikes1, spikes2, t_start, t_end, max_tau):
st[0] = t_start
st[len(st)-1] = t_end
- c[0] = c[1]
- c[len(c)-1] = c[len(c)-2]
- mp[0] = mp[1]
- mp[len(mp)-1] = mp[len(mp)-2]
+ if N1 + N2 > 0:
+ c[0] = c[1]
+ c[len(c)-1] = c[len(c)-2]
+ mp[0] = mp[1]
+ mp[len(mp)-1] = mp[len(mp)-2]
+ else:
+ c[0] = 1
+ c[1] = 1
return st, c, mp
diff --git a/pyspike/generic.py b/pyspike/generic.py
index 4f278d2..41affcb 100644
--- a/pyspike/generic.py
+++ b/pyspike/generic.py
@@ -31,6 +31,69 @@ def _generic_profile_multi(spike_trains, pair_distance_func, indices=None):
Returns:
- The averaged multi-variate distance of all pairs
"""
+
+ def divide_and_conquer(pairs1, pairs2):
+ """ recursive calls by splitting the two lists in half.
+ """
+ L1 = len(pairs1)
+ if L1 > 1:
+ dist_prof1 = divide_and_conquer(pairs1[:L1/2], pairs1[L1/2:])
+ else:
+ dist_prof1 = pair_distance_func(spike_trains[pairs1[0][0]],
+ spike_trains[pairs1[0][1]])
+ L2 = len(pairs2)
+ if L2 > 1:
+ dist_prof2 = divide_and_conquer(pairs2[:L2/2], pairs2[L2/2:])
+ else:
+ dist_prof2 = pair_distance_func(spike_trains[pairs2[0][0]],
+ spike_trains[pairs2[0][1]])
+ dist_prof1.add(dist_prof2)
+ return dist_prof1
+
+ if indices is None:
+ indices = np.arange(len(spike_trains))
+ indices = np.array(indices)
+ # check validity of indices
+ assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
+ "Invalid index list."
+ # generate a list of possible index pairs
+ pairs = [(indices[i], j) for i in range(len(indices))
+ for j in indices[i+1:]]
+
+ L = len(pairs)
+ if L > 1:
+ # recursive iteration through the list of pairs to get average profile
+ avrg_dist = divide_and_conquer(pairs[:len(pairs)/2],
+ pairs[len(pairs)/2:])
+ else:
+ avrg_dist = pair_distance_func(spike_trains[pairs[0][0]],
+ spike_trains[pairs[0][1]])
+
+ return avrg_dist, L
+
+
+############################################################
+# _generic_distance_multi
+############################################################
+def _generic_distance_multi(spike_trains, pair_distance_func,
+ indices=None, interval=None):
+ """ Internal implementation detail, don't call this function directly,
+ use isi_distance_multi or spike_distance_multi instead.
+
+ Computes the multi-variate distance for a set of spike-trains using the
+ pair_dist_func to compute pair-wise distances. That is it computes the
+ average distance of all pairs of spike-trains:
+ :math:`S(t) = 2/((N(N-1)) sum_{<i,j>} D_{i,j}`,
+ where the sum goes over all pairs <i,j>.
+ Args:
+ - spike_trains: list of spike trains
+ - pair_distance_func: function computing the distance of two spike trains
+ - indices: list of indices defining which spike trains to use,
+ if None all given spike trains are used (default=None)
+ Returns:
+ - The averaged multi-variate distance of all pairs
+ """
+
if indices is None:
indices = np.arange(len(spike_trains))
indices = np.array(indices)
@@ -40,13 +103,13 @@ def _generic_profile_multi(spike_trains, pair_distance_func, indices=None):
# generate a list of possible index pairs
pairs = [(indices[i], j) for i in range(len(indices))
for j in indices[i+1:]]
- # start with first pair
- (i, j) = pairs[0]
- average_dist = pair_distance_func(spike_trains[i], spike_trains[j])
- for (i, j) in pairs[1:]:
- current_dist = pair_distance_func(spike_trains[i], spike_trains[j])
- average_dist.add(current_dist) # add to the average
- return average_dist, len(pairs)
+
+ avrg_dist = 0.0
+ for (i, j) in pairs:
+ avrg_dist += pair_distance_func(spike_trains[i], spike_trains[j],
+ interval)
+
+ return avrg_dist/len(pairs)
############################################################
diff --git a/pyspike/isi_distance.py b/pyspike/isi_distance.py
index aeab0df..5ea555d 100644
--- a/pyspike/isi_distance.py
+++ b/pyspike/isi_distance.py
@@ -3,7 +3,8 @@
# Distributed under the BSD License
from pyspike import PieceWiseConstFunc
-from pyspike.generic import _generic_profile_multi, _generic_distance_matrix
+from pyspike.generic import _generic_profile_multi, _generic_distance_multi, \
+ _generic_distance_matrix
############################################################
@@ -24,24 +25,25 @@ def isi_profile(spike_train1, spike_train2):
"""
# check whether the spike trains are defined for the same interval
assert spike_train1.t_start == spike_train2.t_start, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
assert spike_train1.t_end == spike_train2.t_end, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
# load cython implementation
try:
- from cython.cython_distance import isi_distance_cython \
- as isi_distance_impl
+ from cython.cython_profiles import isi_profile_cython \
+ as isi_profile_impl
except ImportError:
print("Warning: isi_distance_cython not found. Make sure that PySpike \
is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
# use python backend
from cython.python_backend import isi_distance_python \
- as isi_distance_impl
+ as isi_profile_impl
- times, values = isi_distance_impl(spike_train1.spikes, spike_train2.spikes,
- spike_train1.t_start, spike_train1.t_end)
+ times, values = isi_profile_impl(spike_train1.get_spikes_non_empty(),
+ spike_train2.get_spikes_non_empty(),
+ spike_train1.t_start, spike_train1.t_end)
return PieceWiseConstFunc(times, values)
@@ -65,7 +67,23 @@ def isi_distance(spike_train1, spike_train2, interval=None):
:returns: The isi-distance :math:`D_I`.
:rtype: double
"""
- return isi_profile(spike_train1, spike_train2).avrg(interval)
+
+ if interval is None:
+ # distance over the whole interval is requested: use specific function
+ # for optimal performance
+ try:
+ from cython.cython_distances import isi_distance_cython \
+ as isi_distance_impl
+
+ return isi_distance_impl(spike_train1.get_spikes_non_empty(),
+ spike_train2.get_spikes_non_empty(),
+ spike_train1.t_start, spike_train1.t_end)
+ except ImportError:
+ # Cython backend not available: fall back to profile averaging
+ return isi_profile(spike_train1, spike_train2).avrg(interval)
+ else:
+ # some specific interval is provided: use profile
+ return isi_profile(spike_train1, spike_train2).avrg(interval)
############################################################
@@ -112,7 +130,8 @@ def isi_distance_multi(spike_trains, indices=None, interval=None):
:returns: The time-averaged multivariate ISI distance :math:`D_I`
:rtype: double
"""
- return isi_profile_multi(spike_trains, indices).avrg(interval)
+ return _generic_distance_multi(spike_trains, isi_distance, indices,
+ interval)
############################################################
diff --git a/pyspike/spike_distance.py b/pyspike/spike_distance.py
index cc620d4..ac2d260 100644
--- a/pyspike/spike_distance.py
+++ b/pyspike/spike_distance.py
@@ -3,7 +3,8 @@
# Distributed under the BSD License
from pyspike import PieceWiseLinFunc
-from pyspike.generic import _generic_profile_multi, _generic_distance_matrix
+from pyspike.generic import _generic_profile_multi, _generic_distance_multi, \
+ _generic_distance_matrix
############################################################
@@ -24,26 +25,27 @@ def spike_profile(spike_train1, spike_train2):
"""
# check whether the spike trains are defined for the same interval
assert spike_train1.t_start == spike_train2.t_start, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
assert spike_train1.t_end == spike_train2.t_end, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
# cython implementation
try:
- from cython.cython_distance import spike_distance_cython \
- as spike_distance_impl
+ from cython.cython_profiles import spike_profile_cython \
+ as spike_profile_impl
except ImportError:
- print("Warning: spike_distance_cython not found. Make sure that \
+ print("Warning: spike_profile_cython not found. Make sure that \
PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
# use python backend
from cython.python_backend import spike_distance_python \
- as spike_distance_impl
+ as spike_profile_impl
+
+ times, y_starts, y_ends = spike_profile_impl(
+ spike_train1.get_spikes_non_empty(),
+ spike_train2.get_spikes_non_empty(),
+ spike_train1.t_start, spike_train1.t_end)
- times, y_starts, y_ends = spike_distance_impl(spike_train1.spikes,
- spike_train2.spikes,
- spike_train1.t_start,
- spike_train1.t_end)
return PieceWiseLinFunc(times, y_starts, y_ends)
@@ -67,7 +69,22 @@ def spike_distance(spike_train1, spike_train2, interval=None):
:rtype: double
"""
- return spike_profile(spike_train1, spike_train2).avrg(interval)
+ if interval is None:
+ # distance over the whole interval is requested: use specific function
+ # for optimal performance
+ try:
+ from cython.cython_distances import spike_distance_cython \
+ as spike_distance_impl
+ return spike_distance_impl(spike_train1.get_spikes_non_empty(),
+ spike_train2.get_spikes_non_empty(),
+ spike_train1.t_start,
+ spike_train1.t_end)
+ except ImportError:
+ # Cython backend not available: fall back to average profile
+ return spike_profile(spike_train1, spike_train2).avrg(interval)
+ else:
+ # some specific interval is provided: compute the whole profile
+ return spike_profile(spike_train1, spike_train2).avrg(interval)
############################################################
@@ -117,7 +134,8 @@ def spike_distance_multi(spike_trains, indices=None, interval=None):
:returns: The averaged multi-variate spike distance :math:`D_S`.
:rtype: double
"""
- return spike_profile_multi(spike_trains, indices).avrg(interval)
+ return _generic_distance_multi(spike_trains, spike_distance, indices,
+ interval)
############################################################
diff --git a/pyspike/spike_sync.py b/pyspike/spike_sync.py
index 9d2e363..40d98d2 100644
--- a/pyspike/spike_sync.py
+++ b/pyspike/spike_sync.py
@@ -3,6 +3,7 @@
# Copyright 2014-2015, Mario Mulansky <mario.mulansky@gmx.net>
# Distributed under the BSD License
+import numpy as np
from functools import partial
from pyspike import DiscreteFunc
from pyspike.generic import _generic_profile_multi, _generic_distance_matrix
@@ -29,35 +30,68 @@ def spike_sync_profile(spike_train1, spike_train2, max_tau=None):
"""
# check whether the spike trains are defined for the same interval
assert spike_train1.t_start == spike_train2.t_start, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
assert spike_train1.t_end == spike_train2.t_end, \
- "Given spike trains seems not to have auxiliary spikes!"
+ "Given spike trains are not defined on the same interval!"
# cython implementation
try:
- from cython.cython_distance import coincidence_cython \
- as coincidence_impl
+ from cython.cython_profiles import coincidence_profile_cython \
+ as coincidence_profile_impl
except ImportError:
print("Warning: spike_distance_cython not found. Make sure that \
PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
# use python backend
from cython.python_backend import coincidence_python \
- as coincidence_impl
+ as coincidence_profile_impl
if max_tau is None:
max_tau = 0.0
- times, coincidences, multiplicity = coincidence_impl(spike_train1.spikes,
- spike_train2.spikes,
- spike_train1.t_start,
- spike_train1.t_end,
- max_tau)
+ times, coincidences, multiplicity \
+ = coincidence_profile_impl(spike_train1.spikes, spike_train2.spikes,
+ spike_train1.t_start, spike_train1.t_end,
+ max_tau)
return DiscreteFunc(times, coincidences, multiplicity)
############################################################
+# _spike_sync_values
+############################################################
+def _spike_sync_values(spike_train1, spike_train2, interval, max_tau):
+ """ Internal function. Computes the summed coincidences and multiplicity
+ for spike synchronization of the two given spike trains.
+
+ Do not call this function directly, use `spike_sync` or `spike_sync_multi`
+ instead.
+ """
+ if interval is None:
+ # distance over the whole interval is requested: use specific function
+ # for optimal performance
+ try:
+ from cython.cython_distances import coincidence_value_cython \
+ as coincidence_value_impl
+ if max_tau is None:
+ max_tau = 0.0
+ c, mp = coincidence_value_impl(spike_train1.spikes,
+ spike_train2.spikes,
+ spike_train1.t_start,
+ spike_train1.t_end,
+ max_tau)
+ return c, mp
+ except ImportError:
+ # Cython backend not available: fall back to profile averaging
+ return spike_sync_profile(spike_train1, spike_train2,
+ max_tau).integral(interval)
+ else:
+ # some specific interval is provided: use profile
+ return spike_sync_profile(spike_train1, spike_train2,
+ max_tau).integral(interval)
+
+
+############################################################
# spike_sync
############################################################
def spike_sync(spike_train1, spike_train2, interval=None, max_tau=None):
@@ -80,8 +114,8 @@ def spike_sync(spike_train1, spike_train2, interval=None, max_tau=None):
:rtype: `double`
"""
- return spike_sync_profile(spike_train1, spike_train2,
- max_tau).avrg(interval)
+ c, mp = _spike_sync_values(spike_train1, spike_train2, interval, max_tau)
+ return 1.0*c/mp
############################################################
@@ -131,8 +165,25 @@ def spike_sync_multi(spike_trains, indices=None, interval=None, max_tau=None):
:rtype: double
"""
- return spike_sync_profile_multi(spike_trains, indices,
- max_tau).avrg(interval)
+ if indices is None:
+ indices = np.arange(len(spike_trains))
+ indices = np.array(indices)
+ # check validity of indices
+ assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
+ "Invalid index list."
+ # generate a list of possible index pairs
+ pairs = [(indices[i], j) for i in range(len(indices))
+ for j in indices[i+1:]]
+
+ coincidence = 0.0
+ mp = 0.0
+ for (i, j) in pairs:
+ c, m = _spike_sync_values(spike_trains[i], spike_trains[j],
+ interval, max_tau)
+ coincidence += c
+ mp += m
+
+ return coincidence/mp
############################################################
diff --git a/setup.py b/setup.py
index e35123f..d853cdf 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,8 @@ else:
use_cython = True
if os.path.isfile("pyspike/cython/cython_add.c") and \
- os.path.isfile("pyspike/cython/cython_distance.c"):
+ os.path.isfile("pyspike/cython/cython_profiles.c") and \
+ os.path.isfile("pyspike/cython/cython_distances.c"):
use_c = True
else:
use_c = False
@@ -33,20 +34,22 @@ ext_modules = []
if use_cython: # Cython is available, compile .pyx -> .c
ext_modules += [
Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.pyx"]),
- Extension("pyspike.cython.cython_distance", ["pyspike/cython/cython_distance.pyx"]),
+ Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.pyx"]),
+ Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.pyx"]),
]
cmdclass.update({'build_ext': build_ext})
elif use_c: # c files are there, compile to binaries
ext_modules += [
Extension("pyspike.cython.cython_add", ["pyspike/cython/cython_add.c"]),
- Extension("pyspike.cython.cython_distance", ["pyspike/cython/cython_distance.c"]),
+ Extension("pyspike.cython.cython_profiles", ["pyspike/cython/cython_profiles.c"]),
+ Extension("pyspike.cython.cython_distances", ["pyspike/cython/cython_distances.c"]),
]
# neither cython nor c files available -> automatic fall-back to python backend
setup(
name='pyspike',
packages=find_packages(exclude=['doc']),
- version='0.2.0',
+ version='0.3.0',
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
@@ -56,7 +59,6 @@ train similarity',
author_email='mario.mulanskygmx.net',
license='BSD',
url='https://github.com/mariomulansky/PySpike',
- # download_url='https://github.com/mariomulansky/PySpike/tarball/0.1',
install_requires=['numpy'],
keywords=['data analysis', 'spike', 'neuroscience'], # arbitrary keywords
classifiers=[
@@ -78,7 +80,8 @@ train similarity',
'Programming Language :: Python :: 2.7',
],
package_data={
- 'pyspike': ['cython/cython_add.c', 'cython/cython_distance.c'],
+ 'pyspike': ['cython/cython_add.c', 'cython/cython_profiles.c',
+ 'cython/cython_distances.c'],
'test': ['Spike_testdata.txt']
}
)
diff --git a/test/test_distance.py b/test/test_distance.py
index 19da35f..e45ac16 100644
--- a/test/test_distance.py
+++ b/test/test_distance.py
@@ -196,7 +196,7 @@ def test_spike_sync():
0.4, decimal=16)
-def check_multi_profile(profile_func, profile_func_multi):
+def check_multi_profile(profile_func, profile_func_multi, dist_func_multi):
# generate spike trains:
t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)
@@ -213,10 +213,14 @@ def check_multi_profile(profile_func, profile_func_multi):
f_multi = profile_func_multi(spike_trains, [0, 1])
assert f_multi.almost_equal(f12, decimal=14)
+ d = dist_func_multi(spike_trains, [0, 1])
+ assert_equal(f_multi.avrg(), d)
f_multi1 = profile_func_multi(spike_trains, [1, 2, 3])
f_multi2 = profile_func_multi(spike_trains[1:])
assert f_multi1.almost_equal(f_multi2, decimal=14)
+ d = dist_func_multi(spike_trains, [1, 2, 3])
+ assert_almost_equal(f_multi1.avrg(), d, decimal=14)
f = copy(f12)
f.add(f13)
@@ -224,6 +228,8 @@ def check_multi_profile(profile_func, profile_func_multi):
f.mul_scalar(1.0/3)
f_multi = profile_func_multi(spike_trains, [0, 1, 2])
assert f_multi.almost_equal(f, decimal=14)
+ d = dist_func_multi(spike_trains, [0, 1, 2])
+ assert_almost_equal(f_multi.avrg(), d, decimal=14)
f.mul_scalar(3) # revert above normalization
f.add(f14)
@@ -235,11 +241,13 @@ def check_multi_profile(profile_func, profile_func_multi):
def test_multi_isi():
- check_multi_profile(spk.isi_profile, spk.isi_profile_multi)
+ check_multi_profile(spk.isi_profile, spk.isi_profile_multi,
+ spk.isi_distance_multi)
def test_multi_spike():
- check_multi_profile(spk.spike_profile, spk.spike_profile_multi)
+ check_multi_profile(spk.spike_profile, spk.spike_profile_multi,
+ spk.spike_distance_multi)
def test_multi_spike_sync():
diff --git a/test/test_empty.py b/test/test_empty.py
new file mode 100644
index 0000000..48be25d
--- /dev/null
+++ b/test/test_empty.py
@@ -0,0 +1,147 @@
+""" test_empty.py
+
+Tests the distance measure for empty spike trains
+
+Copyright 2015, Mario Mulansky <mario.mulansky@gmx.net>
+
+Distributed under the BSD License
+
+"""
+
+from __future__ import print_function
+import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal, \
+ assert_array_equal, assert_array_almost_equal
+
+import pyspike as spk
+from pyspike import SpikeTrain
+
+
+def test_get_non_empty():
+ st = SpikeTrain([], edges=(0.0, 1.0))
+ spikes = st.get_spikes_non_empty()
+ assert_array_equal(spikes, [0.0, 1.0])
+
+ st = SpikeTrain([0.5, ], edges=(0.0, 1.0))
+ spikes = st.get_spikes_non_empty()
+ assert_array_equal(spikes, [0.0, 0.5, 1.0])
+
+
+def test_isi_empty():
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([], edges=(0.0, 1.0))
+ d = spk.isi_distance(st1, st2)
+ assert_equal(d, 0.0)
+ prof = spk.isi_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 1.0])
+ assert_array_equal(prof.y, [0.0, ])
+
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.isi_distance(st1, st2)
+ assert_equal(d, 0.6*0.4+0.4*0.6)
+ prof = spk.isi_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 0.4, 1.0])
+ assert_array_equal(prof.y, [0.6, 0.4])
+
+ st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.isi_distance(st1, st2)
+ assert_almost_equal(d, 0.2/0.6*0.4 + 0.0 + 0.2/0.6*0.4, decimal=15)
+ prof = spk.isi_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
+ assert_array_almost_equal(prof.y, [0.2/0.6, 0.0, 0.2/0.6], decimal=15)
+
+
+def test_spike_empty():
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([], edges=(0.0, 1.0))
+ d = spk.spike_distance(st1, st2)
+ assert_equal(d, 0.0)
+ prof = spk.spike_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 1.0])
+ assert_array_equal(prof.y1, [0.0, ])
+ assert_array_equal(prof.y2, [0.0, ])
+
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.spike_distance(st1, st2)
+ assert_almost_equal(d, 0.4*0.4*1.0/(0.4+1.0)**2 + 0.6*0.4*1.0/(0.6+1.0)**2,
+ decimal=15)
+ prof = spk.spike_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 0.4, 1.0])
+ assert_array_almost_equal(prof.y1, [0.0, 2*0.4*1.0/(0.6+1.0)**2],
+ decimal=15)
+ assert_array_almost_equal(prof.y2, [2*0.4*1.0/(0.4+1.0)**2, 0.0],
+ decimal=15)
+
+ st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.spike_distance(st1, st2)
+ s1 = np.array([0.0, 0.4*0.2/0.6, 0.2, 0.0])
+ s2 = np.array([0.0, 0.2, 0.2*0.4/0.6, 0.0])
+ isi1 = np.array([0.6, 0.6, 0.4])
+ isi2 = np.array([0.4, 0.6, 0.6])
+ expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
+ expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
+ expected_times = np.array([0.0, 0.4, 0.6, 1.0])
+ expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
+ (expected_y1+expected_y2)/2)
+ expected_spike_val /= (expected_times[-1]-expected_times[0])
+
+ assert_almost_equal(d, expected_spike_val, decimal=15)
+ prof = spk.spike_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
+ assert_array_almost_equal(prof.y1, expected_y1, decimal=15)
+ assert_array_almost_equal(prof.y2, expected_y2, decimal=15)
+
+
+def test_spike_sync_empty():
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([], edges=(0.0, 1.0))
+ d = spk.spike_sync(st1, st2)
+ assert_equal(d, 1.0)
+ prof = spk.spike_sync_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 1.0])
+ assert_array_equal(prof.y, [1.0, 1.0])
+
+ st1 = SpikeTrain([], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.spike_sync(st1, st2)
+ assert_equal(d, 0.0)
+ prof = spk.spike_sync_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_equal(prof.x, [0.0, 0.4, 1.0])
+ assert_array_equal(prof.y, [0.0, 0.0, 0.0])
+
+ st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
+ d = spk.spike_sync(st1, st2)
+ assert_almost_equal(d, 1.0, decimal=15)
+ prof = spk.spike_sync_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
+ assert_array_almost_equal(prof.y, [1.0, 1.0, 1.0, 1.0], decimal=15)
+
+ st1 = SpikeTrain([0.2, ], edges=(0.0, 1.0))
+ st2 = SpikeTrain([0.8, ], edges=(0.0, 1.0))
+ d = spk.spike_sync(st1, st2)
+ assert_almost_equal(d, 0.0, decimal=15)
+ prof = spk.spike_sync_profile(st1, st2)
+ assert_equal(d, prof.avrg())
+ assert_array_almost_equal(prof.x, [0.0, 0.2, 0.8, 1.0], decimal=15)
+ assert_array_almost_equal(prof.y, [0.0, 0.0, 0.0, 0.0], decimal=15)
+
+
+if __name__ == "__main__":
+ test_get_non_empty()
+ test_isi_empty()
+ test_spike_empty()
+ test_spike_sync_empty()
diff --git a/test/test_function.py b/test/test_function.py
index d81b03a..92d378d 100644
--- a/test/test_function.py
+++ b/test/test_function.py
@@ -10,7 +10,8 @@ Distributed under the BSD License
from __future__ import print_function
import numpy as np
from copy import copy
-from numpy.testing import assert_almost_equal, assert_array_almost_equal
+from numpy.testing import assert_equal, assert_almost_equal, \
+ assert_array_equal, assert_array_almost_equal
import pyspike as spk
@@ -20,6 +21,20 @@ def test_pwc():
x = [0.0, 1.0, 2.0, 2.5, 4.0]
y = [1.0, -0.5, 1.5, 0.75]
f = spk.PieceWiseConstFunc(x, y)
+
+ # function values
+ assert_equal(f(0.0), 1.0)
+ assert_equal(f(0.5), 1.0)
+ assert_equal(f(1.0), 0.25)
+ assert_equal(f(2.0), 0.5)
+ assert_equal(f(2.25), 1.5)
+ assert_equal(f(2.5), 2.25/2)
+ assert_equal(f(3.5), 0.75)
+ assert_equal(f(4.0), 0.75)
+
+ assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+ [1.0, 1.0, 0.25, 0.5, 1.5, 2.25/2, 0.75, 0.75])
+
xp, yp = f.get_plottable_data()
xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]
@@ -38,11 +53,17 @@ def test_pwc():
assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16)
a = f.avrg([1.0, 4.0])
assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.5*0.75)/3.0, decimal=16)
+ a = f.avrg([0.0, 2.2])
+ assert_almost_equal(a, (1.0*1.0-0.5*1.0+0.2*1.5)/2.2, decimal=15)
# averaging over multiple intervals
a = f.avrg([(0.5, 1.5), (1.5, 3.5)])
assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16)
+ # averaging over multiple intervals
+ a = f.avrg([(0.5, 1.5), (2.2, 3.5)])
+ assert_almost_equal(a, (0.5*1.0-0.5*0.5+0.3*1.5+1.0*0.75)/2.3, decimal=15)
+
def test_pwc_add():
# some random data
@@ -105,6 +126,20 @@ def test_pwl():
y1 = [1.0, -0.5, 1.5, 0.75]
y2 = [1.5, -0.4, 1.5, 0.25]
f = spk.PieceWiseLinFunc(x, y1, y2)
+
+ # function values
+ assert_equal(f(0.0), 1.0)
+ assert_equal(f(0.5), 1.25)
+ assert_equal(f(1.0), 0.5)
+ assert_equal(f(2.0), 1.1/2)
+ assert_equal(f(2.25), 1.5)
+ assert_equal(f(2.5), 2.25/2)
+ assert_equal(f(3.5), 0.75-0.5*1.0/1.5)
+ assert_equal(f(4.0), 0.25)
+
+ assert_array_equal(f([0.0, 0.5, 1.0, 2.0, 2.25, 2.5, 3.5, 4.0]),
+ [1.0, 1.25, 0.5, 0.55, 1.5, 2.25/2, 0.75-0.5/1.5, 0.25])
+
xp, yp = f.get_plottable_data()
xp_expected = [0.0, 1.0, 1.0, 2.0, 2.0, 2.5, 2.5, 4.0]