diff options
author | Mario Mulansky <mario.mulansky@gmx.net> | 2018-09-20 10:49:42 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2018-09-20 10:49:42 -0700 |
commit | 34bd30415dd93a2425ce566627e24ee9483ada3e (patch) | |
tree | dcfa9164d46e3cf501a1e8dcf4970f350063561a /test/test_function.py | |
parent | 44d23620d2faa78ca74437fbd3f1b95da722a853 (diff) |
Spike Order support (#39) [tag: 0.6.0]
* reorganized directionality module
* further refactoring of directionality
* completed python directionality backend
* added SPIKE-Sync based filtering
new function filter_by_spike_sync removes spikes that have a multi-variate
Spike Sync value below some threshold
not yet fully tested, python backend missing.
* spike sync filtering, cython sim ann
Added function for filtering out events based on a threshold for the spike
sync values. Useful for focusing on synchronous events during directionality
analysis.
Also added cython version of simulated annealing for performance.
* added coincidence single profile to python backend
missing function in python backend added, identified and fixed a bug in the
implementation as well
* updated test case to new spike sync behavior
* python3 fixes
* another python3 fix
* reorganized directionality module
* further refactoring of directionality
* completed python directionality backend
* added SPIKE-Sync based filtering
new function filter_by_spike_sync removes spikes that have a multi-variate
Spike Sync value below some threshold
not yet fully tested, python backend missing.
* spike sync filtering, cython sim ann
Added function for filtering out events based on a threshold for the spike
sync values. Useful for focusing on synchronous events during directionality
analysis.
Also added cython version of simulated annealing for performance.
* added coincidence single profile to python backend
missing function in python backend added, identified and fixed a bug in the
implementation as well
* updated test case to new spike sync behavior
* python3 fixes
* another python3 fix
* Fix absolute imports in directionality measures
* remove commented code
* Add directionality to docs, bump version
* Clean up directionality module, add doxy.
* Remove debug print from tests
* Fix bug in calling Python backend
* Fix incorrect integrals in PieceWiseConstFunc (#36)
* Add (some currently failing) tests for PieceWiseConstFunc.integral
* Fix implementation of PieceWiseConstFunc.integral
Just by adding a special condition for when we are only taking an
integral "between" two edges of a PieceWiseConstFunc
All tests now pass.
Fixes #33.
* Add PieceWiseConstFunc.integral tests for ValueError
* Add testing bounds of integral
* Raise ValueError in function implementation
* Fix incorrect integrals in PieceWiseLinFunc (#38)
Integrals of piece-wise linear functions were incorrect if the
requested interval lies completely between two support points.
This has been fixed, and a unit test exercising this behavior
was added.
Fixes #38
* Add Spike Order example and Tutorial section
Adds an example computing spike order profile and the optimal
spike train order. Also adds a section on spike train order to the
tutorial.
Diffstat (limited to 'test/test_function.py')
-rw-r--r-- | test/test_function.py | 62 |
1 file changed, 62 insertions, 0 deletions
diff --git a/test/test_function.py b/test/test_function.py index 92d378d..6c04839 100644 --- a/test/test_function.py +++ b/test/test_function.py @@ -10,6 +10,7 @@ Distributed under the BSD License from __future__ import print_function import numpy as np from copy import copy +from nose.tools import raises from numpy.testing import assert_equal, assert_almost_equal, \ assert_array_equal, assert_array_almost_equal @@ -49,6 +50,8 @@ def test_pwc(): assert_almost_equal(a, (0.5-0.5+0.5*1.5+1.0*0.75)/3.0, decimal=16) a = f.avrg([1.5, 3.5]) assert_almost_equal(a, (-0.5*0.5+0.5*1.5+1.0*0.75)/2.0, decimal=16) + a = f.avrg([1.0, 2.0]) + assert_almost_equal(a, (1.0*-0.5)/1.0, decimal=16) a = f.avrg([1.0, 3.5]) assert_almost_equal(a, (-0.5*1.0+0.5*1.5+1.0*0.75)/2.5, decimal=16) a = f.avrg([1.0, 4.0]) @@ -120,6 +123,53 @@ def test_pwc_avrg(): assert_array_almost_equal(f1.x, x_expected, decimal=16) assert_array_almost_equal(f1.y, y_expected, decimal=16) +def test_pwc_integral(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f1 = spk.PieceWiseConstFunc(x, y) + + # test full interval + full = 1.0*1.0 + 1.0*-0.5 + 0.5*1.5 + 1.5*0.75; + assert_equal(f1.integral(), full) + assert_equal(f1.integral((np.min(x),np.max(x))), full) + # test part interval, spanning an edge + assert_equal(f1.integral((0.5,1.5)), 0.5*1.0 + 0.5*-0.5) + # test part interval, just over two edges + assert_almost_equal(f1.integral((1.0-1e-16,2+1e-16)), 1.0*-0.5, decimal=14) + # test part interval, between two edges + assert_equal(f1.integral((1.0,2.0)), 1.0*-0.5) + assert_equal(f1.integral((1.2,1.7)), (1.7-1.2)*-0.5) + # test part interval, start to before and after edge + assert_equal(f1.integral((0.0,0.7)), 0.7*1.0) + assert_equal(f1.integral((0.0,1.1)), 1.0*1.0+0.1*-0.5) + # test part interval, before and after edge till end + assert_equal(f1.integral((2.6,4.0)), (4.0-2.6)*0.75) + assert_equal(f1.integral((2.4,4.0)), (2.5-2.4)*1.5+(4-2.5)*0.75) + +@raises(ValueError) +def test_pwc_integral_bad_bounds_inv(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f1 = spk.PieceWiseConstFunc(x, y) + f1.integral((3,2)) + +@raises(ValueError) +def test_pwc_integral_bad_bounds_oob_1(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f1 = spk.PieceWiseConstFunc(x, y) + f1.integral((1,6)) + +@raises(ValueError) +def test_pwc_integral_bad_bounds_oob_2(): + # some random data + x = [0.0, 1.0, 2.0, 2.5, 4.0] + y = [1.0, -0.5, 1.5, 0.75] + f1 = spk.PieceWiseConstFunc(x, y) + f1.integral((-1,3)) def test_pwl(): x = [0.0, 1.0, 2.0, 2.5, 4.0] @@ -162,6 +212,18 @@ def test_pwl(): a = f.avrg([1.0, 4.0]) assert_almost_equal(a, (-0.45 + 0.75 + 1.5*0.5) / 3.0, decimal=16) + # interval between support points + a = f.avrg([1.1, 1.5]) + assert_almost_equal(a, (-0.5+0.1*0.1 - 0.45) * 0.5, decimal=14) + + # starting at a support point + a = f.avrg([1.0, 1.5]) + assert_almost_equal(a, (-0.5 - 0.45) * 0.5, decimal=14) + + # start and end at support point + a = f.avrg([1.0, 2.0]) + assert_almost_equal(a, (-0.5 - 0.4) * 0.5, decimal=14) + # averaging over multiple intervals a = f.avrg([(0.5, 1.5), (1.5, 2.5)]) assert_almost_equal(a, (1.375*0.5 - 0.45 + 0.75)/2.0, decimal=16) |