author     Mario Mulansky <mario.mulansky@gmx.net>    2015-12-14 14:24:14 +0100
committer  Mario Mulansky <mario.mulansky@gmx.net>    2015-12-14 14:24:14 +0100
commit     776d8d686f9c19a729038270f69872801bba43a2 (patch)
tree       0b4f2bc756bd0fe434360a8af5a920e76c5352f8 /test
parent     b970055641b215d30b671ee810e29c6a55e6214a (diff)
parent     0dbdc0096dacc1f6233600ed6e36487bbab6b718 (diff)
Merge branch 'develop' of github.com:mariomulansky/PySpike into develop
Diffstat (limited to 'test')
-rw-r--r--    test/test_distance.py                          20
-rw-r--r--    test/test_regression/test_regression_15.py     38
-rw-r--r--    test/test_spikes.py                             9
3 files changed, 36 insertions, 31 deletions
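The recurring change across all three test files is to stop loading test data through paths relative to the current working directory ("test/...") and instead resolve them relative to the test module itself. A minimal sketch of that pattern, using the file name and loader taken from the diff below (the data file is assumed to sit next to the test module):

import os

import pyspike as spk

# Directory containing this test module, independent of where the
# test runner was started from.
TEST_PATH = os.path.dirname(os.path.realpath(__file__))

# Absolute path to the test data shipped alongside the tests.
data_file = os.path.join(TEST_PATH, "SPIKE_Sync_Test.txt")
spike_trains = spk.load_spike_trains_from_txt(data_file, edges=[0, 4000])

With the old hard-coded "test/SPIKE_Sync_Test.txt", the tests only passed when invoked from the repository root.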
diff --git a/test/test_distance.py b/test/test_distance.py
index 626b438..8cf81e2 100644
--- a/test/test_distance.py
+++ b/test/test_distance.py
@@ -17,6 +17,8 @@ from numpy.testing import assert_equal, assert_almost_equal, \
import pyspike as spk
from pyspike import SpikeTrain
+import os
+TEST_PATH = os.path.dirname(os.path.realpath(__file__))
def test_isi():
# generate two spike trains:
@@ -294,8 +296,8 @@ def test_multi_spike_sync():
expected, decimal=15)
# multivariate regression test
- spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
- edges=[0, 4000])
+ spike_trains = spk.load_spike_trains_from_txt(
+ os.path.join(TEST_PATH, "SPIKE_Sync_Test.txt"), edges=[0, 4000])
# extract all spike times
spike_times = np.array([])
for st in spike_trains:
@@ -328,10 +330,10 @@ def check_dist_matrix(dist_func, dist_matrix_func):
f_matrix = dist_matrix_func(spike_trains)
# check zero diagonal
- for i in xrange(4):
+ for i in range(4):
assert_equal(0.0, f_matrix[i, i])
- for i in xrange(4):
- for j in xrange(i+1, 4):
+ for i in range(4):
+ for j in range(i+1, 4):
assert_equal(f_matrix[i, j], f_matrix[j, i])
assert_equal(f12, f_matrix[1, 0])
assert_equal(f13, f_matrix[2, 0])
@@ -371,8 +373,8 @@ def test_regression_spiky():
# multivariate check
- spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
- (0.0, 4000.0))
+ spike_trains = spk.load_spike_trains_from_txt(
+ os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
isi_dist = spk.isi_distance_multi(spike_trains)
# get the full precision from SPIKY
assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)
@@ -409,8 +411,8 @@ def test_regression_spiky():
def test_multi_variate_subsets():
- spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
- (0.0, 4000.0))
+ spike_trains = spk.load_spike_trains_from_txt(
+ os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
sub_set = [1, 3, 5, 7]
spike_trains_sub_set = [spike_trains[i] for i in sub_set]
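The loops in check_dist_matrix above also move from xrange to range: xrange exists only in Python 2, while range works in both Python 2 and 3 (in Python 2 it builds a small list, which is harmless for these 4x4 loops). A minimal sketch of the diagonal and symmetry checks in the portable form, where f_matrix stands in for the distance matrix computed in the test:

import numpy as np
from numpy.testing import assert_equal

f_matrix = np.zeros((4, 4))  # stand-in for dist_matrix_func(spike_trains)

# Zero diagonal: every spike train has distance 0 to itself.
for i in range(4):
    assert_equal(0.0, f_matrix[i, i])

# Symmetry: the distance matrix must equal its transpose.
for i in range(4):
    for j in range(i + 1, 4):
        assert_equal(f_matrix[i, j], f_matrix[j, i])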
diff --git a/test/test_regression/test_regression_15.py b/test/test_regression/test_regression_15.py
index 1ce1290..dcacae2 100644
--- a/test/test_regression/test_regression_15.py
+++ b/test/test_regression/test_regression_15.py
@@ -8,68 +8,70 @@ Distributed under the BSD License
"""
+from __future__ import division
+
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_almost_equal
import pyspike as spk
+import os
+TEST_PATH = os.path.dirname(os.path.realpath(__file__))
+TEST_DATA = os.path.join(TEST_PATH, "..", "SPIKE_Sync_Test.txt")
def test_regression_15_isi():
# load spike trains
- spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
- edges=[0, 4000])
+ spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])
N = len(spike_trains)
dist_mat = spk.isi_distance_matrix(spike_trains)
assert_equal(dist_mat.shape, (N, N))
- ind = np.arange(N/2)
+ ind = np.arange(N//2)
dist_mat = spk.isi_distance_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
- ind = np.arange(N/2, N)
+ ind = np.arange(N//2, N)
dist_mat = spk.isi_distance_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
def test_regression_15_spike():
# load spike trains
- spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
- edges=[0, 4000])
+ spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])
N = len(spike_trains)
dist_mat = spk.spike_distance_matrix(spike_trains)
assert_equal(dist_mat.shape, (N, N))
- ind = np.arange(N/2)
+ ind = np.arange(N//2)
dist_mat = spk.spike_distance_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
- ind = np.arange(N/2, N)
+ ind = np.arange(N//2, N)
dist_mat = spk.spike_distance_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
def test_regression_15_sync():
# load spike trains
- spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
- edges=[0, 4000])
+ spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])
N = len(spike_trains)
dist_mat = spk.spike_sync_matrix(spike_trains)
assert_equal(dist_mat.shape, (N, N))
- ind = np.arange(N/2)
+ ind = np.arange(N//2)
dist_mat = spk.spike_sync_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
- ind = np.arange(N/2, N)
+ ind = np.arange(N//2, N)
dist_mat = spk.spike_sync_matrix(spike_trains, ind)
- assert_equal(dist_mat.shape, (N/2, N/2))
+ assert_equal(dist_mat.shape, (N//2, N//2))
if __name__ == "__main__":
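The regression-15 tests also switch from N/2 to N//2. With the newly added "from __future__ import division" (and always in Python 3), the / operator performs true division and returns a float, so np.arange(N/2) would yield float values that recent NumPy versions refuse to use as array indices; // keeps the half-length an integer. A small illustration of the difference, using N = 10 purely as an example value:

from __future__ import division

import numpy as np

N = 10
print(N / 2, N // 2)        # 5.0 5 -- true division vs. floor division

a = np.zeros((N, N))
ind = np.arange(N // 2)     # integer indices: array([0, 1, 2, 3, 4])
print(a[ind][:, ind].shape) # (5, 5)

# np.arange(N / 2) would produce floats (0.0, 1.0, ...), which recent
# NumPy versions reject when used to index an array.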
diff --git a/test/test_spikes.py b/test/test_spikes.py
index d4eb131..609a819 100644
--- a/test/test_spikes.py
+++ b/test/test_spikes.py
@@ -13,10 +13,12 @@ from numpy.testing import assert_equal
import pyspike as spk
+import os
+TEST_PATH = os.path.dirname(os.path.realpath(__file__))
+TEST_DATA = os.path.join(TEST_PATH, "PySpike_testdata.txt")
def test_load_from_txt():
- spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
- edges=(0, 4000))
+ spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))
assert len(spike_trains) == 40
# check the first spike train
@@ -48,8 +50,7 @@ def check_merged_spikes(merged_spikes, spike_trains):
def test_merge_spike_trains():
# first load the data
- spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
- edges=(0, 4000))
+ spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))
merged_spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
# test if result is sorted