summaryrefslogtreecommitdiff
path: root/pyspike/spike_directionality.py
diff options
context:
space:
mode:
authorMario Mulansky <mario.mulansky@gmx.net>2015-10-10 20:45:09 +0200
committerMario Mulansky <mario.mulansky@gmx.net>2018-06-02 12:59:43 -0700
commit18ea80e2d01e9eb4ceee17219f91098efbcdf67c (patch)
treed7819736b059e9885d53c14e28160d6487d93e6c /pyspike/spike_directionality.py
parenta5e6a12a619cb9528a4cf7f3ef8f082e5eb877c2 (diff)
spike sync filtering, cython sim ann
Added function for filtering out events based on a threshold for the spike sync values. Useful for focusing on synchronous events during directionality analysis. Also added cython version of simulated annealing for performance.
Diffstat (limited to 'pyspike/spike_directionality.py')
-rw-r--r--pyspike/spike_directionality.py54
1 file changed, 33 insertions, 21 deletions
diff --git a/pyspike/spike_directionality.py b/pyspike/spike_directionality.py
index cda7fe3..e1f5f16 100644
--- a/pyspike/spike_directionality.py
+++ b/pyspike/spike_directionality.py
@@ -242,27 +242,39 @@ def optimal_spike_train_order_from_matrix(D, full_output=False):
p = np.arange(N)
- T = 2*np.max(D) # starting temperature
- T_end = 1E-5 * T # final temperature
- alpha = 0.9 # cooling factor
- total_iter = 0
- while T > T_end:
- iterations = 0
- succ_iter = 0
- while iterations < 100*N and succ_iter < 10*N:
- # exchange two rows and cols
- ind1 = np.random.randint(N-1)
- delta_A = -2*D[p[ind1], p[ind1+1]]
- if delta_A > 0.0 or exp(delta_A/T) > np.random.random():
- # swap indices
- p[ind1], p[ind1+1] = p[ind1+1], p[ind1]
- A += delta_A
- succ_iter += 1
- iterations += 1
- total_iter += iterations
- T *= alpha # cool down
- if succ_iter == 0:
- break
+ T_start = 2*np.max(D) # starting temperature
+ T_end = 1E-5 * T_start # final temperature
+ alpha = 0.9 # cooling factor
+
+ from cython.cython_simulated_annealing import sim_ann_cython as sim_ann
+
+ p, A, total_iter = sim_ann(D, T_start, T_end, alpha)
+
+ # T = T_start
+ # total_iter = 0
+ # while T > T_end:
+ # iterations = 0
+ # succ_iter = 0
+ # # equilibrate for 100*N steps or 10*N successful steps
+ # while iterations < 100*N and succ_iter < 10*N:
+ # # exchange two rows and cols
+ # ind1 = np.random.randint(N-1)
+ # if ind1 < N-1:
+ # ind2 = ind1+1
+    #         else:  # this can never happen
+ # ind2 = 0
+ # delta_A = -2*D[p[ind1], p[ind2]]
+ # if delta_A > 0.0 or exp(delta_A/T) > np.random.random():
+ # # swap indices
+ # p[ind1], p[ind2] = p[ind2], p[ind1]
+ # A += delta_A
+ # succ_iter += 1
+ # iterations += 1
+ # total_iter += iterations
+ # T *= alpha # cool down
+ # if succ_iter == 0:
+ # break
+
if full_output:
return p, A, total_iter
else: