"""
This module contains functions to create network topologies and initialize weights. Currently, only the
random connectivity topology is implemented.
"""
import numpy as np
from scipy.sparse import coo_array
from pdb import set_trace
# Module-level default random generator, shared by every function in this
# module whose caller does not supply its own ``rng``. Note: it is seeded
# from OS entropy at import time, so runs are not reproducible unless an
# explicit generator is passed in.
def_rng = np.random.default_rng()
def create_topology(N, N_syn, allow_autapse=False, topology='random', rng=None):
    """
    Create a sparse connectivity matrix following specified topology rules.

    Parameters
    ----------
    N : int
        Number of neurons.
    N_syn : int
        Number of synapses (determines sparsity). Must not exceed the number
        of admissible connections (``N**2``, or ``N*(N-1)`` effectively when
        autapses are disallowed).
    allow_autapse : bool, optional
        Whether to allow self-connections (default False).
    topology : str, optional
        Type of topology to create (default 'random').
        Currently supported: 'random'.
    rng : numpy.random.Generator, optional
        Random number generator instance. Falls back to the module-level
        default generator when omitted.

    Returns
    -------
    scipy.sparse.csr_array
        Sparse adjacency matrix of shape (N, N) with exactly ``N_syn``
        entries equal to 1. Rows index postsynaptic neurons, columns
        presynaptic neurons.

    Raises
    ------
    NotImplementedError
        If ``topology`` is not 'random'.
    RuntimeError
        If the resulting matrix does not contain exactly ``N_syn`` synapses
        (i.e. duplicate coordinates were produced).
    """
    if rng is None:
        rng = def_rng
    if topology != 'random':
        raise NotImplementedError(f'Currently we only support random topology. {topology} was requested.')
    # Sample N_syn distinct flat indices into the N x N matrix.
    # BUG FIX: use the supplied generator instead of the global np.random
    # state, otherwise the ``rng`` argument had no effect on the topology.
    idxs = rng.choice(N**2, N_syn, replace=False)
    idxs = np.sort(idxs)  # sorted indices keep the sparse representation well-ordered
    rows = (idxs // N).astype(int)
    cols = idxs % N
    if not allow_autapse:
        # For each post neuron that is connected to itself, pick a new
        # presynaptic index in [0, N) different from all existing
        # presynapses of that neuron (rows == posts, cols == pres).
        autapse_mask = rows == cols
        new_pres = []
        for autapse in cols[autapse_mask]:
            pres_of_autapse = cols[rows == autapse]  # all pres of this post
            new_pre = autapse
            # Rejection-sample until the replacement is neither the neuron
            # itself nor a duplicate of an existing connection.
            while new_pre in pres_of_autapse:
                new_pre = rng.integers(N)
            new_pres.append(new_pre)
        cols[autapse_mask] = new_pres
    A_adj = coo_array((np.ones_like(rows), (rows, cols)),
                      shape=(N, N)).tocsr()
    # Duplicate (row, col) pairs would be summed by the COO->CSR conversion
    # and silently reduce nnz; raise instead of using ``assert`` so the
    # check survives ``python -O``.
    if A_adj.nnz != N_syn:
        raise RuntimeError(f'Expected {N_syn} synapses, got {A_adj.nnz}.')
    return A_adj
def intialize_weights(N_input, N, N_output,
                      p, f, mu_e, sigma_0,
                      return_adj=True, rng=None):
    """
    Create the three weight arrays of a sparse E/I balanced network:

    - the feedforward weights ``w_in`` mapping inputs to neurons,
    - the recurrent weights ``w_rec`` on the existing synapses, and
    - the readout weights ``w_out`` mapping neurons to outputs.

    The recurrent weights are initialized to ensure excitatory/inhibitory
    balance, whereas the feedforward and readout weights are drawn from a
    centered normal distribution.

    Parameters
    ----------
    N_input : int
        Number of input dimensions
    N : int
        Number of neurons
    N_output : int
        Number of output dimensions
    p : float
        Connection probability
    f : float
        Fraction of excitatory neurons
    mu_e : float
        Mean excitatory weight. The mean inhibitory weight is computed based
        on f to ensure E/I balance.
    sigma_0 : float
        Recurrent weight standard deviation (same for both excitatory and
        inhibitory)
    return_adj : bool, optional
        Whether to also return the adjacency matrix. Default is True.
    rng : numpy.random.Generator, optional
        Random number generator instance

    Returns
    -------
    w_in : ndarray
        Input weights of shape (N, N_input)
    w_rec : ndarray
        Recurrent weight values of shape (N_syn,), aligned with the nonzero
        entries of ``A_adj`` (i.e. with ``A_adj.nonzero()``)
    w_out : ndarray
        Readout weights of shape (N_output, N)
    A_adj : scipy.sparse.csr_array
        Adjacency matrix of shape (N, N); only returned if ``return_adj``.

    .. note::
        - The recurrent weights are scaled by :math:`1/\\sqrt{Np}` to ensure the expected recurrent input
          variance, independent of the network size and connection probability, is equal to ``sigma_0``.
        - The feedforward weights are scaled by :math:`1/\\sqrt{dim_{input}}` to ensure that feedforward
          input variance on each neuron is equal to ``1`` independent of the input dimension.
        - The feedforward and recurrent gains (:math:`J_u` and :math:`J`, respectively), will be applied
          later during integrating the dynamical system.
    """
    if rng is None:
        rng = def_rng
    # Choose mu_i such that f*mu_e + (1-f)*mu_i == 0 (E/I balance),
    # special-casing the degenerate fractions to avoid division by zero.
    if f == 0:    # no excitatory population
        mu_i = -mu_e
    elif f == 1:  # no inhibitory population
        mu_i = 0  # doesn't matter as there's no inhibitory pop anyways
    else:
        mu_i = -f / (1 - f) * mu_e
    std_e = std_i = sigma_0
    N_syn = int(round(N**2 * p))
    Ne = int(N * f)  # number of excitatory neurons
    ## FEED-FORWARD WEIGHTS ##
    w_in = rng.normal(0, 1, size=(N, N_input))
    w_in /= np.sqrt(N_input)
    ## RECURRENT WEIGHTS ##
    # Even though we know Ne, the randomness of A_adj precludes us from
    # knowing which entries belong to the excitatory population, so we
    # classify each synapse by its presynaptic index. (row is post)
    # BUG FIX: forward ``rng`` so the topology is drawn from the caller's
    # generator as well, making the whole initialization reproducible.
    A_adj = create_topology(N=N, N_syn=N_syn, rng=rng)
    posts, pres = A_adj.nonzero()
    w_rec = np.zeros(N_syn)
    pres_e = pres < Ne  # True where the presynaptic neuron is excitatory
    n_e = np.count_nonzero(pres_e)
    w_rec[pres_e] = rng.normal(mu_e, std_e, size=n_e)
    w_rec[~pres_e] = rng.normal(mu_i, std_i, size=N_syn - n_e)
    w_rec /= np.sqrt(N * p)  # to make variance independent of N and p
    ## READOUT WEIGHTS ##
    w_out = rng.normal(0, 1, size=(N_output, N))
    w_out /= np.sqrt(N_output)
    if return_adj:
        return w_in, w_rec, w_out, A_adj
    else:
        return w_in, w_rec, w_out